Compare commits
53 Commits
Author | SHA1 | Date | |
---|---|---|---|
2672509d3f | |||
ee3a635852 | |||
a222b1c2fa | |||
f0556e89f3 | |||
fe8540c8ba | |||
e34bf19698 | |||
f70353e6ca | |||
0403443634 | |||
e2ed429aac | |||
5c856ec3ed | |||
052f37294d | |||
93bb375059 | |||
574f7a594c | |||
0b2a058550 | |||
88d15c89e5 | |||
4bf7113334 | |||
6bdbeae144 | |||
09c27379cb | |||
2bc6f7ee5e | |||
0ac50d647d | |||
5f9ffc7356 | |||
502b665224 | |||
bda0d7ed7e | |||
de2a60d12f | |||
5b3a93a43a | |||
6b241f8889 | |||
0a80ac0a8a | |||
6ce442354e | |||
9b38a3c06e | |||
5dead05324 | |||
6916dd9e2a | |||
f89888a542 | |||
d93b198b09 | |||
9e390d0fdb | |||
8329ee861e | |||
b8585a0afb | |||
c96f5118cf | |||
17e1a1f1e1 | |||
de940dff75 | |||
4fc1e029e4 | |||
d0a4151a2b | |||
ad5dd4799b | |||
1c49af74ac | |||
eda8ce36df | |||
e82c510094 | |||
0378308721 | |||
189a32683f | |||
f731b9f78d | |||
3701e21284 | |||
490d4996d2 | |||
f099a8f1ed | |||
a0228a0abc | |||
a5257b52e7 |
249
changelog.md
Normal file
249
changelog.md
Normal file
@@ -0,0 +1,249 @@
|
|||||||
|
# Changelog
|
||||||
|
|
||||||
|
## 2025-10-03 - 0.7.1 - fix(docs)
|
||||||
|
Add README image generation docs and .claude local settings
|
||||||
|
|
||||||
|
- Add .claude/settings.local.json with permission allow-list for local assistant tooling and web search
|
||||||
|
- Update README provider capabilities table to include an Images column and reference gpt-image-1
|
||||||
|
- Add Image Generation & Editing section with examples, options, and gpt-image-1 advantages
|
||||||
|
- Mark image generation support as implemented in the roadmap and remove duplicate entry
|
||||||
|
|
||||||
|
## 2025-10-03 - 0.7.0 - feat(providers)
|
||||||
|
Add research API and image generation/editing support; extend providers and tests
|
||||||
|
|
||||||
|
- Introduce ResearchOptions and ResearchResponse to the MultiModalModel interface and implement research() where supported
|
||||||
|
- OpenAiProvider: implement research(), add imageGenerate() and imageEdit() methods (gpt-image-1 / DALL·E support), and expose imageModel option
|
||||||
|
- AnthropicProvider: implement research() and vision handling; explicitly throw for unsupported image generation/editing
|
||||||
|
- PerplexityProvider: implement research() (sonar / sonar-pro support) and expose citation parsing
|
||||||
|
- Add image/document-related interfaces (ImageGenerateOptions, ImageEditOptions, ImageResponse) to abstract API
|
||||||
|
- Add image generation/editing/no-op stubs for other providers (Exo, Groq, Ollama, XAI) that throw informative errors to preserve API compatibility
|
||||||
|
- Add comprehensive OpenAI image generation tests and helper to save test outputs (test/test.image.openai.ts)
|
||||||
|
- Update README with Research & Web Search documentation, capability matrix, and roadmap entry for Research & Web Search API
|
||||||
|
- Add local Claude agent permissions file (.claude/settings.local.json) and various provider type/import updates
|
||||||
|
|
||||||
|
## 2025-09-28 - 0.6.1 - fix(provider.anthropic)
|
||||||
|
Fix Anthropic research tool identifier and add tests + local Claude permissions
|
||||||
|
|
||||||
|
- Replace Anthropic research tool type from 'computer_20241022' to 'web_search_20250305' to match the expected web-search tool schema.
|
||||||
|
- Add comprehensive test suites and fixtures for providers and research features (new/updated tests under test/ including anthropic, openai, research.* and stubs).
|
||||||
|
- Fix test usage of XAI provider class name (use XAIProvider) and adjust basic provider test expectations (provider instantiation moved to start()).
|
||||||
|
- Add .claude/settings.local.json with local Claude permissions to allow common CI/dev commands and web search during testing.
|
||||||
|
|
||||||
|
## 2025-09-28 - 0.6.0 - feat(research)
|
||||||
|
Introduce research API with provider implementations, docs and tests
|
||||||
|
|
||||||
|
- Add ResearchOptions and ResearchResponse interfaces and a new abstract research() method to MultiModalModel
|
||||||
|
- Implement research() for OpenAiProvider (deep research model selection, optional web search/tools, background flag, source extraction)
|
||||||
|
- Implement research() for AnthropicProvider (web search tool support, domain filters, citation extraction)
|
||||||
|
- Implement research() for PerplexityProvider (sonar / sonar-pro model usage and citation parsing)
|
||||||
|
- Add research() stubs to Exo, Groq, Ollama and XAI providers that throw a clear 'not yet supported' error to preserve interface compatibility
|
||||||
|
- Add tests for research interfaces and provider research methods (test files updated/added)
|
||||||
|
- Add documentation: readme.research.md describing the research API, usage and configuration
|
||||||
|
- Export additional providers from ts/index.ts and update provider typings/imports across files
|
||||||
|
- Add a 'typecheck' script to package.json
|
||||||
|
- Add .claude/settings.local.json (local agent permissions for CI/dev tasks)
|
||||||
|
|
||||||
|
## 2025-08-12 - 0.5.11 - fix(openaiProvider)
|
||||||
|
Update default chat model to gpt-5-mini and bump dependency versions
|
||||||
|
|
||||||
|
- Changed default chat model in OpenAiProvider from 'o3-mini' and 'o4-mini' to 'gpt-5-mini'
|
||||||
|
- Upgraded @anthropic-ai/sdk from ^0.57.0 to ^0.59.0
|
||||||
|
- Upgraded openai from ^5.11.0 to ^5.12.2
|
||||||
|
- Added new local Claude settings configuration (.claude/settings.local.json)
|
||||||
|
|
||||||
|
## 2025-08-03 - 0.5.10 - fix(dependencies)
|
||||||
|
Update SmartPdf to v4.1.1 for enhanced PDF processing capabilities
|
||||||
|
|
||||||
|
- Updated @push.rocks/smartpdf from ^3.3.0 to ^4.1.1
|
||||||
|
- Enhanced PDF conversion with improved scale options and quality controls
|
||||||
|
- Dependency updates for better performance and compatibility
|
||||||
|
|
||||||
|
## 2025-08-01 - 0.5.9 - fix(documentation)
|
||||||
|
Remove contribution section from readme
|
||||||
|
|
||||||
|
- Removed the contribution section from readme.md as requested
|
||||||
|
- Kept the roadmap section for future development plans
|
||||||
|
|
||||||
|
## 2025-08-01 - 0.5.8 - fix(core)
|
||||||
|
Fix SmartPdf lifecycle management and update dependencies
|
||||||
|
|
||||||
|
- Moved SmartPdf instance management to the MultiModalModel base class for better resource sharing
|
||||||
|
- Fixed memory leaks by properly implementing cleanup in the base class stop() method
|
||||||
|
- Updated SmartAi class to properly stop all providers on shutdown
|
||||||
|
- Updated @push.rocks/smartrequest from v2.1.0 to v4.2.1 with migration to new API
|
||||||
|
- Enhanced readme with professional documentation and feature matrix
|
||||||
|
|
||||||
|
## 2025-07-26 - 0.5.7 - fix(provider.openai)
|
||||||
|
Fix stream type mismatch in audio method
|
||||||
|
|
||||||
|
- Fixed type error where OpenAI SDK returns a web ReadableStream but the audio method needs to return a Node.js ReadableStream
|
||||||
|
- Added conversion using Node.js's built-in Readable.fromWeb() method
|
||||||
|
|
||||||
|
## 2025-07-25 - 0.5.5 - feat(documentation)
|
||||||
|
Comprehensive documentation enhancement and test improvements
|
||||||
|
|
||||||
|
- Completely rewrote readme.md with detailed provider comparisons, advanced usage examples, and performance tips
|
||||||
|
- Added comprehensive examples for all supported providers (OpenAI, Anthropic, Perplexity, Groq, XAI, Ollama, Exo)
|
||||||
|
- Included detailed sections on chat interactions, streaming, TTS, vision processing, and document analysis
|
||||||
|
- Added verbose flag to test script for better debugging
|
||||||
|
|
||||||
|
## 2025-05-13 - 0.5.4 - fix(provider.openai)
|
||||||
|
Update dependency versions, clean test imports, and adjust default OpenAI model configurations
|
||||||
|
|
||||||
|
- Bump dependency versions in package.json (@git.zone/tsbuild, @push.rocks/tapbundle, openai, etc.)
|
||||||
|
- Change default chatModel from 'gpt-4o' to 'o4-mini' and visionModel from 'gpt-4o' to '04-mini' in provider.openai.ts
|
||||||
|
- Remove unused 'expectAsync' import from test file
|
||||||
|
|
||||||
|
## 2025-04-03 - 0.5.3 - fix(package.json)
|
||||||
|
Add explicit packageManager field to package.json
|
||||||
|
|
||||||
|
- Include the packageManager property to specify the pnpm version and checksum.
|
||||||
|
- Align package metadata with current standards.
|
||||||
|
|
||||||
|
## 2025-04-03 - 0.5.2 - fix(readme)
|
||||||
|
Remove redundant conclusion section from README to streamline documentation.
|
||||||
|
|
||||||
|
- Eliminated the conclusion block describing SmartAi's capabilities and documentation pointers.
|
||||||
|
|
||||||
|
## 2025-02-25 - 0.5.1 - fix(OpenAiProvider)
|
||||||
|
Corrected audio model ID in OpenAiProvider
|
||||||
|
|
||||||
|
- Fixed audio model identifier from 'o3-mini' to 'tts-1-hd' in the OpenAiProvider's audio method.
|
||||||
|
- Addressed minor code formatting issues in test suite for better readability.
|
||||||
|
- Corrected spelling errors in test documentation and comments.
|
||||||
|
|
||||||
|
## 2025-02-25 - 0.5.0 - feat(documentation and configuration)
|
||||||
|
Enhanced package and README documentation
|
||||||
|
|
||||||
|
- Expanded the package description to better reflect the library's capabilities.
|
||||||
|
- Improved README with detailed usage examples for initialization, chat interactions, streaming chat, audio generation, document analysis, and vision processing.
|
||||||
|
- Provided error handling strategies and advanced streaming customization examples.
|
||||||
|
|
||||||
|
## 2025-02-25 - 0.4.2 - fix(core)
|
||||||
|
Fix OpenAI chat streaming and PDF document processing logic.
|
||||||
|
|
||||||
|
- Updated OpenAI chat streaming to handle new async iterable format.
|
||||||
|
- Improved PDF document processing by filtering out empty image buffers.
|
||||||
|
- Removed unsupported temperature options from OpenAI requests.
|
||||||
|
|
||||||
|
## 2025-02-25 - 0.4.1 - fix(provider)
|
||||||
|
Fix provider modules for consistency
|
||||||
|
|
||||||
|
- Updated TypeScript interfaces and options in provider modules for better type safety.
|
||||||
|
- Modified transform stream handlers in Exo, Groq, and Ollama providers for consistency.
|
||||||
|
- Added optional model options to OpenAI provider for custom model usage.
|
||||||
|
|
||||||
|
## 2025-02-08 - 0.4.0 - feat(core)
|
||||||
|
Added support for Exo AI provider
|
||||||
|
|
||||||
|
- Introduced ExoProvider with chat functionalities.
|
||||||
|
- Updated SmartAi class to initialize ExoProvider.
|
||||||
|
- Extended Conversation class to support ExoProvider.
|
||||||
|
|
||||||
|
## 2025-02-05 - 0.3.3 - fix(documentation)
|
||||||
|
Update readme with detailed license and legal information.
|
||||||
|
|
||||||
|
- Added explicit section on License and Legal Information in the README.
|
||||||
|
- Clarified the use of trademarks and company information.
|
||||||
|
|
||||||
|
## 2025-02-05 - 0.3.2 - fix(documentation)
|
||||||
|
Remove redundant badges from readme
|
||||||
|
|
||||||
|
- Removed Build Status badge from the readme file.
|
||||||
|
- Removed License badge from the readme file.
|
||||||
|
|
||||||
|
## 2025-02-05 - 0.3.1 - fix(documentation)
|
||||||
|
Updated README structure and added detailed usage examples
|
||||||
|
|
||||||
|
- Introduced a Table of Contents
|
||||||
|
- Included comprehensive sections for chat, streaming chat, audio generation, document processing, and vision processing
|
||||||
|
- Added example code and detailed configuration steps for supported AI providers
|
||||||
|
- Clarified the development setup with instructions for running tests and building the project
|
||||||
|
|
||||||
|
## 2025-02-05 - 0.3.0 - feat(integration-xai)
|
||||||
|
Add support for X.AI provider with chat and document processing capabilities.
|
||||||
|
|
||||||
|
- Introduced XAIProvider class for integrating X.AI features.
|
||||||
|
- Implemented chat streaming and synchronous chat for X.AI.
|
||||||
|
- Enabled document processing capabilities with PDF conversion in X.AI.
|
||||||
|
|
||||||
|
## 2025-02-03 - 0.2.0 - feat(provider.anthropic)
|
||||||
|
Add support for vision and document processing in Anthropic provider
|
||||||
|
|
||||||
|
- Implemented vision tasks for Anthropic provider using Claude-3-opus-20240229 model.
|
||||||
|
- Implemented document processing for Anthropic provider, supporting conversion of PDF documents to images and analysis with Claude-3-opus-20240229 model.
|
||||||
|
- Updated documentation to reflect the new capabilities of the Anthropic provider.
|
||||||
|
|
||||||
|
## 2025-02-03 - 0.1.0 - feat(providers)
|
||||||
|
Add vision and document processing capabilities to providers
|
||||||
|
|
||||||
|
- OpenAI and Ollama providers now support vision tasks using GPT-4 Vision and Llava models respectively.
|
||||||
|
- Document processing has been implemented for OpenAI and Ollama providers, converting PDFs to images for analysis.
|
||||||
|
- Introduced abstract methods for vision and document processing in the MultiModalModel class.
|
||||||
|
- Updated the readme file with examples for vision and document processing.
|
||||||
|
|
||||||
|
## 2025-02-03 - 0.0.19 - fix(core)
|
||||||
|
Enhanced chat streaming and error handling across providers
|
||||||
|
|
||||||
|
- Refactored chatStream method to properly handle input streams and processes in Perplexity, OpenAI, Ollama, and Anthropic providers.
|
||||||
|
- Improved error handling and message parsing in chatStream implementations.
|
||||||
|
- Defined distinct interfaces for chat options, messages, and responses.
|
||||||
|
- Adjusted the test logic in test/test.ts for the new classification response requirement.
|
||||||
|
|
||||||
|
## 2024-09-19 - 0.0.18 - fix(dependencies)
|
||||||
|
Update dependencies to the latest versions.
|
||||||
|
|
||||||
|
- Updated @git.zone/tsbuild from ^2.1.76 to ^2.1.84
|
||||||
|
- Updated @git.zone/tsrun from ^1.2.46 to ^1.2.49
|
||||||
|
- Updated @push.rocks/tapbundle from ^5.0.23 to ^5.3.0
|
||||||
|
- Updated @types/node from ^20.12.12 to ^22.5.5
|
||||||
|
- Updated @anthropic-ai/sdk from ^0.21.0 to ^0.27.3
|
||||||
|
- Updated @push.rocks/smartfile from ^11.0.14 to ^11.0.21
|
||||||
|
- Updated @push.rocks/smartpromise from ^4.0.3 to ^4.0.4
|
||||||
|
- Updated @push.rocks/webstream from ^1.0.8 to ^1.0.10
|
||||||
|
- Updated openai from ^4.47.1 to ^4.62.1
|
||||||
|
|
||||||
|
## 2024-05-29 - 0.0.17 - Documentation
|
||||||
|
Updated project description.
|
||||||
|
|
||||||
|
- Improved project description for clarity and details.
|
||||||
|
|
||||||
|
## 2024-05-17 - 0.0.16 to 0.0.15 - Core
|
||||||
|
Fixes and updates.
|
||||||
|
|
||||||
|
- Various core updates and fixes for stability improvements.
|
||||||
|
|
||||||
|
## 2024-04-29 - 0.0.14 to 0.0.13 - Core
|
||||||
|
Fixes and updates.
|
||||||
|
|
||||||
|
- Multiple core updates and fixes for enhanced functionality.
|
||||||
|
|
||||||
|
## 2024-04-29 - 0.0.12 - Core
|
||||||
|
Fixes and updates.
|
||||||
|
|
||||||
|
- Core update and bug fixes.
|
||||||
|
|
||||||
|
## 2024-04-29 - 0.0.11 - Provider
|
||||||
|
Fix integration for anthropic provider.
|
||||||
|
|
||||||
|
- Correction in the integration process with anthropic provider for better compatibility.
|
||||||
|
|
||||||
|
## 2024-04-27 - 0.0.10 to 0.0.9 - Core
|
||||||
|
Fixes and updates.
|
||||||
|
|
||||||
|
- Updates and fixes to core components.
|
||||||
|
- Updated tsconfig for improved TypeScript configuration.
|
||||||
|
|
||||||
|
## 2024-04-01 - 0.0.8 to 0.0.7 - Core and npmextra
|
||||||
|
Core updates and npmextra configuration.
|
||||||
|
|
||||||
|
- Core fixes and updates.
|
||||||
|
- Updates to npmextra.json for githost configuration.
|
||||||
|
|
||||||
|
## 2024-03-31 - 0.0.6 to 0.0.2 - Core
|
||||||
|
Initial core updates and fixes.
|
||||||
|
|
||||||
|
- Multiple updates and fixes to core following initial versions.
|
||||||
|
|
||||||
|
|
||||||
|
This summarizes the relevant updates and changes based on the provided commit messages. The changelog excludes commits that are version tags without meaningful content or repeated entries.
|
19
license
Normal file
19
license
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
Copyright (c) 2024 Task Venture Capital GmbH (hello@task.vc)
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
@@ -5,20 +5,33 @@
|
|||||||
"githost": "code.foss.global",
|
"githost": "code.foss.global",
|
||||||
"gitscope": "push.rocks",
|
"gitscope": "push.rocks",
|
||||||
"gitrepo": "smartai",
|
"gitrepo": "smartai",
|
||||||
"description": "A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.",
|
"description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
|
||||||
"npmPackagename": "@push.rocks/smartai",
|
"npmPackagename": "@push.rocks/smartai",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"projectDomain": "push.rocks",
|
"projectDomain": "push.rocks",
|
||||||
"keywords": [
|
"keywords": [
|
||||||
"AI integration",
|
"AI integration",
|
||||||
"chatbot",
|
|
||||||
"TypeScript",
|
"TypeScript",
|
||||||
|
"chatbot",
|
||||||
"OpenAI",
|
"OpenAI",
|
||||||
"Anthropic",
|
"Anthropic",
|
||||||
"multi-model support",
|
"multi-model",
|
||||||
"audio responses",
|
"audio generation",
|
||||||
"text-to-speech",
|
"text-to-speech",
|
||||||
"streaming chat"
|
"document processing",
|
||||||
|
"vision processing",
|
||||||
|
"streaming chat",
|
||||||
|
"API",
|
||||||
|
"multiple providers",
|
||||||
|
"AI models",
|
||||||
|
"synchronous chat",
|
||||||
|
"asynchronous chat",
|
||||||
|
"real-time interaction",
|
||||||
|
"content analysis",
|
||||||
|
"image description",
|
||||||
|
"document classification",
|
||||||
|
"AI toolkit",
|
||||||
|
"provider switching"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
73
package.json
73
package.json
@@ -1,46 +1,47 @@
|
|||||||
{
|
{
|
||||||
"name": "@push.rocks/smartai",
|
"name": "@push.rocks/smartai",
|
||||||
"version": "0.0.15",
|
"version": "0.7.1",
|
||||||
"private": false,
|
"private": false,
|
||||||
"description": "A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.",
|
"description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
|
||||||
"main": "dist_ts/index.js",
|
"main": "dist_ts/index.js",
|
||||||
"typings": "dist_ts/index.d.ts",
|
"typings": "dist_ts/index.d.ts",
|
||||||
"type": "module",
|
"type": "module",
|
||||||
"author": "Task Venture Capital GmbH",
|
"author": "Task Venture Capital GmbH",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"test": "(tstest test/ --web)",
|
"test": "(tstest test/ --web --verbose)",
|
||||||
|
"typecheck": "tsbuild check",
|
||||||
"build": "(tsbuild --web --allowimplicitany)",
|
"build": "(tsbuild --web --allowimplicitany)",
|
||||||
"buildDocs": "(tsdoc)"
|
"buildDocs": "(tsdoc)"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@git.zone/tsbuild": "^2.1.25",
|
"@git.zone/tsbuild": "^2.6.4",
|
||||||
"@git.zone/tsbundle": "^2.0.5",
|
"@git.zone/tsbundle": "^2.5.1",
|
||||||
"@git.zone/tsrun": "^1.2.46",
|
"@git.zone/tsrun": "^1.3.3",
|
||||||
"@git.zone/tstest": "^1.0.90",
|
"@git.zone/tstest": "^2.3.2",
|
||||||
"@push.rocks/qenv": "^6.0.5",
|
"@push.rocks/qenv": "^6.1.0",
|
||||||
"@push.rocks/tapbundle": "^5.0.23",
|
"@push.rocks/tapbundle": "^6.0.3",
|
||||||
"@types/node": "^20.12.7"
|
"@types/node": "^22.15.17"
|
||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@anthropic-ai/sdk": "^0.20.7",
|
"@anthropic-ai/sdk": "^0.59.0",
|
||||||
"@push.rocks/smartarray": "^1.0.8",
|
"@push.rocks/smartarray": "^1.1.0",
|
||||||
"@push.rocks/smartfile": "^11.0.14",
|
"@push.rocks/smartfile": "^11.2.5",
|
||||||
"@push.rocks/smartpath": "^5.0.18",
|
"@push.rocks/smartpath": "^6.0.0",
|
||||||
"@push.rocks/smartpdf": "^3.1.5",
|
"@push.rocks/smartpdf": "^4.1.1",
|
||||||
"@push.rocks/smartpromise": "^4.0.3",
|
"@push.rocks/smartpromise": "^4.2.3",
|
||||||
"@push.rocks/smartrequest": "^2.0.22",
|
"@push.rocks/smartrequest": "^4.2.1",
|
||||||
"@push.rocks/webstream": "^1.0.8",
|
"@push.rocks/webstream": "^1.0.10",
|
||||||
"openai": "^4.38.5"
|
"openai": "^5.12.2"
|
||||||
},
|
},
|
||||||
"repository": {
|
"repository": {
|
||||||
"type": "git",
|
"type": "git",
|
||||||
"url": "git+https://code.foss.global/push.rocks/smartai.git"
|
"url": "https://code.foss.global/push.rocks/smartai.git"
|
||||||
},
|
},
|
||||||
"bugs": {
|
"bugs": {
|
||||||
"url": "https://code.foss.global/push.rocks/smartai/issues"
|
"url": "https://code.foss.global/push.rocks/smartai/issues"
|
||||||
},
|
},
|
||||||
"homepage": "https://code.foss.global/push.rocks/smartai#readme",
|
"homepage": "https://code.foss.global/push.rocks/smartai",
|
||||||
"browserslist": [
|
"browserslist": [
|
||||||
"last 1 chrome versions"
|
"last 1 chrome versions"
|
||||||
],
|
],
|
||||||
@@ -58,13 +59,33 @@
|
|||||||
],
|
],
|
||||||
"keywords": [
|
"keywords": [
|
||||||
"AI integration",
|
"AI integration",
|
||||||
"chatbot",
|
|
||||||
"TypeScript",
|
"TypeScript",
|
||||||
|
"chatbot",
|
||||||
"OpenAI",
|
"OpenAI",
|
||||||
"Anthropic",
|
"Anthropic",
|
||||||
"multi-model support",
|
"multi-model",
|
||||||
"audio responses",
|
"audio generation",
|
||||||
"text-to-speech",
|
"text-to-speech",
|
||||||
"streaming chat"
|
"document processing",
|
||||||
]
|
"vision processing",
|
||||||
|
"streaming chat",
|
||||||
|
"API",
|
||||||
|
"multiple providers",
|
||||||
|
"AI models",
|
||||||
|
"synchronous chat",
|
||||||
|
"asynchronous chat",
|
||||||
|
"real-time interaction",
|
||||||
|
"content analysis",
|
||||||
|
"image description",
|
||||||
|
"document classification",
|
||||||
|
"AI toolkit",
|
||||||
|
"provider switching"
|
||||||
|
],
|
||||||
|
"pnpm": {
|
||||||
|
"onlyBuiltDependencies": [
|
||||||
|
"esbuild",
|
||||||
|
"puppeteer"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"packageManager": "pnpm@10.7.0+sha512.6b865ad4b62a1d9842b61d674a393903b871d9244954f652b8842c2b553c72176b278f64c463e52d40fff8aba385c235c8c9ecf5cc7de4fd78b8bb6d49633ab6"
|
||||||
}
|
}
|
||||||
|
14129
pnpm-lock.yaml
generated
14129
pnpm-lock.yaml
generated
File diff suppressed because it is too large
Load Diff
615
readme.md
615
readme.md
@@ -1,95 +1,594 @@
|
|||||||
# @push.rocks/smartai
|
# @push.rocks/smartai
|
||||||
|
**One API to rule them all** 🚀
|
||||||
|
|
||||||
Provides a standardized interface for integrating and conversing with multiple AI models, supporting operations like chat and potentially audio responses.
|
[](https://www.npmjs.com/package/@push.rocks/smartai)
|
||||||
|
[](https://www.typescriptlang.org/)
|
||||||
|
[](https://opensource.org/licenses/MIT)
|
||||||
|
|
||||||
## Install
|
SmartAI unifies the world's leading AI providers - OpenAI, Anthropic, Perplexity, Ollama, Groq, XAI, and Exo - under a single, elegant TypeScript interface. Build AI applications at lightning speed without vendor lock-in.
|
||||||
|
|
||||||
To add @push.rocks/smartai to your project, run the following command in your terminal:
|
## 🎯 Why SmartAI?
|
||||||
|
|
||||||
|
- **🔌 Universal Interface**: Write once, run with any AI provider. Switch between GPT-4, Claude, Llama, or Grok with a single line change.
|
||||||
|
- **🛡️ Type-Safe**: Full TypeScript support with comprehensive type definitions for all operations
|
||||||
|
- **🌊 Streaming First**: Built for real-time applications with native streaming support
|
||||||
|
- **🎨 Multi-Modal**: Seamlessly work with text, images, audio, and documents
|
||||||
|
- **🏠 Local & Cloud**: Support for both cloud providers and local models via Ollama
|
||||||
|
- **⚡ Zero Lock-In**: Your code remains portable across all AI providers
|
||||||
|
|
||||||
|
## 🚀 Quick Start
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
npm install @push.rocks/smartai
|
npm install @push.rocks/smartai
|
||||||
```
|
```
|
||||||
|
|
||||||
This command installs the package and adds it to your project's dependencies.
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
The `@push.rocks/smartai` package is a comprehensive solution for integrating and interacting with various AI models, designed to support operations ranging from chat interactions to possibly handling audio responses. This documentation will guide you through the process of utilizing `@push.rocks/smartai` in your applications, focusing on TypeScript and ESM syntax to demonstrate its full capabilities.
|
|
||||||
|
|
||||||
### Getting Started
|
|
||||||
|
|
||||||
Before you begin, ensure you have installed the package in your project as described in the **Install** section above. Once installed, you can start integrating AI functionalities into your application.
|
|
||||||
|
|
||||||
### Initializing SmartAi
|
|
||||||
|
|
||||||
The first step is to import and initialize the `SmartAi` class with appropriate options, including tokens for the AI services you plan to use:
|
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
import { SmartAi } from '@push.rocks/smartai';
|
import { SmartAi } from '@push.rocks/smartai';
|
||||||
|
|
||||||
const smartAi = new SmartAi({
|
// Initialize with your favorite providers
|
||||||
openaiToken: 'your-openai-access-token',
|
const ai = new SmartAi({
|
||||||
anthropicToken: 'your-anthropic-access-token'
|
openaiToken: 'sk-...',
|
||||||
|
anthropicToken: 'sk-ant-...'
|
||||||
});
|
});
|
||||||
|
|
||||||
await smartAi.start();
|
await ai.start();
|
||||||
|
|
||||||
|
// Same API, multiple providers
|
||||||
|
const response = await ai.openaiProvider.chat({
|
||||||
|
systemMessage: 'You are a helpful assistant.',
|
||||||
|
userMessage: 'Explain quantum computing in simple terms',
|
||||||
|
messageHistory: []
|
||||||
|
});
|
||||||
```
|
```
|
||||||
|
|
||||||
### Creating Conversations with AI
|
## 📊 Provider Capabilities Matrix
|
||||||
|
|
||||||
`SmartAi` provides a flexible interface to create and manage conversations with different AI providers. You can create a conversation with any supported AI provider like OpenAI or Anthropic by specifying the provider you want to use:
|
Choose the right provider for your use case:
|
||||||
|
|
||||||
|
| Provider | Chat | Streaming | TTS | Vision | Documents | Research | Images | Highlights |
|
||||||
|
|----------|:----:|:---------:|:---:|:------:|:---------:|:--------:|:------:|------------|
|
||||||
|
| **OpenAI** | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | • gpt-image-1<br>• DALL-E 3<br>• Deep research API |
|
||||||
|
| **Anthropic** | ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ❌ | • Claude 3 Opus<br>• Superior reasoning<br>• Web search API |
|
||||||
|
| **Ollama** | ✅ | ✅ | ❌ | ✅ | ✅ | ❌ | ❌ | • 100% local<br>• Privacy-first<br>• No API costs |
|
||||||
|
| **XAI** | ✅ | ✅ | ❌ | ❌ | ✅ | ❌ | ❌ | • Grok models<br>• Real-time data<br>• Uncensored |
|
||||||
|
| **Perplexity** | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | • Web-aware<br>• Research-focused<br>• Sonar Pro models |
|
||||||
|
| **Groq** | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | • 10x faster<br>• LPU inference<br>• Low latency |
|
||||||
|
| **Exo** | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | • Distributed<br>• P2P compute<br>• Decentralized |
|
||||||
|
|
||||||
|
## 🎮 Core Features
|
||||||
|
|
||||||
|
### 💬 Universal Chat Interface
|
||||||
|
|
||||||
|
Works identically across all providers:
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
const openAiConversation = await smartAi.createConversation('openai');
|
// Use GPT-4 for complex reasoning
|
||||||
const anthropicConversation = await smartAi.createConversation('anthropic');
|
const gptResponse = await ai.openaiProvider.chat({
|
||||||
```
|
systemMessage: 'You are a expert physicist.',
|
||||||
|
userMessage: 'Explain the implications of quantum entanglement',
|
||||||
### Chatting with AI
|
messageHistory: []
|
||||||
|
|
||||||
Once you have a conversation instance, you can start sending messages to the AI and receive responses. Each conversation object provides methods to interact in a synchronous or asynchronous manner, depending on your use case.
|
|
||||||
|
|
||||||
#### Synchronous Chat Example
|
|
||||||
|
|
||||||
Here's how you can have a synchronous chat with OpenAI:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
const response = await openAiConversation.chat({
|
|
||||||
systemMessage: 'This is a greeting from the system.',
|
|
||||||
userMessage: 'Hello, AI! How are you today?',
|
|
||||||
messageHistory: [] // Previous messages in the conversation
|
|
||||||
});
|
});
|
||||||
|
|
||||||
console.log(response.message); // Log the response from AI
|
// Use Claude for safety-critical applications
|
||||||
|
const claudeResponse = await ai.anthropicProvider.chat({
|
||||||
|
systemMessage: 'You are a medical advisor.',
|
||||||
|
userMessage: 'Review this patient data for concerns',
|
||||||
|
messageHistory: []
|
||||||
|
});
|
||||||
|
|
||||||
|
// Use Groq for lightning-fast responses
|
||||||
|
const groqResponse = await ai.groqProvider.chat({
|
||||||
|
systemMessage: 'You are a code reviewer.',
|
||||||
|
userMessage: 'Quick! Find the bug in this code: ...',
|
||||||
|
messageHistory: []
|
||||||
|
});
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Streaming Chat Example
|
### 🌊 Real-Time Streaming
|
||||||
|
|
||||||
For real-time, streaming interactions, you can utilize the streaming capabilities provided by the conversation object. This enables a continuous exchange of messages between your application and the AI:
|
Build responsive chat interfaces with token-by-token streaming:
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
const inputStreamWriter = openAiConversation.getInputStreamWriter();
|
// Create a chat stream
|
||||||
const outputStream = openAiConversation.getOutputStream();
|
const stream = await ai.openaiProvider.chatStream(inputStream);
|
||||||
|
const reader = stream.getReader();
|
||||||
|
|
||||||
inputStreamWriter.write('Hello, AI! Can you stream responses?');
|
// Display responses as they arrive
|
||||||
|
while (true) {
|
||||||
|
const { done, value } = await reader.read();
|
||||||
|
if (done) break;
|
||||||
|
|
||||||
const reader = outputStream.getReader();
|
// Update UI in real-time
|
||||||
reader.read().then(function processText({done, value}) {
|
process.stdout.write(value);
|
||||||
if (done) {
|
}
|
||||||
console.log('Stream finished.');
|
```
|
||||||
return;
|
|
||||||
|
### 🎙️ Text-to-Speech
|
||||||
|
|
||||||
|
Generate natural voices with OpenAI:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const audioStream = await ai.openaiProvider.audio({
|
||||||
|
message: 'Welcome to the future of AI development!'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Stream directly to speakers
|
||||||
|
audioStream.pipe(speakerOutput);
|
||||||
|
|
||||||
|
// Or save to file
|
||||||
|
audioStream.pipe(fs.createWriteStream('welcome.mp3'));
|
||||||
|
```
|
||||||
|
|
||||||
|
### 👁️ Vision Analysis
|
||||||
|
|
||||||
|
Understand images with multiple providers:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const image = fs.readFileSync('product-photo.jpg');
|
||||||
|
|
||||||
|
// OpenAI: General purpose vision
|
||||||
|
const gptVision = await ai.openaiProvider.vision({
|
||||||
|
image,
|
||||||
|
prompt: 'Describe this product and suggest marketing angles'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Anthropic: Detailed analysis
|
||||||
|
const claudeVision = await ai.anthropicProvider.vision({
|
||||||
|
image,
|
||||||
|
prompt: 'Identify any safety concerns or defects'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Ollama: Private, local analysis
|
||||||
|
const ollamaVision = await ai.ollamaProvider.vision({
|
||||||
|
image,
|
||||||
|
prompt: 'Extract all text and categorize the content'
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### 📄 Document Intelligence
|
||||||
|
|
||||||
|
Extract insights from PDFs with AI:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const contract = fs.readFileSync('contract.pdf');
|
||||||
|
const invoice = fs.readFileSync('invoice.pdf');
|
||||||
|
|
||||||
|
// Analyze documents
|
||||||
|
const analysis = await ai.openaiProvider.document({
|
||||||
|
systemMessage: 'You are a legal expert.',
|
||||||
|
userMessage: 'Compare these documents and highlight key differences',
|
||||||
|
messageHistory: [],
|
||||||
|
pdfDocuments: [contract, invoice]
|
||||||
|
});
|
||||||
|
|
||||||
|
// Multi-document analysis
|
||||||
|
const taxDocs = [form1099, w2, receipts];
|
||||||
|
const taxAnalysis = await ai.anthropicProvider.document({
|
||||||
|
systemMessage: 'You are a tax advisor.',
|
||||||
|
userMessage: 'Prepare a tax summary from these documents',
|
||||||
|
messageHistory: [],
|
||||||
|
pdfDocuments: taxDocs
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### 🔬 Research & Web Search
|
||||||
|
|
||||||
|
Perform deep research with web search capabilities across multiple providers:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// OpenAI Deep Research - Comprehensive analysis
|
||||||
|
const deepResearch = await ai.openaiProvider.research({
|
||||||
|
query: 'What are the latest developments in quantum computing?',
|
||||||
|
searchDepth: 'deep',
|
||||||
|
includeWebSearch: true
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log(deepResearch.answer);
|
||||||
|
console.log('Sources:', deepResearch.sources);
|
||||||
|
|
||||||
|
// Anthropic Web Search - Domain-filtered research
|
||||||
|
const anthropic = new AnthropicProvider({
|
||||||
|
anthropicToken: 'sk-ant-...',
|
||||||
|
enableWebSearch: true,
|
||||||
|
searchDomainAllowList: ['nature.com', 'science.org']
|
||||||
|
});
|
||||||
|
|
||||||
|
const scientificResearch = await anthropic.research({
|
||||||
|
query: 'Latest breakthroughs in CRISPR gene editing',
|
||||||
|
searchDepth: 'advanced'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Perplexity - Research-focused with citations
|
||||||
|
const perplexityResearch = await ai.perplexityProvider.research({
|
||||||
|
query: 'Current state of autonomous vehicle technology',
|
||||||
|
searchDepth: 'deep' // Uses Sonar Pro model
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
**Research Options:**
|
||||||
|
- `searchDepth`: 'basic' | 'advanced' | 'deep'
|
||||||
|
- `maxSources`: Number of sources to include
|
||||||
|
- `includeWebSearch`: Enable web search (OpenAI)
|
||||||
|
- `background`: Run as background task (OpenAI)
|
||||||
|
|
||||||
|
**Supported Providers:**
|
||||||
|
- **OpenAI**: Deep Research API with specialized models (`o3-deep-research-2025-06-26`, `o4-mini-deep-research-2025-06-26`)
|
||||||
|
- **Anthropic**: Web Search API with domain filtering
|
||||||
|
- **Perplexity**: Sonar and Sonar Pro models with built-in citations
|
||||||
|
|
||||||
|
### 🎨 Image Generation & Editing
|
||||||
|
|
||||||
|
Generate and edit images with OpenAI's cutting-edge models:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Basic image generation with gpt-image-1
|
||||||
|
const image = await ai.openaiProvider.imageGenerate({
|
||||||
|
prompt: 'A futuristic robot assistant in a modern office, digital art',
|
||||||
|
model: 'gpt-image-1',
|
||||||
|
quality: 'high',
|
||||||
|
size: '1024x1024'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Save the generated image
|
||||||
|
const imageBuffer = Buffer.from(image.images[0].b64_json!, 'base64');
|
||||||
|
fs.writeFileSync('robot.png', imageBuffer);
|
||||||
|
|
||||||
|
// Advanced: Transparent background with custom format
|
||||||
|
const logo = await ai.openaiProvider.imageGenerate({
|
||||||
|
prompt: 'Minimalist mountain peak logo, geometric design',
|
||||||
|
model: 'gpt-image-1',
|
||||||
|
quality: 'high',
|
||||||
|
size: '1024x1024',
|
||||||
|
background: 'transparent',
|
||||||
|
outputFormat: 'png'
|
||||||
|
});
|
||||||
|
|
||||||
|
// WebP with compression for web use
|
||||||
|
const webImage = await ai.openaiProvider.imageGenerate({
|
||||||
|
prompt: 'Product showcase: sleek smartphone on marble surface',
|
||||||
|
model: 'gpt-image-1',
|
||||||
|
quality: 'high',
|
||||||
|
size: '1536x1024',
|
||||||
|
outputFormat: 'webp',
|
||||||
|
outputCompression: 85
|
||||||
|
});
|
||||||
|
|
||||||
|
// Superior text rendering (gpt-image-1's strength)
|
||||||
|
const signage = await ai.openaiProvider.imageGenerate({
|
||||||
|
prompt: 'Vintage cafe sign saying "COFFEE & CODE" in hand-lettered typography',
|
||||||
|
model: 'gpt-image-1',
|
||||||
|
quality: 'high',
|
||||||
|
size: '1024x1024'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Generate multiple variations at once
|
||||||
|
const variations = await ai.openaiProvider.imageGenerate({
|
||||||
|
prompt: 'Abstract geometric pattern, colorful minimalist art',
|
||||||
|
model: 'gpt-image-1',
|
||||||
|
n: 3,
|
||||||
|
quality: 'medium',
|
||||||
|
size: '1024x1024'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Edit an existing image
|
||||||
|
const editedImage = await ai.openaiProvider.imageEdit({
|
||||||
|
image: originalImageBuffer,
|
||||||
|
prompt: 'Add sunglasses and change the background to a beach sunset',
|
||||||
|
model: 'gpt-image-1',
|
||||||
|
quality: 'high'
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
**Image Generation Options:**
|
||||||
|
- `model`: 'gpt-image-1' | 'dall-e-3' | 'dall-e-2'
|
||||||
|
- `quality`: 'low' | 'medium' | 'high' | 'auto'
|
||||||
|
- `size`: Multiple aspect ratios up to 4096×4096
|
||||||
|
- `background`: 'transparent' | 'opaque' | 'auto'
|
||||||
|
- `outputFormat`: 'png' | 'jpeg' | 'webp'
|
||||||
|
- `outputCompression`: 0-100 for webp/jpeg
|
||||||
|
- `moderation`: 'low' | 'auto'
|
||||||
|
- `n`: Number of images (1-10)
|
||||||
|
|
||||||
|
**gpt-image-1 Advantages:**
|
||||||
|
- Superior text rendering in images
|
||||||
|
- Up to 4096×4096 resolution
|
||||||
|
- Transparent background support
|
||||||
|
- Advanced output formats (WebP with compression)
|
||||||
|
- Better prompt understanding
|
||||||
|
- Streaming support for progressive rendering
|
||||||
|
|
||||||
|
### 🔄 Persistent Conversations
|
||||||
|
|
||||||
|
Maintain context across interactions:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Create a coding assistant conversation
|
||||||
|
const assistant = ai.createConversation('openai');
|
||||||
|
await assistant.setSystemMessage('You are an expert TypeScript developer.');
|
||||||
|
|
||||||
|
// First question
|
||||||
|
const inputWriter = assistant.getInputStreamWriter();
|
||||||
|
await inputWriter.write('How do I implement a singleton pattern?');
|
||||||
|
|
||||||
|
// Continue the conversation
|
||||||
|
await inputWriter.write('Now show me how to make it thread-safe');
|
||||||
|
|
||||||
|
// The assistant remembers the entire context
|
||||||
|
```
|
||||||
|
|
||||||
|
## 🚀 Real-World Examples
|
||||||
|
|
||||||
|
### Build a Customer Support Bot
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const supportBot = new SmartAi({
|
||||||
|
anthropicToken: process.env.ANTHROPIC_KEY // Claude for empathetic responses
|
||||||
|
});
|
||||||
|
|
||||||
|
async function handleCustomerQuery(query: string, history: ChatMessage[]) {
|
||||||
|
try {
|
||||||
|
const response = await supportBot.anthropicProvider.chat({
|
||||||
|
systemMessage: `You are a helpful customer support agent.
|
||||||
|
Be empathetic, professional, and solution-oriented.`,
|
||||||
|
userMessage: query,
|
||||||
|
messageHistory: history
|
||||||
|
});
|
||||||
|
|
||||||
|
return response.message;
|
||||||
|
} catch (error) {
|
||||||
|
// Fallback to another provider if needed
|
||||||
|
return await supportBot.openaiProvider.chat({...});
|
||||||
}
|
}
|
||||||
console.log('AI says:', value);
|
}
|
||||||
reader.read().then(processText); // Continue reading messages
|
|
||||||
});
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Extending Conversations
|
### Create a Code Review Assistant
|
||||||
|
|
||||||
The modular design of `@push.rocks/smartai` allows you to extend conversations with additional features, such as handling audio responses or integrating other AI-powered functionalities. Utilize the provided AI providers' APIs to explore and implement a wide range of AI interactions within your conversations.
|
```typescript
|
||||||
|
const codeReviewer = new SmartAi({
|
||||||
|
groqToken: process.env.GROQ_KEY // Groq for speed
|
||||||
|
});
|
||||||
|
|
||||||
### Conclusion
|
async function reviewCode(code: string, language: string) {
|
||||||
|
const startTime = Date.now();
|
||||||
|
|
||||||
With `@push.rocks/smartai`, integrating AI functionalities into your applications becomes streamlined and efficient. By leveraging the standardized interface provided by the package, you can easily converse with multiple AI models, expanding the capabilities of your applications with cutting-edge AI features. Whether you're implementing simple chat interactions or complex, real-time communication flows, `@push.rocks/smartai` offers the tools and flexibility needed to create engaging, AI-enhanced experiences.
|
const review = await codeReviewer.groqProvider.chat({
|
||||||
|
systemMessage: `You are a ${language} expert. Review code for:
|
||||||
|
- Security vulnerabilities
|
||||||
|
- Performance issues
|
||||||
|
- Best practices
|
||||||
|
- Potential bugs`,
|
||||||
|
userMessage: `Review this code:\n\n${code}`,
|
||||||
|
messageHistory: []
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log(`Review completed in ${Date.now() - startTime}ms`);
|
||||||
|
return review.message;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Build a Research Assistant
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const researcher = new SmartAi({
|
||||||
|
perplexityToken: process.env.PERPLEXITY_KEY
|
||||||
|
});
|
||||||
|
|
||||||
|
async function research(topic: string) {
|
||||||
|
// Perplexity excels at web-aware research
|
||||||
|
const findings = await researcher.perplexityProvider.chat({
|
||||||
|
systemMessage: 'You are a research assistant. Provide factual, cited information.',
|
||||||
|
userMessage: `Research the latest developments in ${topic}`,
|
||||||
|
messageHistory: []
|
||||||
|
});
|
||||||
|
|
||||||
|
return findings.message;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Local AI for Sensitive Data
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const localAI = new SmartAi({
|
||||||
|
ollama: {
|
||||||
|
baseUrl: 'http://localhost:11434',
|
||||||
|
model: 'llama2',
|
||||||
|
visionModel: 'llava'
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Process sensitive documents without leaving your infrastructure
|
||||||
|
async function analyzeSensitiveDoc(pdfBuffer: Buffer) {
|
||||||
|
const analysis = await localAI.ollamaProvider.document({
|
||||||
|
systemMessage: 'Extract and summarize key information.',
|
||||||
|
userMessage: 'Analyze this confidential document',
|
||||||
|
messageHistory: [],
|
||||||
|
pdfDocuments: [pdfBuffer]
|
||||||
|
});
|
||||||
|
|
||||||
|
// Data never leaves your servers
|
||||||
|
return analysis.message;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## ⚡ Performance Tips
|
||||||
|
|
||||||
|
### 1. Provider Selection Strategy
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
class SmartAIRouter {
|
||||||
|
constructor(private ai: SmartAi) {}
|
||||||
|
|
||||||
|
async query(message: string, requirements: {
|
||||||
|
speed?: boolean;
|
||||||
|
accuracy?: boolean;
|
||||||
|
cost?: boolean;
|
||||||
|
privacy?: boolean;
|
||||||
|
}) {
|
||||||
|
if (requirements.privacy) {
|
||||||
|
return this.ai.ollamaProvider.chat({...}); // Local only
|
||||||
|
}
|
||||||
|
if (requirements.speed) {
|
||||||
|
return this.ai.groqProvider.chat({...}); // 10x faster
|
||||||
|
}
|
||||||
|
if (requirements.accuracy) {
|
||||||
|
return this.ai.anthropicProvider.chat({...}); // Best reasoning
|
||||||
|
}
|
||||||
|
// Default fallback
|
||||||
|
return this.ai.openaiProvider.chat({...});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Streaming for Large Responses
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Don't wait for the entire response
|
||||||
|
async function streamResponse(userQuery: string) {
|
||||||
|
const stream = await ai.openaiProvider.chatStream(createInputStream(userQuery));
|
||||||
|
|
||||||
|
// Process tokens as they arrive
|
||||||
|
for await (const chunk of stream) {
|
||||||
|
updateUI(chunk); // Immediate feedback
|
||||||
|
await processChunk(chunk); // Parallel processing
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Parallel Multi-Provider Queries
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Get the best answer from multiple AIs
|
||||||
|
async function consensusQuery(question: string) {
|
||||||
|
const providers = [
|
||||||
|
ai.openaiProvider.chat({...}),
|
||||||
|
ai.anthropicProvider.chat({...}),
|
||||||
|
ai.perplexityProvider.chat({...})
|
||||||
|
];
|
||||||
|
|
||||||
|
const responses = await Promise.all(providers);
|
||||||
|
return synthesizeResponses(responses);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 🛠️ Advanced Features
|
||||||
|
|
||||||
|
### Custom Streaming Transformations
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Add real-time translation
|
||||||
|
const translationStream = new TransformStream({
|
||||||
|
async transform(chunk, controller) {
|
||||||
|
const translated = await translateChunk(chunk);
|
||||||
|
controller.enqueue(translated);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
const responseStream = await ai.openaiProvider.chatStream(input);
|
||||||
|
const translatedStream = responseStream.pipeThrough(translationStream);
|
||||||
|
```
|
||||||
|
|
||||||
|
### Error Handling & Fallbacks
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
class ResilientAI {
|
||||||
|
private providers = ['openai', 'anthropic', 'groq'];
|
||||||
|
|
||||||
|
async query(opts: ChatOptions): Promise<ChatResponse> {
|
||||||
|
for (const provider of this.providers) {
|
||||||
|
try {
|
||||||
|
return await this.ai[`${provider}Provider`].chat(opts);
|
||||||
|
} catch (error) {
|
||||||
|
console.warn(`${provider} failed, trying next...`);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
throw new Error('All providers failed');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Token Counting & Cost Management
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Track usage across providers
|
||||||
|
class UsageTracker {
|
||||||
|
async trackedChat(provider: string, options: ChatOptions) {
|
||||||
|
const start = Date.now();
|
||||||
|
const response = await ai[`${provider}Provider`].chat(options);
|
||||||
|
|
||||||
|
const usage = {
|
||||||
|
provider,
|
||||||
|
duration: Date.now() - start,
|
||||||
|
inputTokens: estimateTokens(options),
|
||||||
|
outputTokens: estimateTokens(response.message)
|
||||||
|
};
|
||||||
|
|
||||||
|
await this.logUsage(usage);
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 📦 Installation & Setup
|
||||||
|
|
||||||
|
### Prerequisites
|
||||||
|
|
||||||
|
- Node.js 16+
|
||||||
|
- TypeScript 4.5+
|
||||||
|
- API keys for your chosen providers
|
||||||
|
|
||||||
|
### Environment Setup
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install
|
||||||
|
npm install @push.rocks/smartai
|
||||||
|
|
||||||
|
# Set up environment variables
|
||||||
|
export OPENAI_API_KEY=sk-...
|
||||||
|
export ANTHROPIC_API_KEY=sk-ant-...
|
||||||
|
export PERPLEXITY_API_KEY=pplx-...
|
||||||
|
# ... etc
|
||||||
|
```
|
||||||
|
|
||||||
|
### TypeScript Configuration
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"compilerOptions": {
|
||||||
|
"target": "ES2022",
|
||||||
|
"module": "NodeNext",
|
||||||
|
"lib": ["ES2022"],
|
||||||
|
"strict": true,
|
||||||
|
"esModuleInterop": true,
|
||||||
|
"skipLibCheck": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 🎯 Choosing the Right Provider
|
||||||
|
|
||||||
|
| Use Case | Recommended Provider | Why |
|
||||||
|
|----------|---------------------|-----|
|
||||||
|
| **General Purpose** | OpenAI | Most features, stable, well-documented |
|
||||||
|
| **Complex Reasoning** | Anthropic | Superior logical thinking, safer outputs |
|
||||||
|
| **Research & Facts** | Perplexity | Web-aware, provides citations |
|
||||||
|
| **Deep Research** | OpenAI | Deep Research API with comprehensive analysis |
|
||||||
|
| **Speed Critical** | Groq | 10x faster inference, sub-second responses |
|
||||||
|
| **Privacy Critical** | Ollama | 100% local, no data leaves your servers |
|
||||||
|
| **Real-time Data** | XAI | Access to current information |
|
||||||
|
| **Cost Sensitive** | Ollama/Exo | Free (local) or distributed compute |
|
||||||
|
|
||||||
|
## 📈 Roadmap
|
||||||
|
|
||||||
|
- [x] Research & Web Search API
|
||||||
|
- [x] Image generation support (gpt-image-1, DALL-E 3, DALL-E 2)
|
||||||
|
- [ ] Streaming function calls
|
||||||
|
- [ ] Voice input processing
|
||||||
|
- [ ] Fine-tuning integration
|
||||||
|
- [ ] Embedding support
|
||||||
|
- [ ] Agent framework
|
||||||
|
- [ ] More providers (Cohere, AI21, etc.)
|
||||||
|
|
||||||
## License and Legal Information
|
## License and Legal Information
|
||||||
|
|
||||||
|
216
test/test.anthropic.ts
Normal file
216
test/test.anthropic.ts
Normal file
@@ -0,0 +1,216 @@
|
|||||||
|
import { expect, tap } from '@push.rocks/tapbundle';
|
||||||
|
import * as qenv from '@push.rocks/qenv';
|
||||||
|
import * as smartrequest from '@push.rocks/smartrequest';
|
||||||
|
import * as smartfile from '@push.rocks/smartfile';
|
||||||
|
|
||||||
|
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||||
|
|
||||||
|
import * as smartai from '../ts/index.js';
|
||||||
|
|
||||||
|
let anthropicProvider: smartai.AnthropicProvider;
|
||||||
|
|
||||||
|
tap.test('Anthropic: should create and start Anthropic provider', async () => {
|
||||||
|
anthropicProvider = new smartai.AnthropicProvider({
|
||||||
|
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
|
||||||
|
});
|
||||||
|
await anthropicProvider.start();
|
||||||
|
expect(anthropicProvider).toBeInstanceOf(smartai.AnthropicProvider);
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Anthropic: should create chat response', async () => {
|
||||||
|
const userMessage = 'What is the capital of France? Answer in one word.';
|
||||||
|
const response = await anthropicProvider.chat({
|
||||||
|
systemMessage: 'You are a helpful assistant. Be concise.',
|
||||||
|
userMessage: userMessage,
|
||||||
|
messageHistory: [],
|
||||||
|
});
|
||||||
|
console.log(`Anthropic Chat - User: ${userMessage}`);
|
||||||
|
console.log(`Anthropic Chat - Response: ${response.message}`);
|
||||||
|
|
||||||
|
expect(response.role).toEqual('assistant');
|
||||||
|
expect(response.message).toBeTruthy();
|
||||||
|
expect(response.message.toLowerCase()).toInclude('paris');
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Anthropic: should handle message history', async () => {
|
||||||
|
const messageHistory: smartai.ChatMessage[] = [
|
||||||
|
{ role: 'user', content: 'My name is Claude Test' },
|
||||||
|
{ role: 'assistant', content: 'Nice to meet you, Claude Test!' }
|
||||||
|
];
|
||||||
|
|
||||||
|
const response = await anthropicProvider.chat({
|
||||||
|
systemMessage: 'You are a helpful assistant with good memory.',
|
||||||
|
userMessage: 'What is my name?',
|
||||||
|
messageHistory: messageHistory,
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log(`Anthropic Memory Test - Response: ${response.message}`);
|
||||||
|
expect(response.message.toLowerCase()).toInclude('claude test');
});

tap.test('Anthropic: should analyze coffee image with latte art', async () => {
  // Test 1: Coffee image from Unsplash by Dani
  const imagePath = './test/testimages/coffee-dani/coffee.jpg';
  console.log(`Loading coffee image from: ${imagePath}`);

  const imageBuffer = await smartfile.fs.toBuffer(imagePath);
  console.log(`Image loaded, size: ${imageBuffer.length} bytes`);

  const result = await anthropicProvider.vision({
    image: imageBuffer,
    prompt: 'Describe this coffee image. What do you see in terms of the cup, foam pattern, and overall composition?'
  });

  console.log(`Anthropic Vision (Coffee) - Result: ${result}`);
  expect(result).toBeTruthy();
  expect(typeof result).toEqual('string');
  expect(result.toLowerCase()).toInclude('coffee');
  // The image has a heart pattern in the latte art
  const mentionsLatte = result.toLowerCase().includes('heart') ||
    result.toLowerCase().includes('latte') ||
    result.toLowerCase().includes('foam');
  expect(mentionsLatte).toBeTrue();
});

tap.test('Anthropic: should analyze laptop/workspace image', async () => {
  // Test 2: Laptop image from Unsplash by Nicolas Bichon
  const imagePath = './test/testimages/laptop-nicolas/laptop.jpg';
  console.log(`Loading laptop image from: ${imagePath}`);

  const imageBuffer = await smartfile.fs.toBuffer(imagePath);
  console.log(`Image loaded, size: ${imageBuffer.length} bytes`);

  const result = await anthropicProvider.vision({
    image: imageBuffer,
    prompt: 'Describe the technology and workspace setup in this image. What devices and equipment can you see?'
  });

  console.log(`Anthropic Vision (Laptop) - Result: ${result}`);
  expect(result).toBeTruthy();
  expect(typeof result).toEqual('string');
  // Should mention laptop, computer, keyboard, or desk
  const mentionsTech = result.toLowerCase().includes('laptop') ||
    result.toLowerCase().includes('computer') ||
    result.toLowerCase().includes('keyboard') ||
    result.toLowerCase().includes('desk');
  expect(mentionsTech).toBeTrue();
});

tap.test('Anthropic: should analyze receipt/document image', async () => {
  // Test 3: Receipt image from Unsplash by Annie Spratt
  const imagePath = './test/testimages/receipt-annie/receipt.jpg';
  console.log(`Loading receipt image from: ${imagePath}`);

  const imageBuffer = await smartfile.fs.toBuffer(imagePath);
  console.log(`Image loaded, size: ${imageBuffer.length} bytes`);

  const result = await anthropicProvider.vision({
    image: imageBuffer,
    prompt: 'What type of document is this? Can you identify any text or numbers visible in the image?'
  });

  console.log(`Anthropic Vision (Receipt) - Result: ${result}`);
  expect(result).toBeTruthy();
  expect(typeof result).toEqual('string');
  // Should mention receipt, document, text, or paper
  const mentionsDocument = result.toLowerCase().includes('receipt') ||
    result.toLowerCase().includes('document') ||
    result.toLowerCase().includes('text') ||
    result.toLowerCase().includes('paper');
  expect(mentionsDocument).toBeTrue();
});

tap.test('Anthropic: should document a PDF', async () => {
  const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
  const pdfResponse = await smartrequest.SmartRequest.create()
    .url(pdfUrl)
    .get();

  const result = await anthropicProvider.document({
    systemMessage: 'Classify the document. Only the following answers are allowed: "invoice", "bank account statement", "contract", "test document", "other". The answer should only contain the keyword for machine use.',
    userMessage: 'Classify this document.',
    messageHistory: [],
    pdfDocuments: [Buffer.from(await pdfResponse.arrayBuffer())],
  });

  console.log(`Anthropic Document - Result:`, result);
  expect(result).toBeTruthy();
  expect(result.message).toBeTruthy();
});

tap.test('Anthropic: should handle complex document analysis', async () => {
  // Test with the demo PDF if it exists
  const pdfPath = './.nogit/demo_without_textlayer.pdf';
  let pdfBuffer: Uint8Array;

  try {
    pdfBuffer = await smartfile.fs.toBuffer(pdfPath);
  } catch (error) {
    // If the file doesn't exist, use the dummy PDF
    console.log('Demo PDF not found, using dummy PDF instead');
    const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
    const pdfResponse = await smartrequest.SmartRequest.create()
      .url(pdfUrl)
      .get();
    pdfBuffer = Buffer.from(await pdfResponse.arrayBuffer());
  }

  const result = await anthropicProvider.document({
    systemMessage: `
      Analyze this document and provide a JSON response with the following structure:
      {
        "documentType": "string",
        "hasText": boolean,
        "summary": "string"
      }
    `,
    userMessage: 'Analyze this document.',
    messageHistory: [],
    pdfDocuments: [pdfBuffer],
  });

  console.log(`Anthropic Complex Document Analysis:`, result);
  expect(result).toBeTruthy();
  expect(result.message).toBeTruthy();
});

tap.test('Anthropic: should handle errors gracefully', async () => {
  // Test with invalid message (empty)
  let errorCaught = false;

  try {
    await anthropicProvider.chat({
      systemMessage: '',
      userMessage: '',
      messageHistory: [],
    });
  } catch (error) {
    errorCaught = true;
    console.log('Expected error caught:', error.message);
  }

  // Anthropic might handle empty messages, so we don't assert error
  console.log(`Error handling test - Error caught: ${errorCaught}`);
});

tap.test('Anthropic: audio should throw not supported error', async () => {
  let errorCaught = false;

  try {
    await anthropicProvider.audio({
      message: 'This should fail'
    });
  } catch (error) {
    errorCaught = true;
    expect(error.message).toInclude('not yet supported');
  }

  expect(errorCaught).toBeTrue();
});

tap.test('Anthropic: should stop the provider', async () => {
  await anthropicProvider.stop();
  console.log('Anthropic provider stopped successfully');
});

export default tap.start();
93
test/test.basic.ts
Normal file
@@ -0,0 +1,93 @@
import { tap, expect } from '@push.rocks/tapbundle';
import * as smartai from '../ts/index.js';

// Basic instantiation tests that don't require API tokens
// These tests can run in CI/CD environments without credentials

tap.test('Basic: should create SmartAi instance', async () => {
  const testSmartai = new smartai.SmartAi({
    openaiToken: 'dummy-token-for-testing'
  });
  expect(testSmartai).toBeInstanceOf(smartai.SmartAi);
  // Provider is only created after calling start()
  expect(testSmartai.options.openaiToken).toEqual('dummy-token-for-testing');
});

tap.test('Basic: should instantiate OpenAI provider', async () => {
  const openaiProvider = new smartai.OpenAiProvider({
    openaiToken: 'dummy-token'
  });
  expect(openaiProvider).toBeInstanceOf(smartai.OpenAiProvider);
  expect(typeof openaiProvider.chat).toEqual('function');
  expect(typeof openaiProvider.audio).toEqual('function');
  expect(typeof openaiProvider.vision).toEqual('function');
  expect(typeof openaiProvider.document).toEqual('function');
  expect(typeof openaiProvider.research).toEqual('function');
});

tap.test('Basic: should instantiate Anthropic provider', async () => {
  const anthropicProvider = new smartai.AnthropicProvider({
    anthropicToken: 'dummy-token'
  });
  expect(anthropicProvider).toBeInstanceOf(smartai.AnthropicProvider);
  expect(typeof anthropicProvider.chat).toEqual('function');
  expect(typeof anthropicProvider.audio).toEqual('function');
  expect(typeof anthropicProvider.vision).toEqual('function');
  expect(typeof anthropicProvider.document).toEqual('function');
  expect(typeof anthropicProvider.research).toEqual('function');
});

tap.test('Basic: should instantiate Perplexity provider', async () => {
  const perplexityProvider = new smartai.PerplexityProvider({
    perplexityToken: 'dummy-token'
  });
  expect(perplexityProvider).toBeInstanceOf(smartai.PerplexityProvider);
  expect(typeof perplexityProvider.chat).toEqual('function');
  expect(typeof perplexityProvider.research).toEqual('function');
});

tap.test('Basic: should instantiate Groq provider', async () => {
  const groqProvider = new smartai.GroqProvider({
    groqToken: 'dummy-token'
  });
  expect(groqProvider).toBeInstanceOf(smartai.GroqProvider);
  expect(typeof groqProvider.chat).toEqual('function');
  expect(typeof groqProvider.research).toEqual('function');
});

tap.test('Basic: should instantiate Ollama provider', async () => {
  const ollamaProvider = new smartai.OllamaProvider({
    baseUrl: 'http://localhost:11434'
  });
  expect(ollamaProvider).toBeInstanceOf(smartai.OllamaProvider);
  expect(typeof ollamaProvider.chat).toEqual('function');
  expect(typeof ollamaProvider.research).toEqual('function');
});

tap.test('Basic: should instantiate xAI provider', async () => {
  const xaiProvider = new smartai.XAIProvider({
    xaiToken: 'dummy-token'
  });
  expect(xaiProvider).toBeInstanceOf(smartai.XAIProvider);
  expect(typeof xaiProvider.chat).toEqual('function');
  expect(typeof xaiProvider.research).toEqual('function');
});

tap.test('Basic: should instantiate Exo provider', async () => {
  const exoProvider = new smartai.ExoProvider({
    exoBaseUrl: 'http://localhost:8000'
  });
  expect(exoProvider).toBeInstanceOf(smartai.ExoProvider);
  expect(typeof exoProvider.chat).toEqual('function');
  expect(typeof exoProvider.research).toEqual('function');
});

tap.test('Basic: all providers should extend MultiModalModel', async () => {
  const openai = new smartai.OpenAiProvider({ openaiToken: 'test' });
  const anthropic = new smartai.AnthropicProvider({ anthropicToken: 'test' });

  expect(openai).toBeInstanceOf(smartai.MultiModalModel);
  expect(anthropic).toBeInstanceOf(smartai.MultiModalModel);
});

export default tap.start();
203
test/test.image.openai.ts
Normal file
@@ -0,0 +1,203 @@
import { expect, tap } from '@push.rocks/tapbundle';
|
||||||
|
import * as qenv from '@push.rocks/qenv';
|
||||||
|
import * as smartai from '../ts/index.js';
|
||||||
|
import * as path from 'path';
|
||||||
|
import { promises as fs } from 'fs';
|
||||||
|
|
||||||
|
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||||
|
|
||||||
|
let openaiProvider: smartai.OpenAiProvider;
|
||||||
|
|
||||||
|
// Helper function to save image results
|
||||||
|
async function saveImageResult(testName: string, result: any) {
|
||||||
|
const sanitizedName = testName.replace(/[^a-z0-9]/gi, '_').toLowerCase();
|
||||||
|
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
|
||||||
|
const filename = `openai_${sanitizedName}_${timestamp}.json`;
|
||||||
|
const filepath = path.join('.nogit', 'testresults', 'images', filename);
|
||||||
|
|
||||||
|
await fs.mkdir(path.dirname(filepath), { recursive: true });
|
||||||
|
await fs.writeFile(filepath, JSON.stringify(result, null, 2), 'utf-8');
|
||||||
|
|
||||||
|
console.log(` 💾 Saved to: ${filepath}`);
|
||||||
|
|
||||||
|
// Also save the actual image if b64_json is present
|
||||||
|
if (result.images && result.images[0]?.b64_json) {
|
||||||
|
const imageFilename = `openai_${sanitizedName}_${timestamp}.png`;
|
||||||
|
const imageFilepath = path.join('.nogit', 'testresults', 'images', imageFilename);
|
||||||
|
await fs.writeFile(imageFilepath, Buffer.from(result.images[0].b64_json, 'base64'));
|
||||||
|
console.log(` 🖼️ Image saved to: ${imageFilepath}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tap.test('OpenAI Image Generation: should initialize provider', async () => {
|
||||||
|
const openaiToken = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
|
||||||
|
expect(openaiToken).toBeTruthy();
|
||||||
|
|
||||||
|
openaiProvider = new smartai.OpenAiProvider({
|
||||||
|
openaiToken,
|
||||||
|
imageModel: 'gpt-image-1'
|
||||||
|
});
|
||||||
|
|
||||||
|
await openaiProvider.start();
|
||||||
|
expect(openaiProvider).toBeInstanceOf(smartai.OpenAiProvider);
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('OpenAI Image: Basic generation with gpt-image-1', async () => {
|
||||||
|
const result = await openaiProvider.imageGenerate({
|
||||||
|
prompt: 'A cute robot reading a book in a cozy library, digital art style',
|
||||||
|
model: 'gpt-image-1',
|
||||||
|
quality: 'medium',
|
||||||
|
size: '1024x1024'
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('Basic gpt-image-1 Generation:');
|
||||||
|
console.log('- Images generated:', result.images.length);
|
||||||
|
console.log('- Model used:', result.metadata?.model);
|
||||||
|
console.log('- Quality:', result.metadata?.quality);
|
||||||
|
console.log('- Size:', result.metadata?.size);
|
||||||
|
console.log('- Tokens used:', result.metadata?.tokensUsed);
|
||||||
|
|
||||||
|
await saveImageResult('basic_generation_gptimage1', result);
|
||||||
|
|
||||||
|
expect(result.images).toBeTruthy();
|
||||||
|
expect(result.images.length).toEqual(1);
|
||||||
|
expect(result.images[0].b64_json).toBeTruthy();
|
||||||
|
expect(result.metadata?.model).toEqual('gpt-image-1');
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('OpenAI Image: High quality with transparent background', async () => {
|
||||||
|
const result = await openaiProvider.imageGenerate({
|
||||||
|
prompt: 'A simple geometric logo of a mountain peak, minimal design, clean lines',
|
||||||
|
model: 'gpt-image-1',
|
||||||
|
quality: 'high',
|
||||||
|
size: '1024x1024',
|
||||||
|
background: 'transparent',
|
||||||
|
outputFormat: 'png'
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('High Quality Transparent:');
|
||||||
|
console.log('- Quality:', result.metadata?.quality);
|
||||||
|
console.log('- Background: transparent');
|
||||||
|
console.log('- Format:', result.metadata?.outputFormat);
|
||||||
|
console.log('- Tokens used:', result.metadata?.tokensUsed);
|
||||||
|
|
||||||
|
await saveImageResult('high_quality_transparent', result);
|
||||||
|
|
||||||
|
expect(result.images.length).toEqual(1);
|
||||||
|
expect(result.images[0].b64_json).toBeTruthy();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('OpenAI Image: WebP format with compression', async () => {
|
||||||
|
const result = await openaiProvider.imageGenerate({
|
||||||
|
prompt: 'A futuristic cityscape at sunset with flying cars, photorealistic',
|
||||||
|
model: 'gpt-image-1',
|
||||||
|
quality: 'high',
|
||||||
|
size: '1536x1024',
|
||||||
|
outputFormat: 'webp',
|
||||||
|
outputCompression: 85
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('WebP with Compression:');
|
||||||
|
console.log('- Format:', result.metadata?.outputFormat);
|
||||||
|
console.log('- Compression: 85%');
|
||||||
|
console.log('- Size:', result.metadata?.size);
|
||||||
|
|
||||||
|
await saveImageResult('webp_compression', result);
|
||||||
|
|
||||||
|
expect(result.images.length).toEqual(1);
|
||||||
|
expect(result.images[0].b64_json).toBeTruthy();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('OpenAI Image: Text rendering with gpt-image-1', async () => {
|
||||||
|
const result = await openaiProvider.imageGenerate({
|
||||||
|
prompt: 'A vintage cafe sign that says "COFFEE & CODE" in elegant hand-lettered typography, warm colors',
|
||||||
|
model: 'gpt-image-1',
|
||||||
|
quality: 'high',
|
||||||
|
size: '1024x1024'
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('Text Rendering:');
|
||||||
|
console.log('- Prompt includes text: "COFFEE & CODE"');
|
||||||
|
console.log('- gpt-image-1 has superior text rendering');
|
||||||
|
console.log('- Tokens used:', result.metadata?.tokensUsed);
|
||||||
|
|
||||||
|
await saveImageResult('text_rendering', result);
|
||||||
|
|
||||||
|
expect(result.images.length).toEqual(1);
|
||||||
|
expect(result.images[0].b64_json).toBeTruthy();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('OpenAI Image: Multiple images generation', async () => {
|
||||||
|
const result = await openaiProvider.imageGenerate({
|
||||||
|
prompt: 'Abstract colorful geometric patterns, modern minimalist art',
|
||||||
|
model: 'gpt-image-1',
|
||||||
|
n: 2,
|
||||||
|
quality: 'medium',
|
||||||
|
size: '1024x1024'
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('Multiple Images:');
|
||||||
|
console.log('- Images requested: 2');
|
||||||
|
console.log('- Images generated:', result.images.length);
|
||||||
|
|
||||||
|
await saveImageResult('multiple_images', result);
|
||||||
|
|
||||||
|
expect(result.images.length).toEqual(2);
|
||||||
|
expect(result.images[0].b64_json).toBeTruthy();
|
||||||
|
expect(result.images[1].b64_json).toBeTruthy();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('OpenAI Image: Low moderation setting', async () => {
|
||||||
|
const result = await openaiProvider.imageGenerate({
|
||||||
|
prompt: 'A fantasy battle scene with warriors and dragons',
|
||||||
|
model: 'gpt-image-1',
|
||||||
|
moderation: 'low',
|
||||||
|
quality: 'medium'
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('Low Moderation:');
|
||||||
|
console.log('- Moderation: low (less restrictive filtering)');
|
||||||
|
console.log('- Tokens used:', result.metadata?.tokensUsed);
|
||||||
|
|
||||||
|
await saveImageResult('low_moderation', result);
|
||||||
|
|
||||||
|
expect(result.images.length).toEqual(1);
|
||||||
|
expect(result.images[0].b64_json).toBeTruthy();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('OpenAI Image Editing: edit with gpt-image-1', async () => {
|
||||||
|
// First, generate a base image
|
||||||
|
const baseResult = await openaiProvider.imageGenerate({
|
||||||
|
prompt: 'A simple white cat sitting on a red cushion',
|
||||||
|
model: 'gpt-image-1',
|
||||||
|
quality: 'low',
|
||||||
|
size: '1024x1024'
|
||||||
|
});
|
||||||
|
|
||||||
|
const baseImageBuffer = Buffer.from(baseResult.images[0].b64_json!, 'base64');
|
||||||
|
|
||||||
|
// Now edit it
|
||||||
|
const editResult = await openaiProvider.imageEdit({
|
||||||
|
image: baseImageBuffer,
|
||||||
|
prompt: 'Change the cat to orange and add stylish sunglasses',
|
||||||
|
model: 'gpt-image-1',
|
||||||
|
quality: 'medium'
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('Image Editing:');
|
||||||
|
console.log('- Base image created');
|
||||||
|
console.log('- Edit: change color and add sunglasses');
|
||||||
|
console.log('- Result images:', editResult.images.length);
|
||||||
|
|
||||||
|
await saveImageResult('image_edit', editResult);
|
||||||
|
|
||||||
|
expect(editResult.images.length).toEqual(1);
|
||||||
|
expect(editResult.images[0].b64_json).toBeTruthy();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('OpenAI Image: should clean up provider', async () => {
|
||||||
|
await openaiProvider.stop();
|
||||||
|
console.log('OpenAI image provider stopped successfully');
|
||||||
|
});
|
||||||
|
|
||||||
|
export default tap.start();
|
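The tests above already exercise the full option surface of `imageGenerate()` and `imageEdit()`. Distilled into a minimal usage sketch (assuming the published package name from the commit info and the same provider options and result shape shown in the tests; reading the token from an environment variable is an illustrative choice, not part of this diff):

```typescript
import * as smartai from '@push.rocks/smartai';
import { promises as fs } from 'fs';

async function generateLogo(): Promise<void> {
  // Same constructor options as in the tests above; the token source is hypothetical.
  const provider = new smartai.OpenAiProvider({
    openaiToken: process.env.OPENAI_TOKEN!,
    imageModel: 'gpt-image-1',
  });
  await provider.start();

  const result = await provider.imageGenerate({
    prompt: 'A simple geometric logo of a mountain peak, minimal design, clean lines',
    model: 'gpt-image-1',
    quality: 'high',
    size: '1024x1024',
    background: 'transparent',
    outputFormat: 'png',
  });

  // gpt-image-1 returns base64-encoded image data; decode it and write it to disk.
  await fs.writeFile('logo.png', Buffer.from(result.images[0].b64_json!, 'base64'));
  await provider.stop();
}
```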
140
test/test.interfaces.ts
Normal file
@@ -0,0 +1,140 @@
import { tap, expect } from '@push.rocks/tapbundle';
|
||||||
|
import * as smartai from '../ts/index.js';
|
||||||
|
|
||||||
|
// Test interface exports and type checking
|
||||||
|
// These tests verify that all interfaces are properly exported and usable
|
||||||
|
|
||||||
|
tap.test('Interfaces: ResearchOptions should be properly typed', async () => {
|
||||||
|
const testOptions: smartai.ResearchOptions = {
|
||||||
|
query: 'test query',
|
||||||
|
searchDepth: 'basic',
|
||||||
|
maxSources: 10,
|
||||||
|
includeWebSearch: true,
|
||||||
|
background: false
|
||||||
|
};
|
||||||
|
|
||||||
|
expect(testOptions).toBeInstanceOf(Object);
|
||||||
|
expect(testOptions.query).toEqual('test query');
|
||||||
|
expect(testOptions.searchDepth).toEqual('basic');
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Interfaces: ResearchResponse should be properly typed', async () => {
|
||||||
|
const testResponse: smartai.ResearchResponse = {
|
||||||
|
answer: 'test answer',
|
||||||
|
sources: [
|
||||||
|
{
|
||||||
|
url: 'https://example.com',
|
||||||
|
title: 'Example Source',
|
||||||
|
snippet: 'This is a snippet'
|
||||||
|
}
|
||||||
|
],
|
||||||
|
searchQueries: ['query1', 'query2'],
|
||||||
|
metadata: {
|
||||||
|
model: 'test-model',
|
||||||
|
tokensUsed: 100
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
expect(testResponse).toBeInstanceOf(Object);
|
||||||
|
expect(testResponse.answer).toEqual('test answer');
|
||||||
|
expect(testResponse.sources).toBeArray();
|
||||||
|
expect(testResponse.sources[0].url).toEqual('https://example.com');
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Interfaces: ChatOptions should be properly typed', async () => {
|
||||||
|
const testChatOptions: smartai.ChatOptions = {
|
||||||
|
systemMessage: 'You are a helpful assistant',
|
||||||
|
userMessage: 'Hello',
|
||||||
|
messageHistory: [
|
||||||
|
{ role: 'user', content: 'Previous message' },
|
||||||
|
{ role: 'assistant', content: 'Previous response' }
|
||||||
|
]
|
||||||
|
};
|
||||||
|
|
||||||
|
expect(testChatOptions).toBeInstanceOf(Object);
|
||||||
|
expect(testChatOptions.systemMessage).toBeTruthy();
|
||||||
|
expect(testChatOptions.messageHistory).toBeArray();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Interfaces: ChatResponse should be properly typed', async () => {
|
||||||
|
const testChatResponse: smartai.ChatResponse = {
|
||||||
|
role: 'assistant',
|
||||||
|
message: 'This is a response'
|
||||||
|
};
|
||||||
|
|
||||||
|
expect(testChatResponse).toBeInstanceOf(Object);
|
||||||
|
expect(testChatResponse.role).toEqual('assistant');
|
||||||
|
expect(testChatResponse.message).toBeTruthy();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Interfaces: ChatMessage should be properly typed', async () => {
|
||||||
|
const testMessage: smartai.ChatMessage = {
|
||||||
|
role: 'user',
|
||||||
|
content: 'Test message'
|
||||||
|
};
|
||||||
|
|
||||||
|
expect(testMessage).toBeInstanceOf(Object);
|
||||||
|
expect(testMessage.role).toBeOneOf(['user', 'assistant', 'system']);
|
||||||
|
expect(testMessage.content).toBeTruthy();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Interfaces: Provider options should be properly typed', async () => {
  // OpenAI options
  const openaiOptions: smartai.IOpenaiProviderOptions = {
    openaiToken: 'test-token',
    chatModel: 'gpt-5-mini',
    audioModel: 'tts-1-hd',
    visionModel: 'o4-mini',
    researchModel: 'o4-mini-deep-research-2025-06-26',
    enableWebSearch: true
  };

  expect(openaiOptions).toBeInstanceOf(Object);
  expect(openaiOptions.openaiToken).toBeTruthy();

  // Anthropic options
  const anthropicOptions: smartai.IAnthropicProviderOptions = {
    anthropicToken: 'test-token',
    enableWebSearch: true,
    searchDomainAllowList: ['example.com'],
    searchDomainBlockList: ['blocked.com']
  };

  expect(anthropicOptions).toBeInstanceOf(Object);
  expect(anthropicOptions.anthropicToken).toBeTruthy();
});
|
||||||
|
|
||||||
|
tap.test('Interfaces: Search depth values should be valid', async () => {
|
||||||
|
const validDepths: smartai.ResearchOptions['searchDepth'][] = ['basic', 'advanced', 'deep'];
|
||||||
|
|
||||||
|
for (const depth of validDepths) {
|
||||||
|
const options: smartai.ResearchOptions = {
|
||||||
|
query: 'test',
|
||||||
|
searchDepth: depth
|
||||||
|
};
|
||||||
|
expect(options.searchDepth).toBeOneOf(['basic', 'advanced', 'deep', undefined]);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Interfaces: Optional properties should work correctly', async () => {
|
||||||
|
// Minimal ResearchOptions
|
||||||
|
const minimalOptions: smartai.ResearchOptions = {
|
||||||
|
query: 'test query'
|
||||||
|
};
|
||||||
|
|
||||||
|
expect(minimalOptions.query).toBeTruthy();
|
||||||
|
expect(minimalOptions.searchDepth).toBeUndefined();
|
||||||
|
expect(minimalOptions.maxSources).toBeUndefined();
|
||||||
|
|
||||||
|
// Minimal ChatOptions
|
||||||
|
const minimalChat: smartai.ChatOptions = {
|
||||||
|
systemMessage: 'system',
|
||||||
|
userMessage: 'user',
|
||||||
|
messageHistory: []
|
||||||
|
};
|
||||||
|
|
||||||
|
expect(minimalChat.messageHistory).toBeArray();
|
||||||
|
expect(minimalChat.messageHistory.length).toEqual(0);
|
||||||
|
});
|
||||||
|
|
||||||
|
export default tap.start();
|
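The interface tests above only assert the shape of the exported types. As a compact sketch of how those types flow through an actual call (provider setup and the `research()` signature taken from the research tests elsewhere in this diff; the package-name import is assumed):

```typescript
import * as smartai from '@push.rocks/smartai';

async function runResearch(anthropicToken: string): Promise<void> {
  const provider = new smartai.AnthropicProvider({ anthropicToken, enableWebSearch: true });
  await provider.start();

  // ResearchOptions in, ResearchResponse out, exactly as typed above.
  const options: smartai.ResearchOptions = {
    query: 'What is TypeScript and why is it useful for web development?',
    searchDepth: 'basic',
    maxSources: 5,
  };
  const response: smartai.ResearchResponse = await provider.research(options);

  console.log(response.answer);
  for (const source of response.sources) {
    console.log(`${source.title}: ${source.url}`);
  }

  await provider.stop();
}
```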
@@ -1,4 +1,4 @@
-import { expect, expectAsync, tap } from '@push.rocks/tapbundle';
+import { expect, tap } from '@push.rocks/tapbundle';
 import * as qenv from '@push.rocks/qenv';
 import * as smartrequest from '@push.rocks/smartrequest';
 import * as smartfile from '@push.rocks/smartfile';
@@ -9,38 +9,39 @@ import * as smartai from '../ts/index.js';
 let testSmartai: smartai.SmartAi;

-tap.test('should create a smartai instance', async () => {
+tap.test('OpenAI: should create a smartai instance with OpenAI provider', async () => {
   testSmartai = new smartai.SmartAi({
     openaiToken: await testQenv.getEnvVarOnDemand('OPENAI_TOKEN'),
   });
   await testSmartai.start();
 });

-tap.test('should create chat response with openai', async () => {
+tap.test('OpenAI: should create chat response', async () => {
   const userMessage = 'How are you?';
   const response = await testSmartai.openaiProvider.chat({
     systemMessage: 'Hello',
     userMessage: userMessage,
-    messageHistory: [
-    ],
+    messageHistory: [],
   });
   console.log(`userMessage: ${userMessage}`);
   console.log(response.message);
 });

-tap.test('should document a pdf', async () => {
+tap.test('OpenAI: should document a pdf', async () => {
   const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
-  const pdfResponse = await smartrequest.getBinary(pdfUrl);
+  const pdfResponse = await smartrequest.SmartRequest.create()
+    .url(pdfUrl)
+    .get();
   const result = await testSmartai.openaiProvider.document({
-    systemMessage: 'Classify the document. Only the following answers are allowed: "invoice", "bank account statement", "contract", "other"',
+    systemMessage: 'Classify the document. Only the following answers are allowed: "invoice", "bank account statement", "contract", "other". The answer should only contain the keyword for machine use.',
     userMessage: "Classify the document.",
     messageHistory: [],
-    pdfDocuments: [pdfResponse.body],
+    pdfDocuments: [Buffer.from(await pdfResponse.arrayBuffer())],
   });
   console.log(result);
 });

-tap.test('should recognize companies in a pdf', async () => {
+tap.test('OpenAI: should recognize companies in a pdf', async () => {
   const pdfBuffer = await smartfile.fs.toBuffer('./.nogit/demo_without_textlayer.pdf');
   const result = await testSmartai.openaiProvider.document({
     systemMessage: `
@@ -55,7 +56,7 @@ tap.test('should recognize companies in a pdf', async () => {
       address: string;
       city: string;
       country: string;
-      EU: boolean; // wether the entity is within EU
+      EU: boolean; // whether the entity is within EU
     };
     entityReceiver: {
       type: 'official state entity' | 'company' | 'person';
@@ -63,7 +64,7 @@ tap.test('should recognize companies in a pdf', async () => {
       address: string;
       city: string;
       country: string;
-      EU: boolean; // wether the entity is within EU
+      EU: boolean; // whether the entity is within EU
     };
     date: string; // the date of the document as YYYY-MM-DD
     title: string; // a short title, suitable for a filename
@@ -75,9 +76,26 @@ tap.test('should recognize companies in a pdf', async () => {
     pdfDocuments: [pdfBuffer],
   });
   console.log(result);
-})
+});

-tap.test('should stop the smartai instance', async () => {
+tap.test('OpenAI: should create audio response', async () => {
+  // Call the audio method with a sample message.
+  const audioStream = await testSmartai.openaiProvider.audio({
+    message: 'This is a test of audio generation.',
+  });
+  // Read all chunks from the stream.
+  const chunks: Uint8Array[] = [];
+  for await (const chunk of audioStream) {
+    chunks.push(chunk as Uint8Array);
+  }
+  const audioBuffer = Buffer.concat(chunks);
+  await smartfile.fs.toFs(audioBuffer, './.nogit/testoutput.mp3');
+  console.log(`Audio Buffer length: ${audioBuffer.length}`);
+  // Assert that the resulting buffer is not empty.
+  expect(audioBuffer.length).toBeGreaterThan(0);
+});
+
+tap.test('OpenAI: should stop the smartai instance', async () => {
   await testSmartai.stop();
 });
223
test/test.research.anthropic.ts
Normal file
@@ -0,0 +1,223 @@
import { expect, tap } from '@push.rocks/tapbundle';
|
||||||
|
import * as qenv from '@push.rocks/qenv';
|
||||||
|
import * as smartai from '../ts/index.js';
|
||||||
|
import * as path from 'path';
|
||||||
|
import { promises as fs } from 'fs';
|
||||||
|
|
||||||
|
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||||
|
|
||||||
|
// Helper function to save research results
|
||||||
|
async function saveResearchResult(testName: string, result: any) {
|
||||||
|
const sanitizedName = testName.replace(/[^a-z0-9]/gi, '_').toLowerCase();
|
||||||
|
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
|
||||||
|
const filename = `${sanitizedName}_${timestamp}.json`;
|
||||||
|
const filepath = path.join('.nogit', 'testresults', 'research', filename);
|
||||||
|
|
||||||
|
await fs.mkdir(path.dirname(filepath), { recursive: true });
|
||||||
|
await fs.writeFile(filepath, JSON.stringify(result, null, 2), 'utf-8');
|
||||||
|
|
||||||
|
console.log(` 💾 Saved to: ${filepath}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
let anthropicProvider: smartai.AnthropicProvider;
|
||||||
|
|
||||||
|
tap.test('Anthropic Research: should initialize provider with web search', async () => {
|
||||||
|
anthropicProvider = new smartai.AnthropicProvider({
|
||||||
|
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
|
||||||
|
enableWebSearch: true
|
||||||
|
});
|
||||||
|
|
||||||
|
await anthropicProvider.start();
|
||||||
|
expect(anthropicProvider).toBeInstanceOf(smartai.AnthropicProvider);
|
||||||
|
expect(typeof anthropicProvider.research).toEqual('function');
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Anthropic Research: should perform basic research query', async () => {
|
||||||
|
const result = await anthropicProvider.research({
|
||||||
|
query: 'What is machine learning and its main applications?',
|
||||||
|
searchDepth: 'basic'
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('Anthropic Basic Research:');
|
||||||
|
console.log('- Answer length:', result.answer.length);
|
||||||
|
console.log('- Sources found:', result.sources.length);
|
||||||
|
console.log('- First 200 chars:', result.answer.substring(0, 200));
|
||||||
|
|
||||||
|
await saveResearchResult('basic_research_machine_learning', result);
|
||||||
|
|
||||||
|
expect(result).toBeTruthy();
|
||||||
|
expect(result.answer).toBeTruthy();
|
||||||
|
expect(result.answer.toLowerCase()).toInclude('machine learning');
|
||||||
|
expect(result.sources).toBeArray();
|
||||||
|
expect(result.metadata).toBeTruthy();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Anthropic Research: should perform research with web search', async () => {
|
||||||
|
const result = await anthropicProvider.research({
|
||||||
|
query: 'What are the latest developments in renewable energy technology?',
|
||||||
|
searchDepth: 'advanced',
|
||||||
|
includeWebSearch: true,
|
||||||
|
maxSources: 5
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('Anthropic Web Search Research:');
|
||||||
|
console.log('- Answer length:', result.answer.length);
|
||||||
|
console.log('- Sources:', result.sources.length);
|
||||||
|
if (result.searchQueries) {
|
||||||
|
console.log('- Search queries:', result.searchQueries);
|
||||||
|
}
|
||||||
|
|
||||||
|
await saveResearchResult('web_search_renewable_energy', result);
|
||||||
|
|
||||||
|
expect(result.answer).toBeTruthy();
|
||||||
|
expect(result.answer.toLowerCase()).toInclude('renewable');
|
||||||
|
|
||||||
|
// Check if sources were extracted
|
||||||
|
if (result.sources.length > 0) {
|
||||||
|
console.log('- Example source:', result.sources[0]);
|
||||||
|
expect(result.sources[0]).toHaveProperty('url');
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Anthropic Research: should handle deep research queries', async () => {
|
||||||
|
const result = await anthropicProvider.research({
|
||||||
|
query: 'Explain the differences between REST and GraphQL APIs',
|
||||||
|
searchDepth: 'deep'
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('Anthropic Deep Research:');
|
||||||
|
console.log('- Answer length:', result.answer.length);
|
||||||
|
console.log('- Token usage:', result.metadata?.tokensUsed);
|
||||||
|
|
||||||
|
await saveResearchResult('deep_research_rest_vs_graphql', result);
|
||||||
|
|
||||||
|
expect(result.answer).toBeTruthy();
|
||||||
|
expect(result.answer.length).toBeGreaterThan(300);
|
||||||
|
expect(result.answer.toLowerCase()).toInclude('rest');
|
||||||
|
expect(result.answer.toLowerCase()).toInclude('graphql');
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Anthropic Research: should extract citations from response', async () => {
|
||||||
|
const result = await anthropicProvider.research({
|
||||||
|
query: 'What is Docker and how does containerization work?',
|
||||||
|
searchDepth: 'basic',
|
||||||
|
maxSources: 3
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('Anthropic Citation Extraction:');
|
||||||
|
console.log('- Sources found:', result.sources.length);
|
||||||
|
console.log('- Answer includes Docker:', result.answer.toLowerCase().includes('docker'));
|
||||||
|
|
||||||
|
await saveResearchResult('citation_extraction_docker', result);
|
||||||
|
|
||||||
|
expect(result.answer).toInclude('Docker');
|
||||||
|
|
||||||
|
// Check for URL extraction (both markdown and plain URLs)
|
||||||
|
const hasUrls = result.answer.includes('http') || result.sources.length > 0;
|
||||||
|
console.log('- Contains URLs or sources:', hasUrls);
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Anthropic Research: should use domain filtering when configured', async () => {
|
||||||
|
// Create a new provider with domain restrictions
|
||||||
|
const filteredProvider = new smartai.AnthropicProvider({
|
||||||
|
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
|
||||||
|
enableWebSearch: true,
|
||||||
|
searchDomainAllowList: ['wikipedia.org', 'docs.microsoft.com'],
|
||||||
|
searchDomainBlockList: ['reddit.com']
|
||||||
|
});
|
||||||
|
|
||||||
|
await filteredProvider.start();
|
||||||
|
|
||||||
|
const result = await filteredProvider.research({
|
||||||
|
query: 'What is JavaScript?',
|
||||||
|
searchDepth: 'basic'
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('Anthropic Domain Filtering Test:');
|
||||||
|
console.log('- Answer length:', result.answer.length);
|
||||||
|
console.log('- Applied domain filters (allow: wikipedia, docs.microsoft)');
|
||||||
|
|
||||||
|
await saveResearchResult('domain_filtering_javascript', result);
|
||||||
|
|
||||||
|
expect(result.answer).toBeTruthy();
|
||||||
|
expect(result.answer.toLowerCase()).toInclude('javascript');
|
||||||
|
|
||||||
|
await filteredProvider.stop();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Anthropic Research: should handle errors gracefully', async () => {
|
||||||
|
let errorCaught = false;
|
||||||
|
|
||||||
|
try {
|
||||||
|
await anthropicProvider.research({
|
||||||
|
query: '', // Empty query
|
||||||
|
searchDepth: 'basic'
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
errorCaught = true;
|
||||||
|
console.log('Expected error for empty query:', error.message.substring(0, 100));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Anthropic might handle empty queries differently
|
||||||
|
console.log(`Empty query error test - Error caught: ${errorCaught}`);
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Anthropic Research: should handle different search depths', async () => {
|
||||||
|
// Test basic search depth
|
||||||
|
const basicResult = await anthropicProvider.research({
|
||||||
|
query: 'What is Python?',
|
||||||
|
searchDepth: 'basic'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Test advanced search depth
|
||||||
|
const advancedResult = await anthropicProvider.research({
|
||||||
|
query: 'What is Python?',
|
||||||
|
searchDepth: 'advanced'
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('Anthropic Search Depth Comparison:');
|
||||||
|
console.log('- Basic answer length:', basicResult.answer.length);
|
||||||
|
console.log('- Advanced answer length:', advancedResult.answer.length);
|
||||||
|
console.log('- Basic tokens:', basicResult.metadata?.tokensUsed);
|
||||||
|
console.log('- Advanced tokens:', advancedResult.metadata?.tokensUsed);
|
||||||
|
|
||||||
|
await saveResearchResult('search_depth_python_basic', basicResult);
|
||||||
|
await saveResearchResult('search_depth_python_advanced', advancedResult);
|
||||||
|
|
||||||
|
expect(basicResult.answer).toBeTruthy();
|
||||||
|
expect(advancedResult.answer).toBeTruthy();
|
||||||
|
|
||||||
|
// Advanced search typically produces longer answers
|
||||||
|
// But this isn't guaranteed, so we just check they exist
|
||||||
|
expect(basicResult.answer.toLowerCase()).toInclude('python');
|
||||||
|
expect(advancedResult.answer.toLowerCase()).toInclude('python');
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Anthropic Research: ARM vs. Qualcomm comparison', async () => {
|
||||||
|
const result = await anthropicProvider.research({
|
||||||
|
query: 'Compare ARM and Qualcomm: their technologies, market positions, and recent developments in the mobile and computing sectors',
|
||||||
|
searchDepth: 'advanced',
|
||||||
|
includeWebSearch: true,
|
||||||
|
maxSources: 10
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('ARM vs. Qualcomm Research:');
|
||||||
|
console.log('- Answer length:', result.answer.length);
|
||||||
|
console.log('- Sources found:', result.sources.length);
|
||||||
|
console.log('- First 300 chars:', result.answer.substring(0, 300));
|
||||||
|
|
||||||
|
await saveResearchResult('arm_vs_qualcomm_comparison', result);
|
||||||
|
|
||||||
|
expect(result.answer).toBeTruthy();
|
||||||
|
expect(result.answer.length).toBeGreaterThan(500);
|
||||||
|
expect(result.answer.toLowerCase()).toInclude('arm');
|
||||||
|
expect(result.answer.toLowerCase()).toInclude('qualcomm');
|
||||||
|
expect(result.sources.length).toBeGreaterThan(0);
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('Anthropic Research: should clean up provider', async () => {
|
||||||
|
await anthropicProvider.stop();
|
||||||
|
console.log('Anthropic research provider stopped successfully');
|
||||||
|
});
|
||||||
|
|
||||||
|
export default tap.start();
|
172
test/test.research.openai.ts
Normal file
@@ -0,0 +1,172 @@
import { expect, tap } from '@push.rocks/tapbundle';
|
||||||
|
import * as qenv from '@push.rocks/qenv';
|
||||||
|
import * as smartai from '../ts/index.js';
|
||||||
|
import * as path from 'path';
|
||||||
|
import { promises as fs } from 'fs';
|
||||||
|
|
||||||
|
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||||
|
|
||||||
|
// Helper function to save research results
|
||||||
|
async function saveResearchResult(testName: string, result: any) {
|
||||||
|
const sanitizedName = testName.replace(/[^a-z0-9]/gi, '_').toLowerCase();
|
||||||
|
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
|
||||||
|
const filename = `openai_${sanitizedName}_${timestamp}.json`;
|
||||||
|
const filepath = path.join('.nogit', 'testresults', 'research', filename);
|
||||||
|
|
||||||
|
await fs.mkdir(path.dirname(filepath), { recursive: true });
|
||||||
|
await fs.writeFile(filepath, JSON.stringify(result, null, 2), 'utf-8');
|
||||||
|
|
||||||
|
console.log(` 💾 Saved to: ${filepath}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
let openaiProvider: smartai.OpenAiProvider;
|
||||||
|
|
||||||
|
tap.test('OpenAI Research: should initialize provider with research capabilities', async () => {
|
||||||
|
openaiProvider = new smartai.OpenAiProvider({
|
||||||
|
openaiToken: await testQenv.getEnvVarOnDemand('OPENAI_TOKEN'),
|
||||||
|
researchModel: 'o4-mini-deep-research-2025-06-26',
|
||||||
|
enableWebSearch: true
|
||||||
|
});
|
||||||
|
|
||||||
|
await openaiProvider.start();
|
||||||
|
expect(openaiProvider).toBeInstanceOf(smartai.OpenAiProvider);
|
||||||
|
expect(typeof openaiProvider.research).toEqual('function');
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('OpenAI Research: should perform basic research query', async () => {
|
||||||
|
const result = await openaiProvider.research({
|
||||||
|
query: 'What is TypeScript and why is it useful for web development?',
|
||||||
|
searchDepth: 'basic'
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('OpenAI Basic Research:');
|
||||||
|
console.log('- Answer length:', result.answer.length);
|
||||||
|
console.log('- Sources found:', result.sources.length);
|
||||||
|
console.log('- First 200 chars:', result.answer.substring(0, 200));
|
||||||
|
|
||||||
|
await saveResearchResult('basic_research_typescript', result);
|
||||||
|
|
||||||
|
expect(result).toBeTruthy();
|
||||||
|
expect(result.answer).toBeTruthy();
|
||||||
|
expect(result.answer.toLowerCase()).toInclude('typescript');
|
||||||
|
expect(result.sources).toBeArray();
|
||||||
|
expect(result.metadata).toBeTruthy();
|
||||||
|
expect(result.metadata.model).toBeTruthy();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('OpenAI Research: should perform research with web search enabled', async () => {
|
||||||
|
const result = await openaiProvider.research({
|
||||||
|
query: 'What are the latest features in ECMAScript 2024?',
|
||||||
|
searchDepth: 'advanced',
|
||||||
|
includeWebSearch: true,
|
||||||
|
maxSources: 5
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('OpenAI Web Search Research:');
|
||||||
|
console.log('- Answer length:', result.answer.length);
|
||||||
|
console.log('- Sources:', result.sources.length);
|
||||||
|
if (result.searchQueries) {
|
||||||
|
console.log('- Search queries used:', result.searchQueries);
|
||||||
|
}
|
||||||
|
|
||||||
|
await saveResearchResult('web_search_ecmascript', result);
|
||||||
|
|
||||||
|
expect(result.answer).toBeTruthy();
|
||||||
|
expect(result.answer.toLowerCase()).toInclude('ecmascript');
|
||||||
|
|
||||||
|
// The model might include sources or search queries
|
||||||
|
if (result.sources.length > 0) {
|
||||||
|
expect(result.sources[0]).toHaveProperty('url');
|
||||||
|
expect(result.sources[0]).toHaveProperty('title');
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('OpenAI Research: should handle deep research for complex topics', async () => {
|
||||||
|
// Skip this test if it takes too long or costs too much
|
||||||
|
// You can enable it for thorough testing
|
||||||
|
const skipDeepResearch = true;
|
||||||
|
|
||||||
|
if (skipDeepResearch) {
|
||||||
|
console.log('Skipping deep research test to save API costs');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const result = await openaiProvider.research({
|
||||||
|
query: 'Compare the pros and cons of microservices vs monolithic architecture',
|
||||||
|
searchDepth: 'deep',
|
||||||
|
includeWebSearch: true
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('OpenAI Deep Research:');
|
||||||
|
console.log('- Answer length:', result.answer.length);
|
||||||
|
console.log('- Token usage:', result.metadata?.tokensUsed);
|
||||||
|
|
||||||
|
expect(result.answer).toBeTruthy();
|
||||||
|
expect(result.answer.length).toBeGreaterThan(500);
|
||||||
|
expect(result.answer.toLowerCase()).toInclude('microservices');
|
||||||
|
expect(result.answer.toLowerCase()).toInclude('monolithic');
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('OpenAI Research: should extract sources from markdown links', async () => {
|
||||||
|
const result = await openaiProvider.research({
|
||||||
|
query: 'What is Node.js and provide some official documentation links?',
|
||||||
|
searchDepth: 'basic',
|
||||||
|
maxSources: 3
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('OpenAI Source Extraction:');
|
||||||
|
console.log('- Sources found:', result.sources.length);
|
||||||
|
|
||||||
|
await saveResearchResult('source_extraction_nodejs', result);
|
||||||
|
|
||||||
|
if (result.sources.length > 0) {
|
||||||
|
console.log('- Example source:', result.sources[0]);
|
||||||
|
expect(result.sources[0].url).toBeTruthy();
|
||||||
|
expect(result.sources[0].title).toBeTruthy();
|
||||||
|
}
|
||||||
|
|
||||||
|
expect(result.answer).toInclude('Node.js');
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('OpenAI Research: should handle research errors gracefully', async () => {
|
||||||
|
// Test with an extremely long query that might cause issues
|
||||||
|
const longQuery = 'a'.repeat(10000);
|
||||||
|
|
||||||
|
let errorCaught = false;
|
||||||
|
try {
|
||||||
|
await openaiProvider.research({
|
||||||
|
query: longQuery,
|
||||||
|
searchDepth: 'basic'
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
errorCaught = true;
|
||||||
|
console.log('Expected error for long query:', error.message.substring(0, 100));
|
||||||
|
expect(error.message).toBeTruthy();
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenAI might handle long queries, so we don't assert the error
|
||||||
|
console.log(`Long query error test - Error caught: ${errorCaught}`);
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('OpenAI Research: should respect maxSources parameter', async () => {
|
||||||
|
const maxSources = 3;
|
||||||
|
const result = await openaiProvider.research({
|
||||||
|
query: 'List popular JavaScript frameworks',
|
||||||
|
searchDepth: 'basic',
|
||||||
|
maxSources: maxSources
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log(`OpenAI Max Sources Test - Requested: ${maxSources}, Found: ${result.sources.length}`);
|
||||||
|
|
||||||
|
// The API might not always return exactly maxSources, but should respect it as a limit
|
||||||
|
if (result.sources.length > 0) {
|
||||||
|
expect(result.sources.length).toBeLessThanOrEqual(maxSources * 2); // Allow some flexibility
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('OpenAI Research: should clean up provider', async () => {
|
||||||
|
await openaiProvider.stop();
|
||||||
|
console.log('OpenAI research provider stopped successfully');
|
||||||
|
});
|
||||||
|
|
||||||
|
export default tap.start();
|
80
test/test.research.stubs.ts
Normal file
@@ -0,0 +1,80 @@
import { tap, expect } from '@push.rocks/tapbundle';
import * as smartai from '../ts/index.js';

// Test research method stubs for providers without full implementation
// These providers have research methods that throw "not yet supported" errors

tap.test('Research Stubs: Perplexity provider should have research method', async () => {
  const perplexityProvider = new smartai.PerplexityProvider({
    perplexityToken: 'test-token'
  });

  // Perplexity has a basic implementation with Sonar models
  expect(typeof perplexityProvider.research).toEqual('function');
});

tap.test('Research Stubs: Groq provider should throw not supported error', async () => {
  const groqProvider = new smartai.GroqProvider({
    groqToken: 'test-token'
  });

  expect(typeof groqProvider.research).toEqual('function');

  let errorCaught = false;
  try {
    await groqProvider.research({ query: 'test' });
  } catch (error) {
    errorCaught = true;
    expect(error.message).toInclude('not yet supported');
  }
  expect(errorCaught).toBeTrue();
});

tap.test('Research Stubs: Ollama provider should throw not supported error', async () => {
  const ollamaProvider = new smartai.OllamaProvider({});

  expect(typeof ollamaProvider.research).toEqual('function');

  let errorCaught = false;
  try {
    await ollamaProvider.research({ query: 'test' });
  } catch (error) {
    errorCaught = true;
    expect(error.message).toInclude('not yet supported');
  }
  expect(errorCaught).toBeTrue();
});

tap.test('Research Stubs: xAI provider should throw not supported error', async () => {
  const xaiProvider = new smartai.XAIProvider({
    xaiToken: 'test-token'
  });

  expect(typeof xaiProvider.research).toEqual('function');

  let errorCaught = false;
  try {
    await xaiProvider.research({ query: 'test' });
  } catch (error) {
    errorCaught = true;
    expect(error.message).toInclude('not yet supported');
  }
  expect(errorCaught).toBeTrue();
});

tap.test('Research Stubs: Exo provider should throw not supported error', async () => {
  const exoProvider = new smartai.ExoProvider({});

  expect(typeof exoProvider.research).toEqual('function');

  let errorCaught = false;
  try {
    await exoProvider.research({ query: 'test' });
  } catch (error) {
    errorCaught = true;
    expect(error.message).toInclude('not yet supported');
  }
  expect(errorCaught).toBeTrue();
});

export default tap.start();
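These stub tests pin down only the error contract: a provider without research support must expose a `research()` method that rejects with a message containing 'not yet supported'. The provider-side code is not part of this diff, but a stub satisfying the tests presumably looks like the following sketch (class name, import path, and exact wording beyond the asserted substring are assumptions):

```typescript
import type { ResearchOptions, ResearchResponse } from '@push.rocks/smartai'; // type exports as exercised in test/test.interfaces.ts

// Hypothetical stub shape; only the thrown message substring is required by the tests.
class ExampleStubProvider {
  public async research(_options: ResearchOptions): Promise<ResearchResponse> {
    throw new Error('Research capabilities are not yet supported by this provider.');
  }
}
```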
36
test/testimages/coffee-dani/README.md
Normal file
@@ -0,0 +1,36 @@
# Coffee Image Attribution
|
||||||
|
|
||||||
|
## coffee.jpg
|
||||||
|
|
||||||
|
**Photographer:** Dani (@frokz)
|
||||||
|
**Source URL:** https://unsplash.com/photos/cup-of-coffee-on-saucer-ZLqxSzvVr7I
|
||||||
|
**Direct Link:** https://images.unsplash.com/photo-1506372023823-741c83b836fe
|
||||||
|
|
||||||
|
### Metadata
|
||||||
|
- **Title:** Cup of coffee on saucer
|
||||||
|
- **Description:** One of many coffee-moments in my life ;)
|
||||||
|
- **Date Published:** September 25, 2017
|
||||||
|
- **Location:** Stockholm, Sweden
|
||||||
|
- **Tags:** coffee, cafe, heart, coffee cup, cup, barista, latte, mug, saucer, food, sweden, stockholm
|
||||||
|
|
||||||
|
### License
|
||||||
|
**Unsplash License** - Free to use
|
||||||
|
- ✅ Commercial and non-commercial use
|
||||||
|
- ✅ No permission needed
|
||||||
|
- ❌ Cannot be sold without significant modification
|
||||||
|
- ❌ Cannot be used to replicate Unsplash or similar service
|
||||||
|
|
||||||
|
Full license: https://unsplash.com/license
|
||||||
|
|
||||||
|
### Usage in This Project
|
||||||
|
This image is used for testing vision/image processing capabilities in the SmartAI library test suite, specifically for:
|
||||||
|
- Testing coffee/beverage recognition
|
||||||
|
- Latte art pattern detection (heart shape)
|
||||||
|
- Scene/environment analysis
|
||||||
|
- Multi-element image understanding (cup, saucer, table)
|
||||||
|
|
||||||
|
### Download Information
|
||||||
|
- **Downloaded:** September 28, 2025
|
||||||
|
- **Original Filename:** dani-ZLqxSzvVr7I-unsplash.jpg
|
||||||
|
- **Resolution:** High resolution (3.7 MB)
|
||||||
|
- **Format:** JPEG
|
BIN
test/testimages/coffee-dani/coffee.jpg
Normal file
Binary file not shown. Size: 3.7 MiB
40
test/testimages/laptop-nicolas/README.md
Normal file
@@ -0,0 +1,40 @@
# Laptop Image Attribution
|
||||||
|
|
||||||
|
## laptop.jpg
|
||||||
|
|
||||||
|
**Photographer:** Nicolas Bichon (@nicol3a)
|
||||||
|
**Source URL:** https://unsplash.com/photos/a-laptop-computer-sitting-on-top-of-a-wooden-desk-ZhV4iqAXxyA
|
||||||
|
**Direct Link:** https://images.unsplash.com/photo-1704230972797-e0e3aba0fce7
|
||||||
|
|
||||||
|
### Metadata
|
||||||
|
- **Title:** A laptop computer sitting on top of a wooden desk
|
||||||
|
- **Description:** Lifestyle photo I took for my indie app Type, a macOS app to take notes without interrupting your flow. https://usetype.app.
|
||||||
|
- **Date Published:** January 2, 2024
|
||||||
|
- **Camera:** FUJIFILM, X-T20
|
||||||
|
- **Tags:** computer, laptop, mac, keyboard, computer keyboard, computer hardware, furniture, table, electronics, screen, monitor, hardware, display, tabletop, lcd screen, digital display
|
||||||
|
|
||||||
|
### Statistics
|
||||||
|
- **Views:** 183,020
|
||||||
|
- **Downloads:** 757
|
||||||
|
|
||||||
|
### License
|
||||||
|
**Unsplash License** - Free to use
|
||||||
|
- ✅ Commercial and non-commercial use
|
||||||
|
- ✅ No permission needed
|
||||||
|
- ❌ Cannot be sold without significant modification
|
||||||
|
- ❌ Cannot be used to replicate Unsplash or similar service
|
||||||
|
|
||||||
|
Full license: https://unsplash.com/license
|
||||||
|
|
||||||
|
### Usage in This Project
|
||||||
|
This image is used for testing vision/image processing capabilities in the SmartAI library test suite, specifically for:
|
||||||
|
- Testing technology/computer equipment recognition
|
||||||
|
- Workspace/office environment analysis
|
||||||
|
- Object detection (laptop, keyboard, monitor, table)
|
||||||
|
- Scene understanding and context analysis
|
||||||
|
|
||||||
|
### Download Information
|
||||||
|
- **Downloaded:** September 28, 2025
|
||||||
|
- **Original Filename:** nicolas-bichon-ZhV4iqAXxyA-unsplash.jpg
|
||||||
|
- **Resolution:** High resolution (1.8 MB)
|
||||||
|
- **Format:** JPEG
|
BIN
test/testimages/laptop-nicolas/laptop.jpg
Normal file
Binary file not shown. Size: 1.8 MiB
40
test/testimages/receipt-annie/README.md
Normal file
@@ -0,0 +1,40 @@
# Receipt Image Attribution
|
||||||
|
|
||||||
|
## receipt.jpg
|
||||||
|
|
||||||
|
**Photographer:** Annie Spratt (@anniespratt)
|
||||||
|
**Source URL:** https://unsplash.com/photos/a-receipt-sitting-on-top-of-a-wooden-table-recgFWxDO1Y
|
||||||
|
**Direct Link:** https://images.unsplash.com/photo-1731686602391-7484df33a03c
|
||||||
|
|
||||||
|
### Metadata
|
||||||
|
- **Title:** A receipt sitting on top of a wooden table
|
||||||
|
- **Description:** Download this free HD photo of text, document, invoice, and receipt by Annie Spratt
|
||||||
|
- **Date Published:** November 15, 2024
|
||||||
|
- **Tags:** text, document, invoice, receipt, diaper
|
||||||
|
|
||||||
|
### Statistics
|
||||||
|
- **Views:** 54,593
|
||||||
|
- **Downloads:** 764
|
||||||
|
|
||||||
|
### License
|
||||||
|
**Unsplash License** - Free to use
|
||||||
|
- ✅ Commercial and non-commercial use
|
||||||
|
- ✅ No permission needed
|
||||||
|
- ❌ Cannot be sold without significant modification
|
||||||
|
- ❌ Cannot be used to replicate Unsplash or similar service
|
||||||
|
|
||||||
|
Full license: https://unsplash.com/license
|
||||||
|
|
||||||
|
### Usage in This Project
|
||||||
|
This image is used for testing vision/image processing capabilities in the SmartAI library test suite, specifically for:
|
||||||
|
- Testing text extraction and OCR capabilities
|
||||||
|
- Document recognition and classification
|
||||||
|
- Receipt/invoice analysis
|
||||||
|
- Text-heavy image understanding
|
||||||
|
- Structured data extraction from documents
|
||||||
|
|
||||||
|
### Download Information
|
||||||
|
- **Downloaded:** September 28, 2025
|
||||||
|
- **Original Filename:** annie-spratt-recgFWxDO1Y-unsplash.jpg
|
||||||
|
- **Resolution:** High resolution (3.3 MB)
|
||||||
|
- **Format:** JPEG
|
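A hypothetical sketch of a receipt-extraction call using this image; the environment variable, prompt wording, and top-level `await` are illustrative, not taken from the actual tests.

```typescript
import { promises as fs } from 'fs';
import { AnthropicProvider } from '@push.rocks/smartai';

// Sketch only: vision() is the provider method shown later in this compare view.
const provider = new AnthropicProvider({ anthropicToken: process.env.ANTHROPIC_TOKEN! });
await provider.start();

const receipt = await fs.readFile('test/testimages/receipt-annie/receipt.jpg');
const extracted = await provider.vision({
  image: receipt,
  prompt: 'Extract the merchant name, date, and total amount from this receipt.',
});
console.log(extracted);

await provider.stop();
```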
BIN  test/testimages/receipt-annie/receipt.jpg  Normal file
Binary file not shown (new file, 3.3 MiB).
@@ -1,8 +1,8 @@
/**
- * autocreated commitinfo by @pushrocks/commitinfo
 * autocreated commitinfo by @push.rocks/commitinfo
 */
export const commitinfo = {
  name: '@push.rocks/smartai',
-  version: '0.0.15',
  version: '0.7.1',
-  description: 'A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.'
  description: 'SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.'
}
@@ -1,32 +1,204 @@
import * as plugins from './plugins.js';

/**
 * Message format for chat interactions
 */
export interface ChatMessage {
  role: 'assistant' | 'user' | 'system';
  content: string;
}

/**
 * Options for chat interactions
 */
export interface ChatOptions {
  systemMessage: string;
  userMessage: string;
  messageHistory: ChatMessage[];
}

/**
 * Response format for chat interactions
 */
export interface ChatResponse {
  role: 'assistant';
  message: string;
}

/**
 * Options for research interactions
 */
export interface ResearchOptions {
  query: string;
  searchDepth?: 'basic' | 'advanced' | 'deep';
  maxSources?: number;
  includeWebSearch?: boolean;
  background?: boolean;
}

/**
 * Response format for research interactions
 */
export interface ResearchResponse {
  answer: string;
  sources: Array<{
    url: string;
    title: string;
    snippet: string;
  }>;
  searchQueries?: string[];
  metadata?: any;
}

/**
 * Options for image generation
 */
export interface ImageGenerateOptions {
  prompt: string;
  model?: 'gpt-image-1' | 'dall-e-3' | 'dall-e-2';
  quality?: 'low' | 'medium' | 'high' | 'standard' | 'hd' | 'auto';
  size?: '256x256' | '512x512' | '1024x1024' | '1536x1024' | '1024x1536' | '1792x1024' | '1024x1792' | 'auto';
  style?: 'vivid' | 'natural';
  background?: 'transparent' | 'opaque' | 'auto';
  outputFormat?: 'png' | 'jpeg' | 'webp';
  outputCompression?: number; // 0-100 for webp/jpeg
  moderation?: 'low' | 'auto';
  n?: number; // Number of images to generate
  stream?: boolean;
  partialImages?: number; // 0-3 for streaming
}

/**
 * Options for image editing
 */
export interface ImageEditOptions {
  image: Buffer;
  prompt: string;
  mask?: Buffer;
  model?: 'gpt-image-1' | 'dall-e-2';
  quality?: 'low' | 'medium' | 'high' | 'standard' | 'auto';
  size?: '256x256' | '512x512' | '1024x1024' | '1536x1024' | '1024x1536' | 'auto';
  background?: 'transparent' | 'opaque' | 'auto';
  outputFormat?: 'png' | 'jpeg' | 'webp';
  outputCompression?: number;
  n?: number;
  stream?: boolean;
  partialImages?: number;
}

/**
 * Response format for image operations
 */
export interface ImageResponse {
  images: Array<{
    b64_json?: string;
    url?: string;
    revisedPrompt?: string;
  }>;
  metadata?: {
    model: string;
    quality?: string;
    size?: string;
    outputFormat?: string;
    tokensUsed?: number;
  };
}

/**
 * Abstract base class for multi-modal AI models.
 * Provides a common interface for different AI providers (OpenAI, Anthropic, Perplexity, Ollama)
 */
export abstract class MultiModalModel {
-  /**
-   * starts the model
-   */
-  abstract start(): Promise<void>;
-
-  /**
-   * stops the model
-   */
-  abstract stop(): Promise<void>;
-
-  public abstract chat(optionsArg: {
-    systemMessage: string,
-    userMessage: string,
-    messageHistory: {
-      role: 'assistant' | 'user';
-      content: string;
-    }[]
-  }): Promise<{
-    role: 'assistant';
-    message: string;
-  }>
-
-  /**
-   * Defines a streaming interface for chat interactions.
-   * The implementation will vary based on the specific AI model.
-   * @param input
-   */
-  public abstract chatStream(input: ReadableStream<string>): Promise<ReadableStream<string>>;
  /**
   * SmartPdf instance for document processing
   * Shared across all methods that need PDF functionality
   */
  protected smartpdfInstance: plugins.smartpdf.SmartPdf;

  /**
   * Initializes the model and any necessary resources
   * Should be called before using any other methods
   */
  public async start(): Promise<void> {
    this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
    await this.smartpdfInstance.start();
  }

  /**
   * Cleans up any resources used by the model
   * Should be called when the model is no longer needed
   */
  public async stop(): Promise<void> {
    if (this.smartpdfInstance) {
      await this.smartpdfInstance.stop();
    }
  }

  /**
   * Synchronous chat interaction with the model
   * @param optionsArg Options containing system message, user message, and message history
   * @returns Promise resolving to the assistant's response
   */
  public abstract chat(optionsArg: ChatOptions): Promise<ChatResponse>;

  /**
   * Streaming interface for chat interactions
   * Allows for real-time responses from the model
   * @param input Stream of user messages
   * @returns Stream of model responses
   */
  public abstract chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>>;

  /**
   * Text-to-speech conversion
   * @param optionsArg Options containing the message to convert to speech
   * @returns Promise resolving to a readable stream of audio data
   * @throws Error if the provider doesn't support audio generation
   */
  public abstract audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream>;

  /**
   * Vision-language processing
   * @param optionsArg Options containing the image and prompt for analysis
   * @returns Promise resolving to the model's description or analysis of the image
   * @throws Error if the provider doesn't support vision tasks
   */
  public abstract vision(optionsArg: { image: Buffer; prompt: string }): Promise<string>;

  /**
   * Document analysis and processing
   * @param optionsArg Options containing system message, user message, PDF documents, and message history
   * @returns Promise resolving to the model's analysis of the documents
   * @throws Error if the provider doesn't support document processing
   */
  public abstract document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }>;

  /**
   * Research and web search capabilities
   * @param optionsArg Options containing the research query and configuration
   * @returns Promise resolving to the research results with sources
   * @throws Error if the provider doesn't support research capabilities
   */
  public abstract research(optionsArg: ResearchOptions): Promise<ResearchResponse>;

  /**
   * Image generation from text prompts
   * @param optionsArg Options containing the prompt and generation parameters
   * @returns Promise resolving to the generated image(s)
   * @throws Error if the provider doesn't support image generation
   */
  public abstract imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse>;

  /**
   * Image editing and inpainting
   * @param optionsArg Options containing the image, prompt, and editing parameters
   * @returns Promise resolving to the edited image(s)
   * @throws Error if the provider doesn't support image editing
   */
  public abstract imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse>;
}
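The interfaces above define the full multi-modal surface implemented by the providers below. As a quick orientation, here is a minimal, hypothetical sketch of calling `imageGenerate()` through the OpenAI provider (the only provider expected to support it in this release); the environment variable, option values, output path, and top-level `await` are illustrative, not library defaults.

```typescript
import * as fs from 'fs/promises';
import { OpenAiProvider } from '@push.rocks/smartai';

// Sketch only: option values are examples drawn from ImageGenerateOptions above.
const openai = new OpenAiProvider({
  openaiToken: process.env.OPENAI_TOKEN!, // assumption: token provided via environment
  imageModel: 'gpt-image-1',
});
await openai.start();

const result = await openai.imageGenerate({
  prompt: 'A minimalist flat-design icon of a note-taking app',
  model: 'gpt-image-1',
  quality: 'high',
  size: '1024x1024',
  outputFormat: 'png',
});

// Persist the first image if a base64 payload is present.
const first = result.images[0];
if (first?.b64_json) {
  await fs.writeFile('generated.png', Buffer.from(first.b64_json, 'base64'));
}

await openai.stop();
```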
@@ -48,6 +48,18 @@ export class Conversation {
    return conversation;
  }

  public static async createWithExo(smartaiRefArg: SmartAi) {
    if (!smartaiRefArg.exoProvider) {
      throw new Error('Exo provider not available');
    }
    const conversation = new Conversation(smartaiRefArg, {
      processFunction: async (input) => {
        return '' // TODO implement proper streaming
      }
    });
    return conversation;
  }

  public static async createWithOllama(smartaiRefArg: SmartAi) {
    if (!smartaiRefArg.ollamaProvider) {
      throw new Error('Ollama provider not available');
@@ -60,6 +72,30 @@ export class Conversation {
    return conversation;
  }

  public static async createWithGroq(smartaiRefArg: SmartAi) {
    if (!smartaiRefArg.groqProvider) {
      throw new Error('Groq provider not available');
    }
    const conversation = new Conversation(smartaiRefArg, {
      processFunction: async (input) => {
        return '' // TODO implement proper streaming
      }
    });
    return conversation;
  }

  public static async createWithXai(smartaiRefArg: SmartAi) {
    if (!smartaiRefArg.xaiProvider) {
      throw new Error('XAI provider not available');
    }
    const conversation = new Conversation(smartaiRefArg, {
      processFunction: async (input) => {
        return '' // TODO implement proper streaming
      }
    });
    return conversation;
  }

  // INSTANCE
  smartaiRef: SmartAi
  private systemMessage: string;
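The new factory methods above all leave `processFunction` as a TODO that returns an empty string. A hypothetical, non-streaming fallback for one of them might look like the fragment below (it assumes only the provider `chat()` signature shown elsewhere in this diff; the empty system message and empty history are placeholders, and real streaming support is still pending upstream).

```typescript
// Hypothetical body for createWithGroq's conversation, replacing the TODO above:
const conversation = new Conversation(smartaiRefArg, {
  processFunction: async (input: string) => {
    const response = await smartaiRefArg.groqProvider.chat({
      systemMessage: '',   // assumption: no system prompt is wired up yet
      userMessage: input,
      messageHistory: [],  // assumption: history handling is out of scope for this sketch
    });
    return response.message;
  },
});
return conversation;
```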
@@ -1,18 +1,32 @@
import { Conversation } from './classes.conversation.js';
import * as plugins from './plugins.js';
import { AnthropicProvider } from './provider.anthropic.js';
-import type { OllamaProvider } from './provider.ollama.js';
import { OllamaProvider } from './provider.ollama.js';
import { OpenAiProvider } from './provider.openai.js';
-import type { PerplexityProvider } from './provider.perplexity.js';
import { PerplexityProvider } from './provider.perplexity.js';
import { ExoProvider } from './provider.exo.js';
import { GroqProvider } from './provider.groq.js';
import { XAIProvider } from './provider.xai.js';

export interface ISmartAiOptions {
  openaiToken?: string;
  anthropicToken?: string;
  perplexityToken?: string;
  groqToken?: string;
  xaiToken?: string;
  exo?: {
    baseUrl?: string;
    apiKey?: string;
  };
  ollama?: {
    baseUrl?: string;
    model?: string;
    visionModel?: string;
  };
}

-export type TProvider = 'openai' | 'anthropic' | 'perplexity' | 'ollama';
export type TProvider = 'openai' | 'anthropic' | 'perplexity' | 'ollama' | 'exo' | 'groq' | 'xai';

export class SmartAi {
  public options: ISmartAiOptions;
@@ -21,6 +35,9 @@ export class SmartAi {
  public anthropicProvider: AnthropicProvider;
  public perplexityProvider: PerplexityProvider;
  public ollamaProvider: OllamaProvider;
  public exoProvider: ExoProvider;
  public groqProvider: GroqProvider;
  public xaiProvider: XAIProvider;

  constructor(optionsArg: ISmartAiOptions) {
    this.options = optionsArg;
@@ -37,16 +54,74 @@ export class SmartAi {
      this.anthropicProvider = new AnthropicProvider({
        anthropicToken: this.options.anthropicToken,
      });
      await this.anthropicProvider.start();
    }
    if (this.options.perplexityToken) {
      this.perplexityProvider = new PerplexityProvider({
        perplexityToken: this.options.perplexityToken,
      });
      await this.perplexityProvider.start();
    }
    if (this.options.groqToken) {
      this.groqProvider = new GroqProvider({
        groqToken: this.options.groqToken,
      });
      await this.groqProvider.start();
    }
    if (this.options.xaiToken) {
      this.xaiProvider = new XAIProvider({
        xaiToken: this.options.xaiToken,
      });
      await this.xaiProvider.start();
    }
    if (this.options.ollama) {
      this.ollamaProvider = new OllamaProvider({
        baseUrl: this.options.ollama.baseUrl,
        model: this.options.ollama.model,
        visionModel: this.options.ollama.visionModel,
      });
      await this.ollamaProvider.start();
    }
    if (this.options.exo) {
      this.exoProvider = new ExoProvider({
        exoBaseUrl: this.options.exo.baseUrl,
        apiKey: this.options.exo.apiKey,
      });
      await this.exoProvider.start();
    }
  }

-  public async stop() {}
  public async stop() {
    if (this.openaiProvider) {
      await this.openaiProvider.stop();
    }
    if (this.anthropicProvider) {
      await this.anthropicProvider.stop();
    }
    if (this.perplexityProvider) {
      await this.perplexityProvider.stop();
    }
    if (this.groqProvider) {
      await this.groqProvider.stop();
    }
    if (this.xaiProvider) {
      await this.xaiProvider.stop();
    }
    if (this.ollamaProvider) {
      await this.ollamaProvider.stop();
    }
    if (this.exoProvider) {
      await this.exoProvider.stop();
    }
  }

  /**
   * create a new conversation
   */
  createConversation(provider: TProvider) {
    switch (provider) {
      case 'exo':
        return Conversation.createWithExo(this);
      case 'openai':
        return Conversation.createWithOpenAi(this);
      case 'anthropic':
@@ -55,6 +130,10 @@ export class SmartAi {
        return Conversation.createWithPerplexity(this);
      case 'ollama':
        return Conversation.createWithOllama(this);
      case 'groq':
        return Conversation.createWithGroq(this);
      case 'xai':
        return Conversation.createWithXai(this);
      default:
        throw new Error('Provider not available');
    }
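A minimal usage sketch of the expanded `SmartAi` class, assuming the async start()/stop() lifecycle shown above; the environment variables and Ollama settings are illustrative.

```typescript
import { SmartAi } from '@push.rocks/smartai';

// Sketch: only providers with configured tokens/options get instantiated and started.
const smartAi = new SmartAi({
  openaiToken: process.env.OPENAI_TOKEN!,
  groqToken: process.env.GROQ_TOKEN!,
  ollama: { baseUrl: 'http://localhost:11434', model: 'llama2', visionModel: 'llava' },
});

await smartAi.start();

// Routes to the matching factory in classes.conversation.ts.
const conversation = await smartAi.createConversation('groq');

await smartAi.stop();
```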
@@ -1,3 +1,9 @@
export * from './classes.smartai.js';
export * from './abstract.classes.multimodal.js';
export * from './provider.openai.js';
export * from './provider.anthropic.js';
export * from './provider.perplexity.js';
export * from './provider.groq.js';
export * from './provider.ollama.js';
export * from './provider.xai.js';
export * from './provider.exo.js';
@@ -1,9 +1,25 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type {
  ChatOptions,
  ChatResponse,
  ChatMessage,
  ResearchOptions,
  ResearchResponse,
  ImageGenerateOptions,
  ImageEditOptions,
  ImageResponse
} from './abstract.classes.multimodal.js';
import type { ImageBlockParam, TextBlockParam } from '@anthropic-ai/sdk/resources/messages';

type ContentBlock = ImageBlockParam | TextBlockParam;

export interface IAnthropicProviderOptions {
  anthropicToken: string;
  enableWebSearch?: boolean;
  searchDomainAllowList?: string[];
  searchDomainBlockList?: string[];
}

export class AnthropicProvider extends MultiModalModel {
@@ -16,47 +32,374 @@ export class AnthropicProvider extends MultiModalModel {
  }

  async start() {
    await super.start();
    this.anthropicApiClient = new plugins.anthropic.default({
      apiKey: this.options.anthropicToken,
    });
  }

-  async stop() {}
  async stop() {
    await super.stop();
  }

-  public async chatStream(input: ReadableStream<string>): Promise<ReadableStream<string>> {
-    // TODO: implement for OpenAI
-    const returnStream = new ReadableStream();
-    return returnStream;
-  }
  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // Create a TransformStream to process the input
    const transform = new TransformStream<Uint8Array, string>({
      async transform(chunk, controller) {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }

        // If we have a complete message, send it to Anthropic
        if (currentMessage) {
          const stream = await this.anthropicApiClient.messages.create({
            model: 'claude-sonnet-4-5-20250929',
            messages: [{ role: currentMessage.role, content: currentMessage.content }],
            system: '',
            stream: true,
            max_tokens: 4000,
          });

          // Process each chunk from Anthropic
          for await (const chunk of stream) {
            const content = chunk.delta?.text;
            if (content) {
              controller.enqueue(content);
            }
          }

          currentMessage = null;
        }
      },

      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });

    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  // Implementing the synchronous chat interaction
-  public async chat(optionsArg: {
-    systemMessage: string;
-    userMessage: string;
-    messageHistory: {
-      role: 'assistant' | 'user';
-      content: string;
-    }[];
-  }) {
  public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
    // Convert message history to Anthropic format
    const messages = optionsArg.messageHistory.map(msg => ({
      role: msg.role === 'assistant' ? 'assistant' as const : 'user' as const,
      content: msg.content
    }));

    const result = await this.anthropicApiClient.messages.create({
-      model: 'claude-3-opus-20240229',
      model: 'claude-sonnet-4-5-20250929',
      system: optionsArg.systemMessage,
      messages: [
-        ...optionsArg.messageHistory,
        ...messages,
-        { role: 'user', content: optionsArg.userMessage },
        { role: 'user' as const, content: optionsArg.userMessage }
      ],
      max_tokens: 4000,
    });

    // Extract text content from the response
    let message = '';
    for (const block of result.content) {
      if ('text' in block) {
        message += block.text;
      }
    }

    return {
-      role: result.role as 'assistant',
      role: 'assistant' as const,
-      message: result.content.join('\n'),
      message,
    };
  }

-  private async audio(messageArg: string) {
  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    // Anthropic does not provide an audio API, so this method is not implemented.
    throw new Error('Audio generation is not yet supported by Anthropic.');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    const base64Image = optionsArg.image.toString('base64');

    const content: ContentBlock[] = [
      {
        type: 'text',
        text: optionsArg.prompt
      },
      {
        type: 'image',
        source: {
          type: 'base64',
          media_type: 'image/jpeg',
          data: base64Image
        }
      }
    ];

    const result = await this.anthropicApiClient.messages.create({
      model: 'claude-sonnet-4-5-20250929',
      messages: [{
        role: 'user',
        content
      }],
      max_tokens: 1024
    });

    // Extract text content from the response
    let message = '';
    for (const block of result.content) {
      if ('text' in block) {
        message += block.text;
      }
    }
    return message;
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    // Convert PDF documents to images using SmartPDF
    let documentImageBytesArray: Uint8Array[] = [];

    for (const pdfDocument of optionsArg.pdfDocuments) {
      const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
      documentImageBytesArray = documentImageBytesArray.concat(documentImageArray);
    }

    // Convert message history to Anthropic format
    const messages = optionsArg.messageHistory.map(msg => ({
      role: msg.role === 'assistant' ? 'assistant' as const : 'user' as const,
      content: msg.content
    }));

    // Create content array with text and images
    const content: ContentBlock[] = [
      {
        type: 'text',
        text: optionsArg.userMessage
      }
    ];

    // Add each document page as an image
    for (const imageBytes of documentImageBytesArray) {
      content.push({
        type: 'image',
        source: {
          type: 'base64',
          media_type: 'image/jpeg',
          data: Buffer.from(imageBytes).toString('base64')
        }
      });
    }

    const result = await this.anthropicApiClient.messages.create({
      model: 'claude-sonnet-4-5-20250929',
      system: optionsArg.systemMessage,
      messages: [
        ...messages,
        { role: 'user', content }
      ],
      max_tokens: 4096
    });

    // Extract text content from the response
    let message = '';
    for (const block of result.content) {
      if ('text' in block) {
        message += block.text;
      }
    }

    return {
      message: {
        role: 'assistant',
        content: message
      }
    };
  }

  public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
    // Prepare the messages for the research request
    const systemMessage = `You are a research assistant with web search capabilities.
Provide comprehensive, well-researched answers with citations and sources.
When searching the web, be thorough and cite your sources accurately.`;

    try {
      // Build the tool configuration for web search
      const tools: any[] = [];

      if (this.options.enableWebSearch) {
        const webSearchTool: any = {
          type: 'web_search_20250305',
          name: 'web_search'
        };

        // Add optional parameters
        if (optionsArg.maxSources) {
          webSearchTool.max_uses = optionsArg.maxSources;
        }

        if (this.options.searchDomainAllowList?.length) {
          webSearchTool.allowed_domains = this.options.searchDomainAllowList;
        } else if (this.options.searchDomainBlockList?.length) {
          webSearchTool.blocked_domains = this.options.searchDomainBlockList;
        }

        tools.push(webSearchTool);
      }

      // Configure the request based on search depth
      const maxTokens = optionsArg.searchDepth === 'deep' ? 8192 :
                        optionsArg.searchDepth === 'advanced' ? 6144 : 4096;

      // Create the research request
      const requestParams: any = {
        model: 'claude-sonnet-4-5-20250929',
        system: systemMessage,
        messages: [
          {
            role: 'user' as const,
            content: optionsArg.query
          }
        ],
        max_tokens: maxTokens,
        temperature: 0.7
      };

      // Add tools if web search is enabled
      if (tools.length > 0) {
        requestParams.tools = tools;
      }

      // Execute the research request
      const result = await this.anthropicApiClient.messages.create(requestParams);

      // Extract the answer from content blocks
      let answer = '';
      const sources: Array<{ url: string; title: string; snippet: string }> = [];
      const searchQueries: string[] = [];

      // Process content blocks
      for (const block of result.content) {
        if ('text' in block) {
          // Accumulate text content
          answer += block.text;

          // Extract citations if present
          if ('citations' in block && Array.isArray(block.citations)) {
            for (const citation of block.citations) {
              if (citation.type === 'web_search_result_location') {
                sources.push({
                  title: citation.title || '',
                  url: citation.url || '',
                  snippet: citation.cited_text || ''
                });
              }
            }
          }
        } else if ('type' in block && block.type === 'server_tool_use') {
          // Extract search queries from server tool use
          if (block.name === 'web_search' && block.input && typeof block.input === 'object' && 'query' in block.input) {
            searchQueries.push((block.input as any).query);
          }
        } else if ('type' in block && block.type === 'web_search_tool_result') {
          // Extract sources from web search results
          if (Array.isArray(block.content)) {
            for (const result of block.content) {
              if (result.type === 'web_search_result') {
                // Only add if not already in sources (avoid duplicates from citations)
                if (!sources.some(s => s.url === result.url)) {
                  sources.push({
                    title: result.title || '',
                    url: result.url || '',
                    snippet: '' // Search results don't include snippets, only citations do
                  });
                }
              }
            }
          }
        }
      }

      // Fallback: Parse markdown-style links if no citations found
      if (sources.length === 0) {
        const urlRegex = /\[([^\]]+)\]\(([^)]+)\)/g;
        let match: RegExpExecArray | null;

        while ((match = urlRegex.exec(answer)) !== null) {
          sources.push({
            title: match[1],
            url: match[2],
            snippet: ''
          });
        }
      }

      // Check if web search was used based on usage info
      const webSearchCount = result.usage?.server_tool_use?.web_search_requests || 0;

      return {
        answer,
        sources,
        searchQueries: searchQueries.length > 0 ? searchQueries : undefined,
        metadata: {
          model: 'claude-sonnet-4-5-20250929',
          searchDepth: optionsArg.searchDepth || 'basic',
          tokensUsed: result.usage?.output_tokens,
          webSearchesPerformed: webSearchCount
        }
      };
    } catch (error) {
      console.error('Anthropic research error:', error);
      throw new Error(`Failed to perform research: ${error.message}`);
    }
  }

  /**
   * Image generation is not supported by Anthropic
   */
  public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
    throw new Error('Image generation is not supported by Anthropic. Claude can only analyze images, not generate them. Please use OpenAI provider for image generation.');
  }

  /**
   * Image editing is not supported by Anthropic
   */
  public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
    throw new Error('Image editing is not supported by Anthropic. Claude can only analyze images, not edit them. Please use OpenAI provider for image editing.');
  }
}
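A minimal usage sketch of the `research()` implementation above, assuming web search is enabled via `IAnthropicProviderOptions`; the environment variable, query, and domain list are illustrative.

```typescript
import { AnthropicProvider } from '@push.rocks/smartai';

// Sketch only: enableWebSearch and searchDomainAllowList map to the options shown above.
const anthropic = new AnthropicProvider({
  anthropicToken: process.env.ANTHROPIC_TOKEN!,
  enableWebSearch: true,
  searchDomainAllowList: ['developer.mozilla.org'],
});
await anthropic.start();

const research = await anthropic.research({
  query: 'What changed in the Fetch API in recent browser releases?',
  searchDepth: 'advanced',
  maxSources: 5,
});

console.log(research.answer);
for (const source of research.sources) {
  console.log(`- ${source.title}: ${source.url}`);
}

await anthropic.stop();
```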
155  ts/provider.exo.ts  Normal file
@@ -0,0 +1,155 @@

import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type {
  ChatOptions,
  ChatResponse,
  ChatMessage,
  ResearchOptions,
  ResearchResponse,
  ImageGenerateOptions,
  ImageEditOptions,
  ImageResponse
} from './abstract.classes.multimodal.js';
import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';

export interface IExoProviderOptions {
  exoBaseUrl?: string;
  apiKey?: string;
}

export class ExoProvider extends MultiModalModel {
  private options: IExoProviderOptions;
  public openAiApiClient: plugins.openai.default;

  constructor(optionsArg: IExoProviderOptions = {}) {
    super();
    this.options = {
      exoBaseUrl: 'http://localhost:8080/v1', // Default Exo API endpoint
      ...optionsArg
    };
  }

  public async start() {
    this.openAiApiClient = new plugins.openai.default({
      apiKey: this.options.apiKey || 'not-needed', // Exo might not require an API key for local deployment
      baseURL: this.options.exoBaseUrl,
    });
  }

  public async stop() {}

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // Create a TransformStream to process the input
    const transform = new TransformStream<Uint8Array, string>({
      transform: async (chunk, controller) => {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = message;

              // Process the message based on its type
              if (message.type === 'message') {
                const response = await this.chat({
                  systemMessage: '',
                  userMessage: message.content,
                  messageHistory: [{ role: message.role as 'user' | 'assistant' | 'system', content: message.content }]
                });

                controller.enqueue(JSON.stringify(response) + '\n');
              }
            } catch (error) {
              console.error('Error processing message:', error);
            }
          }
        }
      },
      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            currentMessage = message;
          } catch (error) {
            console.error('Error processing remaining buffer:', error);
          }
        }
      }
    });

    return input.pipeThrough(transform);
  }

  public async chat(options: ChatOptions): Promise<ChatResponse> {
    const messages: ChatCompletionMessageParam[] = [
      { role: 'system', content: options.systemMessage },
      ...options.messageHistory,
      { role: 'user', content: options.userMessage }
    ];

    try {
      const response = await this.openAiApiClient.chat.completions.create({
        model: 'local-model', // Exo uses local models
        messages: messages,
        stream: false
      });

      return {
        role: 'assistant',
        message: response.choices[0]?.message?.content || ''
      };
    } catch (error) {
      console.error('Error in chat completion:', error);
      throw error;
    }
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('Audio generation is not supported by Exo provider');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('Vision processing is not supported by Exo provider');
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    throw new Error('Document processing is not supported by Exo provider');
  }

  public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
    throw new Error('Research capabilities are not yet supported by Exo provider.');
  }

  /**
   * Image generation is not supported by Exo
   */
  public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
    throw new Error('Image generation is not supported by Exo. Please use OpenAI provider for image generation.');
  }

  /**
   * Image editing is not supported by Exo
   */
  public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
    throw new Error('Image editing is not supported by Exo. Please use OpenAI provider for image editing.');
  }
}
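A minimal chat sketch against the Exo provider above, assuming an Exo node listening on its default localhost endpoint; the prompt text and top-level `await` are illustrative.

```typescript
import { ExoProvider } from '@push.rocks/smartai';

// Sketch only: no API key is required for a local deployment.
const exo = new ExoProvider({ exoBaseUrl: 'http://localhost:8080/v1' });
await exo.start();

const reply = await exo.chat({
  systemMessage: 'You are a concise assistant.',
  userMessage: 'Summarize what Exo does in one sentence.',
  messageHistory: [],
});
console.log(reply.message);
```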
219  ts/provider.groq.ts  Normal file
@@ -0,0 +1,219 @@

import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type {
  ChatOptions,
  ChatResponse,
  ChatMessage,
  ResearchOptions,
  ResearchResponse,
  ImageGenerateOptions,
  ImageEditOptions,
  ImageResponse
} from './abstract.classes.multimodal.js';

export interface IGroqProviderOptions {
  groqToken: string;
  model?: string;
}

export class GroqProvider extends MultiModalModel {
  private options: IGroqProviderOptions;
  private baseUrl = 'https://api.groq.com/v1';

  constructor(optionsArg: IGroqProviderOptions) {
    super();
    this.options = {
      ...optionsArg,
      model: optionsArg.model || 'llama-3.3-70b-versatile', // Default model
    };
  }

  async start() {}

  async stop() {}

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // Create a TransformStream to process the input
    const transform = new TransformStream<Uint8Array, string>({
      transform: async (chunk, controller) => {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }

        // If we have a complete message, send it to Groq
        if (currentMessage) {
          const response = await fetch(`${this.baseUrl}/chat/completions`, {
            method: 'POST',
            headers: {
              'Authorization': `Bearer ${this.options.groqToken}`,
              'Content-Type': 'application/json',
            },
            body: JSON.stringify({
              model: this.options.model,
              messages: [{ role: currentMessage.role, content: currentMessage.content }],
              stream: true,
            }),
          });

          // Process each chunk from Groq
          const reader = response.body?.getReader();
          if (reader) {
            try {
              while (true) {
                const { done, value } = await reader.read();
                if (done) break;

                const chunk = new TextDecoder().decode(value);
                const lines = chunk.split('\n');

                for (const line of lines) {
                  if (line.startsWith('data: ')) {
                    const data = line.slice(6);
                    if (data === '[DONE]') break;

                    try {
                      const parsed = JSON.parse(data);
                      const content = parsed.choices[0]?.delta?.content;
                      if (content) {
                        controller.enqueue(content);
                      }
                    } catch (e) {
                      console.error('Failed to parse SSE data:', e);
                    }
                  }
                }
              }
            } finally {
              reader.releaseLock();
            }
          }

          currentMessage = null;
        }
      },

      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });

    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  // Implementing the synchronous chat interaction
  public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
    const messages = [
      // System message
      {
        role: 'system',
        content: optionsArg.systemMessage,
      },
      // Message history
      ...optionsArg.messageHistory.map(msg => ({
        role: msg.role,
        content: msg.content,
      })),
      // User message
      {
        role: 'user',
        content: optionsArg.userMessage,
      },
    ];

    const response = await fetch(`${this.baseUrl}/chat/completions`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.options.groqToken}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: this.options.model,
        messages,
        temperature: 0.7,
        max_completion_tokens: 1024,
        stream: false,
      }),
    });

    if (!response.ok) {
      const error = await response.json();
      throw new Error(`Groq API error: ${error.message || response.statusText}`);
    }

    const result = await response.json();

    return {
      role: 'assistant',
      message: result.choices[0].message.content,
    };
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    // Groq does not provide an audio API, so this method is not implemented.
    throw new Error('Audio generation is not yet supported by Groq.');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('Vision tasks are not yet supported by Groq.');
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    throw new Error('Document processing is not yet supported by Groq.');
  }

  public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
    throw new Error('Research capabilities are not yet supported by Groq provider.');
  }

  /**
   * Image generation is not supported by Groq
   */
  public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
    throw new Error('Image generation is not supported by Groq. Please use OpenAI provider for image generation.');
  }

  /**
   * Image editing is not supported by Groq
   */
  public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
    throw new Error('Image editing is not supported by Groq. Please use OpenAI provider for image editing.');
  }
}
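A minimal chat sketch against the Groq provider above; the environment variable and prompt text are illustrative, and the model falls back to the default `llama-3.3-70b-versatile` when omitted.

```typescript
import { GroqProvider } from '@push.rocks/smartai';

// Sketch only: token is assumed to come from the environment.
const groq = new GroqProvider({ groqToken: process.env.GROQ_TOKEN! });
await groq.start();

const answer = await groq.chat({
  systemMessage: 'Answer briefly.',
  userMessage: 'What is retrieval-augmented generation?',
  messageHistory: [],
});
console.log(answer.message);
```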
@@ -1,3 +1,281 @@
|
|||||||
import * as plugins from './plugins.js';
|
import * as plugins from './plugins.js';
|
||||||
|
import * as paths from './paths.js';
|
||||||
|
import { MultiModalModel } from './abstract.classes.multimodal.js';
|
||||||
|
import type {
|
||||||
|
ChatOptions,
|
||||||
|
ChatResponse,
|
||||||
|
ChatMessage,
|
||||||
|
ResearchOptions,
|
||||||
|
ResearchResponse,
|
||||||
|
ImageGenerateOptions,
|
||||||
|
ImageEditOptions,
|
||||||
|
ImageResponse
|
||||||
|
} from './abstract.classes.multimodal.js';
|
||||||
|
|
||||||
export class OllamaProvider {}
|
export interface IOllamaProviderOptions {
|
||||||
|
baseUrl?: string;
|
||||||
|
model?: string;
|
||||||
|
visionModel?: string; // Model to use for vision tasks (e.g. 'llava')
|
||||||
|
}
|
||||||
|
|
||||||
|
export class OllamaProvider extends MultiModalModel {
|
||||||
|
private options: IOllamaProviderOptions;
|
||||||
|
private baseUrl: string;
|
||||||
|
private model: string;
|
||||||
|
private visionModel: string;
|
||||||
|
|
||||||
|
constructor(optionsArg: IOllamaProviderOptions = {}) {
|
||||||
|
super();
|
||||||
|
this.options = optionsArg;
|
||||||
|
this.baseUrl = optionsArg.baseUrl || 'http://localhost:11434';
|
||||||
|
this.model = optionsArg.model || 'llama2';
|
||||||
|
this.visionModel = optionsArg.visionModel || 'llava';
|
||||||
|
}
|
||||||
|
|
||||||
|
async start() {
|
||||||
|
await super.start();
|
||||||
|
// Verify Ollama is running
|
||||||
|
try {
|
||||||
|
const response = await fetch(`${this.baseUrl}/api/tags`);
|
||||||
|
if (!response.ok) {
|
||||||
|
throw new Error('Failed to connect to Ollama server');
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
throw new Error(`Failed to connect to Ollama server at ${this.baseUrl}: ${error.message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async stop() {
|
||||||
|
await super.stop();
|
||||||
|
}
|
||||||
|
|
||||||
|
public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
|
||||||
|
// Create a TextDecoder to handle incoming chunks
|
||||||
|
const decoder = new TextDecoder();
|
||||||
|
let buffer = '';
|
||||||
|
let currentMessage: { role: string; content: string; } | null = null;
|
||||||
|
|
||||||
|
// Create a TransformStream to process the input
|
||||||
|
const transform = new TransformStream<Uint8Array, string>({
|
||||||
|
transform: async (chunk, controller) => {
|
||||||
|
buffer += decoder.decode(chunk, { stream: true });
|
||||||
|
|
||||||
|
// Try to parse complete JSON messages from the buffer
|
||||||
|
while (true) {
|
||||||
|
const newlineIndex = buffer.indexOf('\n');
|
||||||
|
if (newlineIndex === -1) break;
|
||||||
|
|
||||||
|
const line = buffer.slice(0, newlineIndex);
|
||||||
|
buffer = buffer.slice(newlineIndex + 1);
|
||||||
|
|
||||||
|
if (line.trim()) {
|
||||||
|
try {
|
||||||
|
const message = JSON.parse(line);
|
||||||
|
currentMessage = {
|
||||||
|
role: message.role || 'user',
|
||||||
|
content: message.content || '',
|
||||||
|
};
|
||||||
|
} catch (e) {
|
||||||
|
console.error('Failed to parse message:', e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we have a complete message, send it to Ollama
|
||||||
|
if (currentMessage) {
|
||||||
|
const response = await fetch(`${this.baseUrl}/api/chat`, {
|
||||||
|
method: 'POST',
|
||||||
|
headers: {
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
},
|
||||||
|
body: JSON.stringify({
|
||||||
|
model: this.model,
|
||||||
|
messages: [{ role: currentMessage.role, content: currentMessage.content }],
|
||||||
|
stream: true,
|
||||||
|
}),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Process each chunk from Ollama
|
||||||
|
const reader = response.body?.getReader();
|
||||||
|
if (reader) {
|
||||||
|
try {
|
||||||
|
while (true) {
|
||||||
|
const { done, value } = await reader.read();
|
||||||
|
if (done) break;
|
||||||
|
|
||||||
|
const chunk = new TextDecoder().decode(value);
|
||||||
|
const lines = chunk.split('\n');
|
||||||
|
|
||||||
|
for (const line of lines) {
|
||||||
|
if (line.trim()) {
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(line);
|
||||||
|
const content = parsed.message?.content;
|
||||||
|
if (content) {
|
||||||
|
controller.enqueue(content);
|
||||||
|
}
|
||||||
|
} catch (e) {
|
||||||
|
console.error('Failed to parse Ollama response:', e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} finally {
|
||||||
|
reader.releaseLock();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
currentMessage = null;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
|
||||||
|
flush(controller) {
|
||||||
|
if (buffer) {
|
||||||
|
try {
|
||||||
|
const message = JSON.parse(buffer);
|
||||||
|
controller.enqueue(message.content || '');
|
||||||
|
} catch (e) {
|
||||||
|
console.error('Failed to parse remaining buffer:', e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Connect the input to our transform stream
|
||||||
|
return input.pipeThrough(transform);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Implementing the synchronous chat interaction
|
||||||
|
public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
|
||||||
|
// Format messages for Ollama
|
||||||
|
const messages = [
|
||||||
|
{ role: 'system', content: optionsArg.systemMessage },
|
||||||
|
...optionsArg.messageHistory,
|
||||||
|
      { role: 'user', content: optionsArg.userMessage }
    ];

    // Make API call to Ollama
    const response = await fetch(`${this.baseUrl}/api/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: this.model,
        messages: messages,
        stream: false
      }),
    });

    if (!response.ok) {
      throw new Error(`Ollama API error: ${response.statusText}`);
    }

    const result = await response.json();

    return {
      role: 'assistant' as const,
      message: result.message.content,
    };
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('Audio generation is not supported by Ollama.');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    const base64Image = optionsArg.image.toString('base64');

    const response = await fetch(`${this.baseUrl}/api/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: this.visionModel,
        messages: [{
          role: 'user',
          content: optionsArg.prompt,
          images: [base64Image]
        }],
        stream: false
      }),
    });

    if (!response.ok) {
      throw new Error(`Ollama API error: ${response.statusText}`);
    }

    const result = await response.json();
    return result.message.content;
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    // Convert PDF documents to images using SmartPDF
    let documentImageBytesArray: Uint8Array[] = [];

    for (const pdfDocument of optionsArg.pdfDocuments) {
      const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
      documentImageBytesArray = documentImageBytesArray.concat(documentImageArray);
    }

    // Convert images to base64
    const base64Images = documentImageBytesArray.map(bytes => Buffer.from(bytes).toString('base64'));

    // Send request to Ollama with images
    const response = await fetch(`${this.baseUrl}/api/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: this.visionModel,
        messages: [
          { role: 'system', content: optionsArg.systemMessage },
          ...optionsArg.messageHistory,
          {
            role: 'user',
            content: optionsArg.userMessage,
            images: base64Images
          }
        ],
        stream: false
      }),
    });

    if (!response.ok) {
      throw new Error(`Ollama API error: ${response.statusText}`);
    }

    const result = await response.json();
    return {
      message: {
        role: 'assistant',
        content: result.message.content
      }
    };
  }

  public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
    throw new Error('Research capabilities are not yet supported by Ollama provider.');
  }

  /**
   * Image generation is not supported by Ollama
   */
  public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
    throw new Error('Image generation is not supported by Ollama. Please use OpenAI provider for image generation.');
  }

  /**
   * Image editing is not supported by Ollama
   */
  public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
    throw new Error('Image editing is not supported by Ollama. Please use OpenAI provider for image editing.');
  }
}
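For orientation, here is a minimal usage sketch of the Ollama provider above. The class name, export path, and constructor options (baseUrl, model, visionModel) are assumptions inferred from the instance fields the methods reference, not something this hunk shows directly.

import { promises as fs } from 'fs';
// Assumed export name and path; adjust to the actual module layout.
import { OllamaProvider } from './provider.ollama.js';

async function describeScreenshot() {
  // baseUrl, model and visionModel mirror the instance fields used in chat()/vision() above.
  const ollama = new OllamaProvider({
    baseUrl: 'http://localhost:11434',
    model: 'llama3',
    visionModel: 'llava',
  });
  await ollama.start();

  const image = await fs.readFile('./screenshot.png');
  const description = await ollama.vision({ image, prompt: 'Describe this screenshot.' });
  console.log(description);

  await ollama.stop();
}

describeScreenshot().catch(console.error);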
@@ -1,16 +1,35 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { Readable } from 'stream';

// Custom type definition for chat completion messages
export type TChatCompletionRequestMessage = {
  role: "system" | "user" | "assistant";
  content: string;
};

import { MultiModalModel } from './abstract.classes.multimodal.js';
import type {
  ResearchOptions,
  ResearchResponse,
  ImageGenerateOptions,
  ImageEditOptions,
  ImageResponse
} from './abstract.classes.multimodal.js';

export interface IOpenaiProviderOptions {
  openaiToken: string;
  chatModel?: string;
  audioModel?: string;
  visionModel?: string;
  researchModel?: string;
  imageModel?: string;
  enableWebSearch?: boolean;
}

export class OpenAiProvider extends MultiModalModel {
  private options: IOpenaiProviderOptions;
  public openAiApiClient: plugins.openai.default;
  public smartpdfInstance: plugins.smartpdf.SmartPdf;

  constructor(optionsArg: IOpenaiProviderOptions) {
    super();
@@ -18,20 +37,90 @@ export class OpenAiProvider extends MultiModalModel {
  }

  public async start() {
    await super.start();
    this.openAiApiClient = new plugins.openai.default({
      apiKey: this.options.openaiToken,
      dangerouslyAllowBrowser: true,
    });
    this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
  }

  public async stop() {
    await super.stop();
  }

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: {
      role: "function" | "user" | "system" | "assistant" | "tool" | "developer";
      content: string;
    } | null = null;

    // Create a TransformStream to process the input
    const transform = new TransformStream<Uint8Array, string>({
      transform: async (chunk, controller) => {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: (message.role || 'user') as "function" | "user" | "system" | "assistant" | "tool" | "developer",
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }

        // If we have a complete message, send it to OpenAI
        if (currentMessage) {
          const messageToSend = { role: "user" as const, content: currentMessage.content };
          const chatModel = this.options.chatModel ?? 'gpt-5-mini';
          const requestParams: any = {
            model: chatModel,
            messages: [messageToSend],
            stream: true,
          };
          // Temperature is omitted since the model does not support it.
          const stream = await this.openAiApiClient.chat.completions.create(requestParams);
          // Explicitly cast the stream as an async iterable to satisfy TypeScript.
          const streamAsyncIterable = stream as unknown as AsyncIterableIterator<any>;
          // Process each chunk from OpenAI
          for await (const chunk of streamAsyncIterable) {
            const content = chunk.choices[0]?.delta?.content;
            if (content) {
              controller.enqueue(content);
            }
          }
          currentMessage = null;
        }
      },

      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });

    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  // Implementing the synchronous chat interaction
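Because chatStream() splits the incoming byte stream on newlines and JSON-parses each line, callers are expected to frame every message as one JSON object per line. A minimal sketch of driving it from an async context, assuming a started OpenAiProvider instance named openai:

const encoder = new TextEncoder();

// One complete JSON message per line, matching the line-based parsing in chatStream() above.
const input = new ReadableStream<Uint8Array>({
  start(controller) {
    controller.enqueue(encoder.encode(JSON.stringify({ role: 'user', content: 'Hello there!' }) + '\n'));
    controller.close();
  },
});

const output = await openai.chatStream(input);
const reader = output.getReader();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  process.stdout.write(value); // streamed completion text
}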
@@ -43,15 +132,17 @@ export class OpenAiProvider extends MultiModalModel {
      content: string;
    }[];
  }) {
    const chatModel = this.options.chatModel ?? 'gpt-5-mini';
    const requestParams: any = {
      model: chatModel,
      messages: [
        { role: 'system', content: optionsArg.systemMessage },
        ...optionsArg.messageHistory,
        { role: 'user', content: optionsArg.userMessage },
      ],
    };
    // Temperature parameter removed to avoid unsupported error.
    const result = await this.openAiApiClient.chat.completions.create(requestParams);
    return {
      role: result.choices[0].message.role as 'assistant',
      message: result.choices[0].message.content,
@@ -61,14 +152,15 @@ export class OpenAiProvider extends MultiModalModel {
  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    const done = plugins.smartpromise.defer<NodeJS.ReadableStream>();
    const result = await this.openAiApiClient.audio.speech.create({
      model: this.options.audioModel ?? 'tts-1-hd',
      input: optionsArg.message,
      voice: 'nova',
      response_format: 'mp3',
      speed: 1,
    });
    const stream = result.body;
    const nodeStream = Readable.fromWeb(stream as any);
    done.resolve(nodeStream);
    return done.promise;
  }

@@ -83,6 +175,7 @@ export class OpenAiProvider extends MultiModalModel {
  }) {
    let pdfDocumentImageBytesArray: Uint8Array[] = [];

    // Convert each PDF into one or more image byte arrays.
    for (const pdfDocument of optionsArg.pdfDocuments) {
      const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
      pdfDocumentImageBytesArray = pdfDocumentImageBytesArray.concat(documentImageArray);
@@ -91,19 +184,18 @@ export class OpenAiProvider extends MultiModalModel {
    console.log(`image smartfile array`);
    console.log(pdfDocumentImageBytesArray.map((smartfile) => smartfile.length));

    // Filter out any empty buffers to avoid sending invalid image URLs.
    const validImageBytesArray = pdfDocumentImageBytesArray.filter(imageBytes => imageBytes && imageBytes.length > 0);
    const imageAttachments = validImageBytesArray.map(imageBytes => ({
      type: 'image_url',
      image_url: {
        url: 'data:image/png;base64,' + Buffer.from(imageBytes).toString('base64'),
      },
    }));

    const chatModel = this.options.chatModel ?? 'gpt-5-mini';
    const requestParams: any = {
      model: chatModel,
      messages: [
        { role: 'system', content: optionsArg.systemMessage },
        ...optionsArg.messageHistory,
@@ -111,24 +203,253 @@ export class OpenAiProvider extends MultiModalModel {
          role: 'user',
          content: [
            { type: 'text', text: optionsArg.userMessage },
            ...imageAttachments,
          ],
        },
      ],
    };
    // Temperature parameter removed.
    const result = await this.openAiApiClient.chat.completions.create(requestParams);
    return {
      message: result.choices[0].message,
    };
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    const visionModel = this.options.visionModel ?? 'o4-mini';
    const requestParams: any = {
      model: visionModel,
      messages: [
        {
          role: 'user',
          content: [
            { type: 'text', text: optionsArg.prompt },
            {
              type: 'image_url',
              image_url: {
                url: `data:image/jpeg;base64,${optionsArg.image.toString('base64')}`
              }
            }
          ]
        }
      ],
      max_tokens: 300
    };
    const result = await this.openAiApiClient.chat.completions.create(requestParams);
    return result.choices[0].message.content || '';
  }

  public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
    // Determine which model to use - Deep Research API requires specific models
    let model: string;
    if (optionsArg.searchDepth === 'deep') {
      model = this.options.researchModel || 'o4-mini-deep-research-2025-06-26';
    } else {
      // For basic/advanced, still use deep research models if web search is needed
      if (optionsArg.includeWebSearch) {
        model = this.options.researchModel || 'o4-mini-deep-research-2025-06-26';
      } else {
        model = this.options.chatModel || 'gpt-5-mini';
      }
    }

    const systemMessage = 'You are a research assistant. Provide comprehensive answers with citations and sources when available.';

    // Prepare request parameters using Deep Research API format
    const requestParams: any = {
      model,
      instructions: systemMessage,
      input: optionsArg.query
    };

    // Add web search tool if requested
    if (optionsArg.includeWebSearch || optionsArg.searchDepth === 'deep') {
      requestParams.tools = [
        {
          type: 'web_search_preview',
          search_context_size: optionsArg.searchDepth === 'deep' ? 'high' :
                               optionsArg.searchDepth === 'advanced' ? 'medium' : 'low'
        }
      ];
    }

    // Add background flag for deep research
    if (optionsArg.background && optionsArg.searchDepth === 'deep') {
      requestParams.background = true;
    }

    try {
      // Execute the research request using Deep Research API
      const result = await this.openAiApiClient.responses.create(requestParams);

      // Extract the answer from output items
      let answer = '';
      const sources: Array<{ url: string; title: string; snippet: string }> = [];
      const searchQueries: string[] = [];

      // Process output items
      for (const item of result.output || []) {
        // Extract message content
        if (item.type === 'message' && 'content' in item) {
          const messageItem = item as any;
          for (const contentItem of messageItem.content || []) {
            if (contentItem.type === 'output_text' && 'text' in contentItem) {
              answer += contentItem.text;
            }
          }
        }

        // Extract web search queries
        if (item.type === 'web_search_call' && 'action' in item) {
          const searchItem = item as any;
          if (searchItem.action && searchItem.action.type === 'search' && 'query' in searchItem.action) {
            searchQueries.push(searchItem.action.query);
          }
        }
      }

      // Parse sources from markdown links in the answer
      const urlRegex = /\[([^\]]+)\]\(([^)]+)\)/g;
      let match: RegExpExecArray | null;

      while ((match = urlRegex.exec(answer)) !== null) {
        sources.push({
          title: match[1],
          url: match[2],
          snippet: ''
        });
      }

      return {
        answer,
        sources,
        searchQueries: searchQueries.length > 0 ? searchQueries : undefined,
        metadata: {
          model,
          searchDepth: optionsArg.searchDepth || 'basic',
          tokensUsed: result.usage?.total_tokens
        }
      };
    } catch (error) {
      console.error('Research API error:', error);
      throw new Error(`Failed to perform research: ${error.message}`);
    }
  }

  /**
   * Image generation using OpenAI's gpt-image-1 or DALL-E models
   */
  public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
    const model = optionsArg.model || this.options.imageModel || 'gpt-image-1';

    try {
      const requestParams: any = {
        model,
        prompt: optionsArg.prompt,
        n: optionsArg.n || 1,
      };

      // Add gpt-image-1 specific parameters
      if (model === 'gpt-image-1') {
        if (optionsArg.quality) requestParams.quality = optionsArg.quality;
        if (optionsArg.size) requestParams.size = optionsArg.size;
        if (optionsArg.background) requestParams.background = optionsArg.background;
        if (optionsArg.outputFormat) requestParams.output_format = optionsArg.outputFormat;
        if (optionsArg.outputCompression !== undefined) requestParams.output_compression = optionsArg.outputCompression;
        if (optionsArg.moderation) requestParams.moderation = optionsArg.moderation;
        if (optionsArg.stream !== undefined) requestParams.stream = optionsArg.stream;
        if (optionsArg.partialImages !== undefined) requestParams.partial_images = optionsArg.partialImages;
      } else if (model === 'dall-e-3') {
        // DALL-E 3 specific parameters
        if (optionsArg.quality) requestParams.quality = optionsArg.quality;
        if (optionsArg.size) requestParams.size = optionsArg.size;
        if (optionsArg.style) requestParams.style = optionsArg.style;
        requestParams.response_format = 'b64_json'; // Always use base64 for consistency
      } else if (model === 'dall-e-2') {
        // DALL-E 2 specific parameters
        if (optionsArg.size) requestParams.size = optionsArg.size;
        requestParams.response_format = 'b64_json';
      }

      const result = await this.openAiApiClient.images.generate(requestParams);

      const images = (result.data || []).map(img => ({
        b64_json: img.b64_json,
        url: img.url,
        revisedPrompt: img.revised_prompt
      }));

      return {
        images,
        metadata: {
          model,
          quality: result.quality,
          size: result.size,
          outputFormat: result.output_format,
          tokensUsed: result.usage?.total_tokens
        }
      };
    } catch (error) {
      console.error('Image generation error:', error);
      throw new Error(`Failed to generate image: ${error.message}`);
    }
  }

  /**
   * Image editing using OpenAI's gpt-image-1 or DALL-E 2 models
   */
  public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
    const model = optionsArg.model || this.options.imageModel || 'gpt-image-1';

    try {
      const requestParams: any = {
        model,
        image: optionsArg.image,
        prompt: optionsArg.prompt,
        n: optionsArg.n || 1,
      };

      // Add mask if provided
      if (optionsArg.mask) {
        requestParams.mask = optionsArg.mask;
      }

      // Add gpt-image-1 specific parameters
      if (model === 'gpt-image-1') {
        if (optionsArg.quality) requestParams.quality = optionsArg.quality;
        if (optionsArg.size) requestParams.size = optionsArg.size;
        if (optionsArg.background) requestParams.background = optionsArg.background;
        if (optionsArg.outputFormat) requestParams.output_format = optionsArg.outputFormat;
        if (optionsArg.outputCompression !== undefined) requestParams.output_compression = optionsArg.outputCompression;
        if (optionsArg.stream !== undefined) requestParams.stream = optionsArg.stream;
        if (optionsArg.partialImages !== undefined) requestParams.partial_images = optionsArg.partialImages;
      } else if (model === 'dall-e-2') {
        // DALL-E 2 specific parameters
        if (optionsArg.size) requestParams.size = optionsArg.size;
        requestParams.response_format = 'b64_json';
      }

      const result = await this.openAiApiClient.images.edit(requestParams);

      const images = (result.data || []).map(img => ({
        b64_json: img.b64_json,
        url: img.url,
        revisedPrompt: img.revised_prompt
      }));

      return {
        images,
        metadata: {
          model,
          quality: result.quality,
          size: result.size,
          outputFormat: result.output_format,
          tokensUsed: result.usage?.total_tokens
        }
      };
    } catch (error) {
      console.error('Image edit error:', error);
      throw new Error(`Failed to edit image: ${error.message}`);
    }
  }
}
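Putting the OpenAI provider together, here is a hedged end-to-end sketch of running a web-backed research query and generating an image, then saving the base64 result to disk. The import path is an assumption; the option fields follow the usages visible in the code above.

import { promises as fs } from 'fs';
import { OpenAiProvider } from './provider.openai.js'; // assumed path

async function main() {
  const openai = new OpenAiProvider({ openaiToken: process.env.OPENAI_TOKEN! });
  await openai.start();

  // Research with web search; searchDepth 'deep' would switch to the deep-research model fallback.
  const research = await openai.research({
    query: 'What are the latest developments in battery recycling?',
    searchDepth: 'basic',
    includeWebSearch: true,
  });
  console.log(research.answer);
  console.log(research.sources.map((s) => s.url));

  // Image generation with gpt-image-1; results come back as base64 payloads.
  const generated = await openai.imageGenerate({
    prompt: 'A watercolor fox in a winter forest',
    model: 'gpt-image-1',
    size: '1024x1024',
  });
  const first = generated.images[0];
  if (first?.b64_json) {
    await fs.writeFile('./fox.png', Buffer.from(first.b64_json, 'base64'));
  }

  await openai.stop();
}

main().catch(console.error);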
@@ -1,3 +1,259 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type {
  ChatOptions,
  ChatResponse,
  ChatMessage,
  ResearchOptions,
  ResearchResponse,
  ImageGenerateOptions,
  ImageEditOptions,
  ImageResponse
} from './abstract.classes.multimodal.js';

export interface IPerplexityProviderOptions {
  perplexityToken: string;
}

export class PerplexityProvider extends MultiModalModel {
  private options: IPerplexityProviderOptions;

  constructor(optionsArg: IPerplexityProviderOptions) {
    super();
    this.options = optionsArg;
  }

  async start() {
    // Initialize any necessary clients or resources
  }

  async stop() {}

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // Create a TransformStream to process the input
    // (arrow function keeps `this` bound to the provider so this.options is reachable)
    const transform = new TransformStream<Uint8Array, string>({
      transform: async (chunk, controller) => {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }

        // If we have a complete message, send it to Perplexity
        if (currentMessage) {
          const response = await fetch('https://api.perplexity.ai/chat/completions', {
            method: 'POST',
            headers: {
              'Authorization': `Bearer ${this.options.perplexityToken}`,
              'Content-Type': 'application/json',
            },
            body: JSON.stringify({
              model: 'mixtral-8x7b-instruct',
              messages: [{ role: currentMessage.role, content: currentMessage.content }],
              stream: true,
            }),
          });

          // Process each chunk from Perplexity
          const reader = response.body?.getReader();
          if (reader) {
            try {
              while (true) {
                const { done, value } = await reader.read();
                if (done) break;

                const chunk = new TextDecoder().decode(value);
                const lines = chunk.split('\n');

                for (const line of lines) {
                  if (line.startsWith('data: ')) {
                    const data = line.slice(6);
                    if (data === '[DONE]') break;

                    try {
                      const parsed = JSON.parse(data);
                      const content = parsed.choices[0]?.delta?.content;
                      if (content) {
                        controller.enqueue(content);
                      }
                    } catch (e) {
                      console.error('Failed to parse SSE data:', e);
                    }
                  }
                }
              }
            } finally {
              reader.releaseLock();
            }
          }

          currentMessage = null;
        }
      },

      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });

    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  // Implementing the synchronous chat interaction
  public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
    // Make API call to Perplexity
    const response = await fetch('https://api.perplexity.ai/chat/completions', {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.options.perplexityToken}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: 'mixtral-8x7b-instruct', // Using Mixtral model
        messages: [
          { role: 'system', content: optionsArg.systemMessage },
          ...optionsArg.messageHistory,
          { role: 'user', content: optionsArg.userMessage }
        ],
      }),
    });

    if (!response.ok) {
      throw new Error(`Perplexity API error: ${response.statusText}`);
    }

    const result = await response.json();

    return {
      role: 'assistant' as const,
      message: result.choices[0].message.content,
    };
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('Audio generation is not supported by Perplexity.');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('Vision tasks are not supported by Perplexity.');
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    throw new Error('Document processing is not supported by Perplexity.');
  }

  public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
    // Perplexity has Sonar models that are optimized for search
    // sonar models: sonar, sonar-pro
    const model = optionsArg.searchDepth === 'deep' ? 'sonar-pro' : 'sonar';

    try {
      const response = await fetch('https://api.perplexity.ai/chat/completions', {
        method: 'POST',
        headers: {
          'Authorization': `Bearer ${this.options.perplexityToken}`,
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          model,
          messages: [
            {
              role: 'system',
              content: 'You are a helpful research assistant. Provide accurate information with sources.'
            },
            {
              role: 'user',
              content: optionsArg.query
            }
          ],
          temperature: 0.7,
          max_tokens: 4000
        }),
      });

      if (!response.ok) {
        throw new Error(`Perplexity API error: ${response.statusText}`);
      }

      const result = await response.json();
      const answer = result.choices[0].message.content;

      // Parse citations from the response
      const sources: Array<{ url: string; title: string; snippet: string }> = [];

      // Perplexity includes citations in the format [1], [2], etc. with sources listed
      // This is a simplified parser - could be enhanced based on actual Perplexity response format
      if (result.citations) {
        for (const citation of result.citations) {
          sources.push({
            url: citation.url || '',
            title: citation.title || '',
            snippet: citation.snippet || ''
          });
        }
      }

      return {
        answer,
        sources,
        metadata: {
          model,
          searchDepth: optionsArg.searchDepth || 'basic'
        }
      };
    } catch (error) {
      console.error('Perplexity research error:', error);
      throw new Error(`Failed to perform research: ${error.message}`);
    }
  }

  /**
   * Image generation is not supported by Perplexity
   */
  public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
    throw new Error('Image generation is not supported by Perplexity. Please use OpenAI provider for image generation.');
  }

  /**
   * Image editing is not supported by Perplexity
   */
  public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
    throw new Error('Image editing is not supported by Perplexity. Please use OpenAI provider for image editing.');
  }
}
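A short usage sketch for the Perplexity provider's research API; the import path is an assumption, and the model choice mirrors the sonar/sonar-pro selection in research() above.

import { PerplexityProvider } from './provider.perplexity.js'; // assumed path

async function main() {
  const perplexity = new PerplexityProvider({ perplexityToken: process.env.PERPLEXITY_TOKEN! });
  await perplexity.start();

  // searchDepth 'deep' selects sonar-pro; any other depth falls back to sonar.
  const result = await perplexity.research({
    query: 'Current state of EU AI Act enforcement',
    searchDepth: 'deep',
  });
  console.log(result.answer);
  console.log(result.sources);

  await perplexity.stop();
}

main().catch(console.error);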
ts/provider.xai.ts (new file, 211 lines)
@@ -0,0 +1,211 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type {
  ChatOptions,
  ChatResponse,
  ChatMessage,
  ResearchOptions,
  ResearchResponse,
  ImageGenerateOptions,
  ImageEditOptions,
  ImageResponse
} from './abstract.classes.multimodal.js';
import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';

export interface IXAIProviderOptions {
  xaiToken: string;
}

export class XAIProvider extends MultiModalModel {
  private options: IXAIProviderOptions;
  public openAiApiClient: plugins.openai.default;

  constructor(optionsArg: IXAIProviderOptions) {
    super();
    this.options = optionsArg;
  }

  public async start() {
    await super.start();
    this.openAiApiClient = new plugins.openai.default({
      apiKey: this.options.xaiToken,
      baseURL: 'https://api.x.ai/v1',
    });
  }

  public async stop() {
    await super.stop();
  }

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // Create a TransformStream to process the input
    // (arrow function keeps `this` bound to the provider so this.openAiApiClient is reachable)
    const transform = new TransformStream<Uint8Array, string>({
      transform: async (chunk, controller) => {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }

        // If we have a complete message, send it to X.AI
        if (currentMessage) {
          const stream = await this.openAiApiClient.chat.completions.create({
            model: 'grok-2-latest',
            messages: [{ role: currentMessage.role, content: currentMessage.content }],
            stream: true,
          });

          // Process each chunk from X.AI
          for await (const chunk of stream) {
            const content = chunk.choices[0]?.delta?.content;
            if (content) {
              controller.enqueue(content);
            }
          }

          currentMessage = null;
        }
      },

      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });

    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  public async chat(optionsArg: {
    systemMessage: string;
    userMessage: string;
    messageHistory: { role: string; content: string; }[];
  }): Promise<{ role: 'assistant'; message: string; }> {
    // Prepare messages array with system message, history, and user message
    const messages: ChatCompletionMessageParam[] = [
      { role: 'system', content: optionsArg.systemMessage },
      ...optionsArg.messageHistory.map(msg => ({
        role: msg.role as 'system' | 'user' | 'assistant',
        content: msg.content
      })),
      { role: 'user', content: optionsArg.userMessage }
    ];

    // Call X.AI's chat completion API
    const completion = await this.openAiApiClient.chat.completions.create({
      model: 'grok-2-latest',
      messages: messages,
      stream: false,
    });

    // Return the assistant's response
    return {
      role: 'assistant',
      message: completion.choices[0]?.message?.content || ''
    };
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('Audio generation is not supported by X.AI');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('Vision tasks are not supported by X.AI');
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: { role: string; content: string; }[];
  }): Promise<{ message: any }> {
    // First convert PDF documents to images
    let pdfDocumentImageBytesArray: Uint8Array[] = [];

    for (const pdfDocument of optionsArg.pdfDocuments) {
      const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
      pdfDocumentImageBytesArray = pdfDocumentImageBytesArray.concat(documentImageArray);
    }

    // Convert images to base64 for inclusion in the message
    const imageBase64Array = pdfDocumentImageBytesArray.map(bytes =>
      Buffer.from(bytes).toString('base64')
    );

    // Combine document images into the user message
    const enhancedUserMessage = `
${optionsArg.userMessage}

Document contents (as images):
${imageBase64Array.map((img, i) => `Image ${i + 1}: <image data>`).join('\n')}
`;

    // Use chat completion to analyze the documents
    const messages: ChatCompletionMessageParam[] = [
      { role: 'system', content: optionsArg.systemMessage },
      ...optionsArg.messageHistory.map(msg => ({
        role: msg.role as 'system' | 'user' | 'assistant',
        content: msg.content
      })),
      { role: 'user', content: enhancedUserMessage }
    ];

    const completion = await this.openAiApiClient.chat.completions.create({
      model: 'grok-2-latest',
      messages: messages,
      stream: false,
    });

    return {
      message: completion.choices[0]?.message?.content || ''
    };
  }

  public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
    throw new Error('Research capabilities are not yet supported by xAI provider.');
  }

  /**
   * Image generation is not supported by xAI
   */
  public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
    throw new Error('Image generation is not supported by xAI. Please use OpenAI provider for image generation.');
  }

  /**
   * Image editing is not supported by xAI
   */
  public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
    throw new Error('Image editing is not supported by xAI. Please use OpenAI provider for image editing.');
  }
}
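Finally, a matching sketch for the xAI provider's synchronous chat; the import path is an assumption, while the option shape follows the chat() signature above.

import { XAIProvider } from './provider.xai.js'; // assumed path

async function main() {
  const xai = new XAIProvider({ xaiToken: process.env.XAI_TOKEN! });
  await xai.start();

  const reply = await xai.chat({
    systemMessage: 'You are a concise assistant.',
    userMessage: 'Summarize Grok in one sentence.',
    messageHistory: [],
  });
  console.log(reply.message);

  await xai.stop();
}

main().catch(console.error);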