Compare commits

23 commits:

4bf7113334
6bdbeae144
09c27379cb
2bc6f7ee5e
0ac50d647d
5f9ffc7356
502b665224
bda0d7ed7e
de2a60d12f
5b3a93a43a
6b241f8889
0a80ac0a8a
6ce442354e
9b38a3c06e
5dead05324
6916dd9e2a
f89888a542
d93b198b09
9e390d0fdb
8329ee861e
b8585a0afb
c96f5118cf
17e1a1f1e1
changelog.md (81 lines changed)

@@ -1,5 +1,86 @@
 # Changelog
+
+## 2025-07-25 - 0.5.5 - feat(documentation)
+
+Comprehensive documentation enhancement and test improvements
+
+- Completely rewrote readme.md with detailed provider comparisons, advanced usage examples, and performance tips
+- Added comprehensive examples for all supported providers (OpenAI, Anthropic, Perplexity, Groq, XAI, Ollama, Exo)
+- Included detailed sections on chat interactions, streaming, TTS, vision processing, and document analysis
+- Added verbose flag to test script for better debugging
+
+## 2025-05-13 - 0.5.4 - fix(provider.openai)
+
+Update dependency versions, clean test imports, and adjust default OpenAI model configurations
+
+- Bump dependency versions in package.json (@git.zone/tsbuild, @push.rocks/tapbundle, openai, etc.)
+- Change default chatModel from 'gpt-4o' to 'o4-mini' and visionModel from 'gpt-4o' to '04-mini' in provider.openai.ts
+- Remove unused 'expectAsync' import from test file
+
+## 2025-04-03 - 0.5.3 - fix(package.json)
+
+Add explicit packageManager field to package.json
+
+- Include the packageManager property to specify the pnpm version and checksum.
+- Align package metadata with current standards.
+
+## 2025-04-03 - 0.5.2 - fix(readme)
+
+Remove redundant conclusion section from README to streamline documentation.
+
+- Eliminated the conclusion block describing SmartAi's capabilities and documentation pointers.
+
+## 2025-02-25 - 0.5.1 - fix(OpenAiProvider)
+
+Corrected audio model ID in OpenAiProvider
+
+- Fixed audio model identifier from 'o3-mini' to 'tts-1-hd' in the OpenAiProvider's audio method.
+- Addressed minor code formatting issues in test suite for better readability.
+- Corrected spelling errors in test documentation and comments.
+
+## 2025-02-25 - 0.5.0 - feat(documentation and configuration)
+
+Enhanced package and README documentation
+
+- Expanded the package description to better reflect the library's capabilities.
+- Improved README with detailed usage examples for initialization, chat interactions, streaming chat, audio generation, document analysis, and vision processing.
+- Provided error handling strategies and advanced streaming customization examples.
+
+## 2025-02-25 - 0.4.2 - fix(core)
+
+Fix OpenAI chat streaming and PDF document processing logic.
+
+- Updated OpenAI chat streaming to handle new async iterable format.
+- Improved PDF document processing by filtering out empty image buffers.
+- Removed unsupported temperature options from OpenAI requests.
+
+## 2025-02-25 - 0.4.1 - fix(provider)
+
+Fix provider modules for consistency
+
+- Updated TypeScript interfaces and options in provider modules for better type safety.
+- Modified transform stream handlers in Exo, Groq, and Ollama providers for consistency.
+- Added optional model options to OpenAI provider for custom model usage.
+
+## 2025-02-08 - 0.4.0 - feat(core)
+
+Added support for Exo AI provider
+
+- Introduced ExoProvider with chat functionalities.
+- Updated SmartAi class to initialize ExoProvider.
+- Extended Conversation class to support ExoProvider.
+
+## 2025-02-05 - 0.3.3 - fix(documentation)
+
+Update readme with detailed license and legal information.
+
+- Added explicit section on License and Legal Information in the README.
+- Clarified the use of trademarks and company information.
+
+## 2025-02-05 - 0.3.2 - fix(documentation)
+
+Remove redundant badges from readme
+
+- Removed Build Status badge from the readme file.
+- Removed License badge from the readme file.
+
+## 2025-02-05 - 0.3.1 - fix(documentation)
+
+Updated README structure and added detailed usage examples
+
+- Introduced a Table of Contents
+- Included comprehensive sections for chat, streaming chat, audio generation, document processing, and vision processing
+- Added example code and detailed configuration steps for supported AI providers
+- Clarified the development setup with instructions for running tests and building the project
+
 ## 2025-02-05 - 0.3.0 - feat(integration-xai)
 
 Add support for X.AI provider with chat and document processing capabilities.
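The 0.5.4 entry only names the new OpenAI model defaults; the option fields that make them configurable (`chatModel`, `audioModel`, `visionModel`) appear in the provider.openai.ts hunk further down. A minimal sketch of overriding those defaults, assuming `OpenAiProvider` is exported from the package index (otherwise import it from `./provider.openai.js`):

```typescript
import { OpenAiProvider } from '@push.rocks/smartai';

// Hypothetical override of the new defaults ('o4-mini' for chat, 'tts-1-hd' for audio).
const openai = new OpenAiProvider({
  openaiToken: 'your-openai-api-key',
  chatModel: 'gpt-4o',
  audioModel: 'tts-1-hd',
  visionModel: 'gpt-4o',
});
await openai.start();

const reply = await openai.chat({
  systemMessage: 'You are a helpful assistant.',
  userMessage: 'Say hello.',
  messageHistory: [],
});
console.log(reply.message);
```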
@@ -5,20 +5,33 @@
     "githost": "code.foss.global",
     "gitscope": "push.rocks",
     "gitrepo": "smartai",
-    "description": "A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.",
+    "description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
     "npmPackagename": "@push.rocks/smartai",
     "license": "MIT",
     "projectDomain": "push.rocks",
     "keywords": [
       "AI integration",
-      "chatbot",
       "TypeScript",
+      "chatbot",
       "OpenAI",
       "Anthropic",
-      "multi-model support",
-      "audio responses",
+      "multi-model",
+      "audio generation",
       "text-to-speech",
-      "streaming chat"
+      "document processing",
+      "vision processing",
+      "streaming chat",
+      "API",
+      "multiple providers",
+      "AI models",
+      "synchronous chat",
+      "asynchronous chat",
+      "real-time interaction",
+      "content analysis",
+      "image description",
+      "document classification",
+      "AI toolkit",
+      "provider switching"
     ]
   }
 },
package.json (64 lines changed)

@@ -1,37 +1,37 @@
 {
   "name": "@push.rocks/smartai",
-  "version": "0.3.0",
+  "version": "0.5.5",
   "private": false,
-  "description": "A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.",
+  "description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
   "main": "dist_ts/index.js",
   "typings": "dist_ts/index.d.ts",
   "type": "module",
   "author": "Task Venture Capital GmbH",
   "license": "MIT",
   "scripts": {
-    "test": "(tstest test/ --web)",
+    "test": "(tstest test/ --web --verbose)",
     "build": "(tsbuild --web --allowimplicitany)",
     "buildDocs": "(tsdoc)"
   },
   "devDependencies": {
-    "@git.zone/tsbuild": "^2.1.84",
+    "@git.zone/tsbuild": "^2.6.4",
-    "@git.zone/tsbundle": "^2.0.5",
+    "@git.zone/tsbundle": "^2.5.1",
-    "@git.zone/tsrun": "^1.2.49",
+    "@git.zone/tsrun": "^1.3.3",
-    "@git.zone/tstest": "^1.0.90",
+    "@git.zone/tstest": "^2.3.2",
-    "@push.rocks/qenv": "^6.0.5",
+    "@push.rocks/qenv": "^6.1.0",
-    "@push.rocks/tapbundle": "^5.3.0",
+    "@push.rocks/tapbundle": "^6.0.3",
-    "@types/node": "^22.5.5"
+    "@types/node": "^22.15.17"
   },
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.27.3",
+    "@anthropic-ai/sdk": "^0.57.0",
-    "@push.rocks/smartarray": "^1.0.8",
+    "@push.rocks/smartarray": "^1.1.0",
-    "@push.rocks/smartfile": "^11.0.21",
+    "@push.rocks/smartfile": "^11.2.5",
     "@push.rocks/smartpath": "^5.0.18",
-    "@push.rocks/smartpdf": "^3.1.6",
+    "@push.rocks/smartpdf": "^3.2.2",
-    "@push.rocks/smartpromise": "^4.0.4",
+    "@push.rocks/smartpromise": "^4.2.3",
-    "@push.rocks/smartrequest": "^2.0.22",
+    "@push.rocks/smartrequest": "^2.1.0",
     "@push.rocks/webstream": "^1.0.10",
-    "openai": "^4.62.1"
+    "openai": "^5.10.2"
   },
   "repository": {
     "type": "git",

@@ -58,13 +58,33 @@
   ],
   "keywords": [
     "AI integration",
-    "chatbot",
     "TypeScript",
+    "chatbot",
     "OpenAI",
     "Anthropic",
-    "multi-model support",
-    "audio responses",
+    "multi-model",
+    "audio generation",
     "text-to-speech",
-    "streaming chat"
-  ]
+    "document processing",
+    "vision processing",
+    "streaming chat",
+    "API",
+    "multiple providers",
+    "AI models",
+    "synchronous chat",
+    "asynchronous chat",
+    "real-time interaction",
+    "content analysis",
+    "image description",
+    "document classification",
+    "AI toolkit",
+    "provider switching"
+  ],
+  "pnpm": {
+    "onlyBuiltDependencies": [
+      "esbuild",
+      "puppeteer"
+    ]
+  },
+  "packageManager": "pnpm@10.7.0+sha512.6b865ad4b62a1d9842b61d674a393903b871d9244954f652b8842c2b553c72176b278f64c463e52d40fff8aba385c235c8c9ecf5cc7de4fd78b8bb6d49633ab6"
 }
pnpm-lock.yaml (generated, 7807 lines changed)

File diff suppressed because it is too large.
readme.md (442 lines changed)

@@ -1,235 +1,393 @@
 # @push.rocks/smartai
 
-Provides a standardized interface for integrating and conversing with multiple AI models, supporting operations like chat, streaming interactions, and audio responses.
+SmartAi is a powerful TypeScript library that provides a unified interface for integrating with multiple AI providers including OpenAI, Anthropic, Perplexity, Ollama, Groq, XAI, and Exo. It offers comprehensive support for chat interactions, streaming conversations, text-to-speech, document analysis, and vision processing.
 
 ## Install
 
-To add @push.rocks/smartai to your project, run the following command in your terminal:
+To install SmartAi into your project, use pnpm:
 
 ```bash
-npm install @push.rocks/smartai
+pnpm install @push.rocks/smartai
 ```
 
-This command installs the package and adds it to your project's dependencies.
-
-## Supported AI Providers
-
-@push.rocks/smartai supports multiple AI providers, each with its own unique capabilities:
-
-### OpenAI
-- Models: GPT-4, GPT-3.5-turbo, GPT-4-vision-preview
-- Features: Chat, Streaming, Audio Generation, Vision, Document Processing
-- Configuration:
-  ```typescript
-  openaiToken: 'your-openai-token'
-  ```
-
-### X.AI
-- Models: Grok-2-latest
-- Features: Chat, Streaming, Document Processing
-- Configuration:
-  ```typescript
-  xaiToken: 'your-xai-token'
-  ```
-
-### Anthropic
-- Models: Claude-3-opus-20240229
-- Features: Chat, Streaming, Vision, Document Processing
-- Configuration:
-  ```typescript
-  anthropicToken: 'your-anthropic-token'
-  ```
-
-### Perplexity
-- Models: Mixtral-8x7b-instruct
-- Features: Chat, Streaming
-- Configuration:
-  ```typescript
-  perplexityToken: 'your-perplexity-token'
-  ```
-
-### Groq
-- Models: Llama-3.3-70b-versatile
-- Features: Chat, Streaming
-- Configuration:
-  ```typescript
-  groqToken: 'your-groq-token'
-  ```
-
-### Ollama
-- Models: Configurable (default: llama2, llava for vision/documents)
-- Features: Chat, Streaming, Vision, Document Processing
-- Configuration:
-  ```typescript
-  baseUrl: 'http://localhost:11434' // Optional
-  model: 'llama2' // Optional
-  visionModel: 'llava' // Optional, for vision and document tasks
-  ```
-
 ## Usage
 
-The `@push.rocks/smartai` package is a comprehensive solution for integrating and interacting with various AI models, designed to support operations ranging from chat interactions to audio responses. This documentation will guide you through the process of utilizing `@push.rocks/smartai` in your applications.
+SmartAi provides a clean, consistent API across all supported AI providers. This documentation covers all features with practical examples for each provider and capability.
 
-### Getting Started
+### Initialization
 
-Before you begin, ensure you have installed the package as described in the **Install** section above. Once installed, you can start integrating AI functionalities into your application.
-
-### Initializing SmartAi
-
-The first step is to import and initialize the `SmartAi` class with appropriate options for the AI services you plan to use:
+First, initialize SmartAi with the API tokens and configuration for the providers you want to use:
 
 ```typescript
 import { SmartAi } from '@push.rocks/smartai';
 
 const smartAi = new SmartAi({
-  openaiToken: 'your-openai-token',
-  xaiToken: 'your-xai-token',
-  anthropicToken: 'your-anthropic-token',
-  perplexityToken: 'your-perplexity-token',
-  groqToken: 'your-groq-token',
+  // OpenAI - for GPT models, DALL-E, and TTS
+  openaiToken: 'your-openai-api-key',
+
+  // Anthropic - for Claude models
+  anthropicToken: 'your-anthropic-api-key',
+
+  // Perplexity - for research-focused AI
+  perplexityToken: 'your-perplexity-api-key',
+
+  // Groq - for fast inference
+  groqToken: 'your-groq-api-key',
+
+  // XAI - for Grok models
+  xaiToken: 'your-xai-api-key',
+
+  // Ollama - for local models
   ollama: {
     baseUrl: 'http://localhost:11434',
-    model: 'llama2'
+    model: 'llama2', // default model for chat
+    visionModel: 'llava' // default model for vision
+  },
+
+  // Exo - for distributed inference
+  exo: {
+    baseUrl: 'http://localhost:8080/v1',
+    apiKey: 'your-exo-api-key'
   }
 });
 
+// Start the SmartAi instance
 await smartAi.start();
 ```
 
-### Chat Interactions
+## Supported Providers
+
+SmartAi supports the following AI providers:
+
+| Provider | Use Case | Key Features |
+|----------|----------|--------------|
+| **OpenAI** | General purpose, GPT models | Chat, streaming, TTS, vision, documents |
+| **Anthropic** | Claude models, safety-focused | Chat, streaming, vision, documents |
+| **Perplexity** | Research and factual queries | Chat, streaming, documents |
+| **Groq** | Fast inference | Chat, streaming |
+| **XAI** | Grok models | Chat, streaming |
+| **Ollama** | Local models | Chat, streaming, vision |
+| **Exo** | Distributed inference | Chat, streaming |
+
+## Core Features
+
+### 1. Chat Interactions
+
+SmartAi provides both synchronous and streaming chat capabilities across all supported providers.
 
 #### Synchronous Chat
 
-For simple question-answer interactions:
+Simple request-response interactions with any provider:
 
 ```typescript
-const response = await smartAi.openaiProvider.chat({
+// OpenAI Example
+const openAiResponse = await smartAi.openaiProvider.chat({
   systemMessage: 'You are a helpful assistant.',
   userMessage: 'What is the capital of France?',
-  messageHistory: [] // Previous messages in the conversation
+  messageHistory: []
 });
+console.log(openAiResponse.message); // "The capital of France is Paris."
 
-console.log(response.message);
+// Anthropic Example
+const anthropicResponse = await smartAi.anthropicProvider.chat({
+  systemMessage: 'You are a knowledgeable historian.',
+  userMessage: 'Tell me about the French Revolution',
+  messageHistory: []
+});
+console.log(anthropicResponse.message);
+
+// Using message history for context
+const contextualResponse = await smartAi.openaiProvider.chat({
+  systemMessage: 'You are a math tutor.',
+  userMessage: 'What about multiplication?',
+  messageHistory: [
+    { role: 'user', content: 'Can you teach me math?' },
+    { role: 'assistant', content: 'Of course! What would you like to learn?' }
+  ]
+});
 ```
 
 #### Streaming Chat
 
-For real-time, streaming interactions:
+For real-time, token-by-token responses:
 
 ```typescript
-const textEncoder = new TextEncoder();
-const textDecoder = new TextDecoder();
-
-// Create input and output streams
-const { writable, readable } = new TransformStream();
+// Create a readable stream for input
+const { readable, writable } = new TransformStream();
 const writer = writable.getWriter();
 
 // Send a message
-const message = {
+const encoder = new TextEncoder();
+await writer.write(encoder.encode(JSON.stringify({
   role: 'user',
-  content: 'Tell me a story about a brave knight'
-};
-
-writer.write(textEncoder.encode(JSON.stringify(message) + '\n'));
+  content: 'Write a haiku about programming'
+})));
+await writer.close();
 
-// Process the response stream
-const stream = await smartAi.openaiProvider.chatStream(readable);
-const reader = stream.getReader();
+// Get streaming response
+const responseStream = await smartAi.openaiProvider.chatStream(readable);
+const reader = responseStream.getReader();
+const decoder = new TextDecoder();
 
+// Read the stream
 while (true) {
   const { done, value } = await reader.read();
   if (done) break;
-  console.log('AI:', value); // Process each chunk of the response
+  process.stdout.write(value); // Print each chunk as it arrives
 }
 ```
 
-### Audio Generation
+### 2. Text-to-Speech (Audio Generation)
 
-For providers that support audio generation (currently OpenAI):
+Convert text to natural-sounding speech (currently supported by OpenAI):
 
 ```typescript
+import * as fs from 'fs';
+
+// Generate speech from text
 const audioStream = await smartAi.openaiProvider.audio({
-  message: 'Hello, this is a test of text-to-speech'
+  message: 'Hello world! This is a test of the text-to-speech system.'
 });
 
-// Handle the audio stream (e.g., save to file or play)
+// Save to file
+const writeStream = fs.createWriteStream('output.mp3');
+audioStream.pipe(writeStream);
+
+// Or use in your application directly
+audioStream.on('data', (chunk) => {
+  // Process audio chunks
+});
 ```
 
-### Document Processing
+### 3. Vision Processing
 
-For providers that support document processing (OpenAI, Ollama, and Anthropic):
+Analyze images and get detailed descriptions:
 
 ```typescript
-// Using OpenAI
-const result = await smartAi.openaiProvider.document({
-  systemMessage: 'Classify the document type',
-  userMessage: 'What type of document is this?',
-  messageHistory: [],
-  pdfDocuments: [pdfBuffer] // Uint8Array of PDF content
-});
-
-// Using Ollama with llava
-const analysis = await smartAi.ollamaProvider.document({
-  systemMessage: 'You are a document analysis assistant',
-  userMessage: 'Extract the key information from this document',
-  messageHistory: [],
-  pdfDocuments: [pdfBuffer] // Uint8Array of PDF content
-});
-
-// Using Anthropic with Claude 3
+import * as fs from 'fs';
+
+// Read an image file
+const imageBuffer = fs.readFileSync('image.jpg');
+
+// OpenAI Vision
+const openAiVision = await smartAi.openaiProvider.vision({
+  image: imageBuffer,
+  prompt: 'What is in this image? Describe in detail.'
+});
+console.log('OpenAI:', openAiVision);
+
+// Anthropic Vision
+const anthropicVision = await smartAi.anthropicProvider.vision({
+  image: imageBuffer,
+  prompt: 'Analyze this image and identify any text or objects.'
+});
+console.log('Anthropic:', anthropicVision);
+
+// Ollama Vision (using local model)
+const ollamaVision = await smartAi.ollamaProvider.vision({
+  image: imageBuffer,
+  prompt: 'Describe the colors and composition of this image.'
+});
+console.log('Ollama:', ollamaVision);
+```
+
+### 4. Document Analysis
+
+Process and analyze PDF documents with AI:
+
+```typescript
+import * as fs from 'fs';
+
+// Read PDF documents
+const pdfBuffer = fs.readFileSync('document.pdf');
+
+// Analyze with OpenAI
+const openAiAnalysis = await smartAi.openaiProvider.document({
+  systemMessage: 'You are a document analyst. Extract key information.',
+  userMessage: 'Summarize this document and list the main points.',
+  messageHistory: [],
+  pdfDocuments: [pdfBuffer]
+});
+console.log('OpenAI Analysis:', openAiAnalysis.message);
+
+// Analyze with Anthropic
 const anthropicAnalysis = await smartAi.anthropicProvider.document({
-  systemMessage: 'You are a document analysis assistant',
-  userMessage: 'Please analyze this document and extract key information',
+  systemMessage: 'You are a legal expert.',
+  userMessage: 'Identify any legal terms or implications in this document.',
   messageHistory: [],
-  pdfDocuments: [pdfBuffer] // Uint8Array of PDF content
+  pdfDocuments: [pdfBuffer]
 });
+console.log('Anthropic Analysis:', anthropicAnalysis.message);
+
+// Process multiple documents
+const doc1 = fs.readFileSync('contract1.pdf');
+const doc2 = fs.readFileSync('contract2.pdf');
+
+const comparison = await smartAi.openaiProvider.document({
+  systemMessage: 'You are a contract analyst.',
+  userMessage: 'Compare these two contracts and highlight the differences.',
+  messageHistory: [],
+  pdfDocuments: [doc1, doc2]
+});
+console.log('Comparison:', comparison.message);
 ```
 
-Both providers will:
-1. Convert PDF documents to images
-2. Process each page using their vision models
-3. Return a comprehensive analysis based on the system message and user query
-
-### Vision Processing
+### 5. Conversation Management
 
-For providers that support vision tasks (OpenAI, Ollama, and Anthropic):
+Create persistent conversation sessions with any provider:
 
 ```typescript
-// Using OpenAI's GPT-4 Vision
-const description = await smartAi.openaiProvider.vision({
-  image: imageBuffer, // Buffer containing the image data
-  prompt: 'What do you see in this image?'
-});
-
-// Using Ollama's Llava model
-const analysis = await smartAi.ollamaProvider.vision({
-  image: imageBuffer,
-  prompt: 'Analyze this image in detail'
-});
-
-// Using Anthropic's Claude 3
-const anthropicAnalysis = await smartAi.anthropicProvider.vision({
-  image: imageBuffer,
-  prompt: 'Please analyze this image and describe what you see'
-});
+// Create a conversation with OpenAI
+const conversation = smartAi.createConversation('openai');
+
+// Set the system message
+await conversation.setSystemMessage('You are a helpful coding assistant.');
+
+// Get input and output streams
+const inputWriter = conversation.getInputStreamWriter();
+const outputStream = conversation.getOutputStream();
+
+// Set up output reader
+const reader = outputStream.getReader();
+const decoder = new TextDecoder();
+
+// Send messages
+await inputWriter.write('How do I create a REST API in Node.js?');
+
+// Read responses
+while (true) {
+  const { done, value } = await reader.read();
+  if (done) break;
+  console.log('Assistant:', decoder.decode(value));
+}
+
+// Continue the conversation
+await inputWriter.write('Can you show me an example with Express?');
+
+// Create conversations with different providers
+const anthropicConversation = smartAi.createConversation('anthropic');
+const groqConversation = smartAi.createConversation('groq');
 ```
 
-## Error Handling
+## Advanced Usage
 
-All providers implement proper error handling. It's recommended to wrap API calls in try-catch blocks:
+### Error Handling
+
+Always wrap AI operations in try-catch blocks for robust error handling:
 
 ```typescript
 try {
   const response = await smartAi.openaiProvider.chat({
-    systemMessage: 'You are a helpful assistant.',
+    systemMessage: 'You are an assistant.',
     userMessage: 'Hello!',
     messageHistory: []
   });
+  console.log(response.message);
 } catch (error) {
-  console.error('AI provider error:', error.message);
+  if (error.code === 'rate_limit_exceeded') {
+    console.error('Rate limit hit, please retry later');
+  } else if (error.code === 'invalid_api_key') {
+    console.error('Invalid API key provided');
+  } else {
+    console.error('Unexpected error:', error.message);
+  }
 }
 ```
 
+### Streaming with Custom Processing
+
+Implement custom transformations on streaming responses:
+
+```typescript
+// Create a custom transform stream
+const customTransform = new TransformStream({
+  transform(chunk, controller) {
+    // Example: Add timestamps to each chunk
+    const timestamp = new Date().toISOString();
+    controller.enqueue(`[${timestamp}] ${chunk}`);
+  }
+});
+
+// Apply to streaming chat
+const inputStream = new ReadableStream({
+  start(controller) {
+    controller.enqueue(new TextEncoder().encode(JSON.stringify({
+      role: 'user',
+      content: 'Tell me a story'
+    })));
+    controller.close();
+  }
+});
+
+const responseStream = await smartAi.openaiProvider.chatStream(inputStream);
+const processedStream = responseStream.pipeThrough(customTransform);
+
+// Read processed stream
+const reader = processedStream.getReader();
+while (true) {
+  const { done, value } = await reader.read();
+  if (done) break;
+  console.log(value);
+}
+```
+
+### Provider-Specific Features
+
+Each provider may have unique capabilities. Here's how to leverage them:
+
+```typescript
+// OpenAI - Use specific models
+const gpt4Response = await smartAi.openaiProvider.chat({
+  systemMessage: 'You are a helpful assistant.',
+  userMessage: 'Explain quantum computing',
+  messageHistory: []
+});
+
+// Anthropic - Use Claude's strength in analysis
+const codeReview = await smartAi.anthropicProvider.chat({
+  systemMessage: 'You are a code reviewer.',
+  userMessage: 'Review this code for security issues: ...',
+  messageHistory: []
+});
+
+// Perplexity - Best for research and current events
+const research = await smartAi.perplexityProvider.chat({
+  systemMessage: 'You are a research assistant.',
+  userMessage: 'What are the latest developments in renewable energy?',
+  messageHistory: []
+});
+
+// Groq - Optimized for speed
+const quickResponse = await smartAi.groqProvider.chat({
+  systemMessage: 'You are a quick helper.',
+  userMessage: 'Give me a one-line summary of photosynthesis',
+  messageHistory: []
+});
+```
+
+### Performance Optimization
+
+Tips for optimal performance:
+
+```typescript
+// 1. Reuse providers instead of creating new instances
+const smartAi = new SmartAi({ /* config */ });
+await smartAi.start(); // Initialize once
+
+// 2. Use streaming for long responses
+// Streaming reduces time-to-first-token and memory usage
+
+// 3. Batch operations when possible
+const promises = [
+  smartAi.openaiProvider.chat({ /* ... */ }),
+  smartAi.anthropicProvider.chat({ /* ... */ })
+];
+const results = await Promise.all(promises);
+
+// 4. Clean up resources
+await smartAi.stop(); // When done
+```
+
 ## License and Legal Information
 
 This repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository.
test/test.ts (30 lines changed)

@@ -1,4 +1,4 @@
-import { expect, expectAsync, tap } from '@push.rocks/tapbundle';
+import { expect, tap } from '@push.rocks/tapbundle';
 import * as qenv from '@push.rocks/qenv';
 import * as smartrequest from '@push.rocks/smartrequest';
 import * as smartfile from '@push.rocks/smartfile';

@@ -21,8 +21,7 @@ tap.test('should create chat response with openai', async () => {
   const response = await testSmartai.openaiProvider.chat({
     systemMessage: 'Hello',
     userMessage: userMessage,
-    messageHistory: [
-    ],
+    messageHistory: [],
   });
   console.log(`userMessage: ${userMessage}`);
   console.log(response.message);

@@ -55,7 +54,7 @@ tap.test('should recognize companies in a pdf', async () => {
       address: string;
       city: string;
       country: string;
-      EU: boolean; // wether the entity is within EU
+      EU: boolean; // whether the entity is within EU
     };
     entityReceiver: {
       type: 'official state entity' | 'company' | 'person';

@@ -63,7 +62,7 @@ tap.test('should recognize companies in a pdf', async () => {
       address: string;
       city: string;
       country: string;
-      EU: boolean; // wether the entity is within EU
+      EU: boolean; // whether the entity is within EU
     };
     date: string; // the date of the document as YYYY-MM-DD
     title: string; // a short title, suitable for a filename

@@ -75,10 +74,27 @@ tap.test('should recognize companies in a pdf', async () => {
     pdfDocuments: [pdfBuffer],
   });
   console.log(result);
-})
+});
+
+tap.test('should create audio response with openai', async () => {
+  // Call the audio method with a sample message.
+  const audioStream = await testSmartai.openaiProvider.audio({
+    message: 'This is a test of audio generation.',
+  });
+  // Read all chunks from the stream.
+  const chunks: Uint8Array[] = [];
+  for await (const chunk of audioStream) {
+    chunks.push(chunk as Uint8Array);
+  }
+  const audioBuffer = Buffer.concat(chunks);
+  await smartfile.fs.toFs(audioBuffer, './.nogit/testoutput.mp3');
+  console.log(`Audio Buffer length: ${audioBuffer.length}`);
+  // Assert that the resulting buffer is not empty.
+  expect(audioBuffer.length).toBeGreaterThan(0);
+});
 
 tap.test('should stop the smartai instance', async () => {
   await testSmartai.stop();
 });
 
 export default tap.start();
@@ -3,6 +3,6 @@
 */
 export const commitinfo = {
   name: '@push.rocks/smartai',
-  version: '0.3.0',
+  version: '0.5.4',
-  description: 'A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.'
+  description: 'SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.'
 }
@@ -48,6 +48,18 @@ export class Conversation {
     return conversation;
   }
 
+  public static async createWithExo(smartaiRefArg: SmartAi) {
+    if (!smartaiRefArg.exoProvider) {
+      throw new Error('Exo provider not available');
+    }
+    const conversation = new Conversation(smartaiRefArg, {
+      processFunction: async (input) => {
+        return '' // TODO implement proper streaming
+      }
+    });
+    return conversation;
+  }
+
   public static async createWithOllama(smartaiRefArg: SmartAi) {
     if (!smartaiRefArg.ollamaProvider) {
       throw new Error('Ollama provider not available');

@@ -60,6 +72,30 @@ export class Conversation {
     return conversation;
   }
 
+  public static async createWithGroq(smartaiRefArg: SmartAi) {
+    if (!smartaiRefArg.groqProvider) {
+      throw new Error('Groq provider not available');
+    }
+    const conversation = new Conversation(smartaiRefArg, {
+      processFunction: async (input) => {
+        return '' // TODO implement proper streaming
+      }
+    });
+    return conversation;
+  }
+
+  public static async createWithXai(smartaiRefArg: SmartAi) {
+    if (!smartaiRefArg.xaiProvider) {
+      throw new Error('XAI provider not available');
+    }
+    const conversation = new Conversation(smartaiRefArg, {
+      processFunction: async (input) => {
+        return '' // TODO implement proper streaming
+      }
+    });
+    return conversation;
+  }
+
   // INSTANCE
   smartaiRef: SmartAi
   private systemMessage: string;
@@ -1,18 +1,32 @@
 import { Conversation } from './classes.conversation.js';
 import * as plugins from './plugins.js';
 import { AnthropicProvider } from './provider.anthropic.js';
-import type { OllamaProvider } from './provider.ollama.js';
+import { OllamaProvider } from './provider.ollama.js';
 import { OpenAiProvider } from './provider.openai.js';
-import type { PerplexityProvider } from './provider.perplexity.js';
+import { PerplexityProvider } from './provider.perplexity.js';
+import { ExoProvider } from './provider.exo.js';
+import { GroqProvider } from './provider.groq.js';
+import { XAIProvider } from './provider.xai.js';
 
 export interface ISmartAiOptions {
   openaiToken?: string;
   anthropicToken?: string;
   perplexityToken?: string;
+  groqToken?: string;
+  xaiToken?: string;
+  exo?: {
+    baseUrl?: string;
+    apiKey?: string;
+  };
+  ollama?: {
+    baseUrl?: string;
+    model?: string;
+    visionModel?: string;
+  };
 }
 
-export type TProvider = 'openai' | 'anthropic' | 'perplexity' | 'ollama';
+export type TProvider = 'openai' | 'anthropic' | 'perplexity' | 'ollama' | 'exo' | 'groq' | 'xai';
 
 export class SmartAi {
   public options: ISmartAiOptions;

@@ -21,6 +35,9 @@ export class SmartAi {
   public anthropicProvider: AnthropicProvider;
   public perplexityProvider: PerplexityProvider;
   public ollamaProvider: OllamaProvider;
+  public exoProvider: ExoProvider;
+  public groqProvider: GroqProvider;
+  public xaiProvider: XAIProvider;
 
   constructor(optionsArg: ISmartAiOptions) {
     this.options = optionsArg;

@@ -37,6 +54,40 @@ export class SmartAi {
       this.anthropicProvider = new AnthropicProvider({
         anthropicToken: this.options.anthropicToken,
       });
+      await this.anthropicProvider.start();
+    }
+    if (this.options.perplexityToken) {
+      this.perplexityProvider = new PerplexityProvider({
+        perplexityToken: this.options.perplexityToken,
+      });
+      await this.perplexityProvider.start();
+    }
+    if (this.options.groqToken) {
+      this.groqProvider = new GroqProvider({
+        groqToken: this.options.groqToken,
+      });
+      await this.groqProvider.start();
+    }
+    if (this.options.xaiToken) {
+      this.xaiProvider = new XAIProvider({
+        xaiToken: this.options.xaiToken,
+      });
+      await this.xaiProvider.start();
+    }
+    if (this.options.ollama) {
+      this.ollamaProvider = new OllamaProvider({
+        baseUrl: this.options.ollama.baseUrl,
+        model: this.options.ollama.model,
+        visionModel: this.options.ollama.visionModel,
+      });
+      await this.ollamaProvider.start();
+    }
+    if (this.options.exo) {
+      this.exoProvider = new ExoProvider({
+        exoBaseUrl: this.options.exo.baseUrl,
+        apiKey: this.options.exo.apiKey,
+      });
+      await this.exoProvider.start();
     }
   }

@@ -47,6 +98,8 @@ export class SmartAi {
   */
   createConversation(provider: TProvider) {
     switch (provider) {
+      case 'exo':
+        return Conversation.createWithExo(this);
       case 'openai':
         return Conversation.createWithOpenAi(this);
       case 'anthropic':

@@ -55,6 +108,10 @@ export class SmartAi {
         return Conversation.createWithPerplexity(this);
       case 'ollama':
         return Conversation.createWithOllama(this);
+      case 'groq':
+        return Conversation.createWithGroq(this);
+      case 'xai':
+        return Conversation.createWithXai(this);
       default:
         throw new Error('Provider not available');
     }
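With `TProvider` widened in the hunks above, conversations can be created for the new providers the same way as before. A minimal sketch, assuming an already started SmartAi instance configured with the matching tokens and options:

```typescript
// Hypothetical usage of the extended createConversation switch.
const groqConversation = await smartAi.createConversation('groq');
const xaiConversation = await smartAi.createConversation('xai');
const exoConversation = await smartAi.createConversation('exo');

// The Conversation API shown in the readme applies unchanged:
await groqConversation.setSystemMessage('You are a concise assistant.');
```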
ts/provider.exo.ts (new file, 128 lines)

@@ -0,0 +1,128 @@
+import * as plugins from './plugins.js';
+import * as paths from './paths.js';
+import { MultiModalModel } from './abstract.classes.multimodal.js';
+import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
+import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';
+
+export interface IExoProviderOptions {
+  exoBaseUrl?: string;
+  apiKey?: string;
+}
+
+export class ExoProvider extends MultiModalModel {
+  private options: IExoProviderOptions;
+  public openAiApiClient: plugins.openai.default;
+
+  constructor(optionsArg: IExoProviderOptions = {}) {
+    super();
+    this.options = {
+      exoBaseUrl: 'http://localhost:8080/v1', // Default Exo API endpoint
+      ...optionsArg
+    };
+  }
+
+  public async start() {
+    this.openAiApiClient = new plugins.openai.default({
+      apiKey: this.options.apiKey || 'not-needed', // Exo might not require an API key for local deployment
+      baseURL: this.options.exoBaseUrl,
+    });
+  }
+
+  public async stop() {}
+
+  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
+    // Create a TextDecoder to handle incoming chunks
+    const decoder = new TextDecoder();
+    let buffer = '';
+    let currentMessage: { role: string; content: string; } | null = null;
+
+    // Create a TransformStream to process the input
+    const transform = new TransformStream<Uint8Array, string>({
+      transform: async (chunk, controller) => {
+        buffer += decoder.decode(chunk, { stream: true });
+
+        // Try to parse complete JSON messages from the buffer
+        while (true) {
+          const newlineIndex = buffer.indexOf('\n');
+          if (newlineIndex === -1) break;
+
+          const line = buffer.slice(0, newlineIndex);
+          buffer = buffer.slice(newlineIndex + 1);
+
+          if (line.trim()) {
+            try {
+              const message = JSON.parse(line);
+              currentMessage = message;
+
+              // Process the message based on its type
+              if (message.type === 'message') {
+                const response = await this.chat({
+                  systemMessage: '',
+                  userMessage: message.content,
+                  messageHistory: [{ role: message.role as 'user' | 'assistant' | 'system', content: message.content }]
+                });
+
+                controller.enqueue(JSON.stringify(response) + '\n');
+              }
+            } catch (error) {
+              console.error('Error processing message:', error);
+            }
+          }
+        }
+      },
+      flush(controller) {
+        if (buffer) {
+          try {
+            const message = JSON.parse(buffer);
+            currentMessage = message;
+          } catch (error) {
+            console.error('Error processing remaining buffer:', error);
+          }
+        }
+      }
+    });
+
+    return input.pipeThrough(transform);
+  }
+
+  public async chat(options: ChatOptions): Promise<ChatResponse> {
+    const messages: ChatCompletionMessageParam[] = [
+      { role: 'system', content: options.systemMessage },
+      ...options.messageHistory,
+      { role: 'user', content: options.userMessage }
+    ];
+
+    try {
+      const response = await this.openAiApiClient.chat.completions.create({
+        model: 'local-model', // Exo uses local models
+        messages: messages,
+        stream: false
+      });
+
+      return {
+        role: 'assistant',
+        message: response.choices[0]?.message?.content || ''
+      };
+    } catch (error) {
+      console.error('Error in chat completion:', error);
+      throw error;
+    }
+  }
+
+  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
+    throw new Error('Audio generation is not supported by Exo provider');
+  }
+
+  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
+    throw new Error('Vision processing is not supported by Exo provider');
+  }
+
+  public async document(optionsArg: {
+    systemMessage: string;
+    userMessage: string;
+    pdfDocuments: Uint8Array[];
+    messageHistory: ChatMessage[];
+  }): Promise<{ message: any }> {
+    throw new Error('Document processing is not supported by Exo provider');
+  }
+}
@@ -32,7 +32,7 @@ export class GroqProvider extends MultiModalModel {
 
     // Create a TransformStream to process the input
     const transform = new TransformStream<Uint8Array, string>({
-      async transform(chunk, controller) {
+      transform: async (chunk, controller) => {
         buffer += decoder.decode(chunk, { stream: true });
 
         // Try to parse complete JSON messages from the buffer

@@ -45,7 +45,7 @@ export class OllamaProvider extends MultiModalModel {
 
     // Create a TransformStream to process the input
     const transform = new TransformStream<Uint8Array, string>({
-      async transform(chunk, controller) {
+      transform: async (chunk, controller) => {
         buffer += decoder.decode(chunk, { stream: true });
 
         // Try to parse complete JSON messages from the buffer
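These hunks swap the method-shorthand transform handler for an arrow function. A minimal sketch of why that matters: an arrow function captures the enclosing provider method's `this`, so instance members such as the API client stay reachable inside the stream handler (the class and field names below are illustrative, not from the repository):

```typescript
class DemoProvider {
  private prefix = 'chunk: ';

  public makeTransform(): TransformStream<Uint8Array, string> {
    return new TransformStream<Uint8Array, string>({
      // Arrow function: `this` is the DemoProvider instance.
      transform: async (chunk, controller) => {
        controller.enqueue(this.prefix + new TextDecoder().decode(chunk));
      },
      // With method shorthand (`async transform(chunk, controller) { ... }`),
      // `this` would refer to the transformer object instead, and `this.prefix`
      // would be undefined.
    });
  }
}
```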
@@ -1,10 +1,20 @@
 import * as plugins from './plugins.js';
 import * as paths from './paths.js';
 
+// Custom type definition for chat completion messages
+export type TChatCompletionRequestMessage = {
+  role: "system" | "user" | "assistant";
+  content: string;
+};
+
 import { MultiModalModel } from './abstract.classes.multimodal.js';
 
 export interface IOpenaiProviderOptions {
   openaiToken: string;
+  chatModel?: string;
+  audioModel?: string;
+  visionModel?: string;
+  // Optionally add more model options (e.g., documentModel) if needed.
 }
 
 export class OpenAiProvider extends MultiModalModel {

@@ -31,11 +41,14 @@ export class OpenAiProvider extends MultiModalModel {
     // Create a TextDecoder to handle incoming chunks
     const decoder = new TextDecoder();
     let buffer = '';
-    let currentMessage: { role: string; content: string; } | null = null;
+    let currentMessage: {
+      role: "function" | "user" | "system" | "assistant" | "tool" | "developer";
+      content: string;
+    } | null = null;
 
     // Create a TransformStream to process the input
     const transform = new TransformStream<Uint8Array, string>({
-      async transform(chunk, controller) {
+      transform: async (chunk, controller) => {
         buffer += decoder.decode(chunk, { stream: true });
 
         // Try to parse complete JSON messages from the buffer

@@ -50,7 +63,7 @@ export class OpenAiProvider extends MultiModalModel {
           try {
             const message = JSON.parse(line);
             currentMessage = {
-              role: message.role || 'user',
+              role: (message.role || 'user') as "function" | "user" | "system" | "assistant" | "tool" | "developer",
               content: message.content || '',
             };
           } catch (e) {

@@ -61,20 +74,24 @@ export class OpenAiProvider extends MultiModalModel {
 
         // If we have a complete message, send it to OpenAI
         if (currentMessage) {
-          const stream = await this.openAiApiClient.chat.completions.create({
-            model: 'gpt-4',
-            messages: [{ role: currentMessage.role, content: currentMessage.content }],
+          const messageToSend = { role: "user" as const, content: currentMessage.content };
+          const chatModel = this.options.chatModel ?? 'o3-mini';
+          const requestParams: any = {
+            model: chatModel,
+            messages: [messageToSend],
             stream: true,
-          });
+          };
+          // Temperature is omitted since the model does not support it.
+          const stream = await this.openAiApiClient.chat.completions.create(requestParams);
+          // Explicitly cast the stream as an async iterable to satisfy TypeScript.
+          const streamAsyncIterable = stream as unknown as AsyncIterableIterator<any>;
           // Process each chunk from OpenAI
-          for await (const chunk of stream) {
+          for await (const chunk of streamAsyncIterable) {
             const content = chunk.choices[0]?.delta?.content;
             if (content) {
               controller.enqueue(content);
             }
           }
 
           currentMessage = null;
         }
       },

@@ -104,15 +121,17 @@ export class OpenAiProvider extends MultiModalModel {
       content: string;
     }[];
   }) {
-    const result = await this.openAiApiClient.chat.completions.create({
-      model: 'gpt-4o',
+    const chatModel = this.options.chatModel ?? 'o3-mini';
+    const requestParams: any = {
+      model: chatModel,
       messages: [
         { role: 'system', content: optionsArg.systemMessage },
        ...optionsArg.messageHistory,
         { role: 'user', content: optionsArg.userMessage },
       ],
-    });
+    };
+    // Temperature parameter removed to avoid unsupported error.
+    const result = await this.openAiApiClient.chat.completions.create(requestParams);
     return {
       role: result.choices[0].message.role as 'assistant',
       message: result.choices[0].message.content,

@@ -122,7 +141,7 @@ export class OpenAiProvider extends MultiModalModel {
   public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
     const done = plugins.smartpromise.defer<NodeJS.ReadableStream>();
     const result = await this.openAiApiClient.audio.speech.create({
-      model: 'tts-1-hd',
+      model: this.options.audioModel ?? 'tts-1-hd',
       input: optionsArg.message,
       voice: 'nova',
       response_format: 'mp3',

@@ -144,27 +163,30 @@ export class OpenAiProvider extends MultiModalModel {
   }) {
     let pdfDocumentImageBytesArray: Uint8Array[] = [];
 
+    // Convert each PDF into one or more image byte arrays.
+    const smartpdfInstance = new plugins.smartpdf.SmartPdf();
+    await smartpdfInstance.start();
     for (const pdfDocument of optionsArg.pdfDocuments) {
-      const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
+      const documentImageArray = await smartpdfInstance.convertPDFToPngBytes(pdfDocument);
       pdfDocumentImageBytesArray = pdfDocumentImageBytesArray.concat(documentImageArray);
     }
+    await smartpdfInstance.stop();
 
     console.log(`image smartfile array`);
     console.log(pdfDocumentImageBytesArray.map((smartfile) => smartfile.length));
 
-    const smartfileArray = await plugins.smartarray.map(
-      pdfDocumentImageBytesArray,
-      async (pdfDocumentImageBytes) => {
-        return plugins.smartfile.SmartFile.fromBuffer(
-          'pdfDocumentImage.jpg',
-          Buffer.from(pdfDocumentImageBytes)
-        );
-      }
-    );
+    // Filter out any empty buffers to avoid sending invalid image URLs.
+    const validImageBytesArray = pdfDocumentImageBytesArray.filter(imageBytes => imageBytes && imageBytes.length > 0);
+    const imageAttachments = validImageBytesArray.map(imageBytes => ({
+      type: 'image_url',
+      image_url: {
+        url: 'data:image/png;base64,' + Buffer.from(imageBytes).toString('base64'),
+      },
+    }));
 
-    const result = await this.openAiApiClient.chat.completions.create({
-      model: 'gpt-4o',
-      // response_format: { type: "json_object" }, // not supported for now
+    const chatModel = this.options.chatModel ?? 'o4-mini';
+    const requestParams: any = {
+      model: chatModel,
       messages: [
         { role: 'system', content: optionsArg.systemMessage },
         ...optionsArg.messageHistory,

@@ -172,30 +194,22 @@ export class OpenAiProvider extends MultiModalModel {
           role: 'user',
           content: [
             { type: 'text', text: optionsArg.userMessage },
-            ...(() => {
-              const returnArray = [];
-              for (const imageBytes of pdfDocumentImageBytesArray) {
-                returnArray.push({
-                  type: 'image_url',
-                  image_url: {
-                    url: 'data:image/png;base64,' + Buffer.from(imageBytes).toString('base64'),
-                  },
-                });
-              }
-              return returnArray;
-            })(),
+            ...imageAttachments,
           ],
         },
       ],
-    });
+    };
+    // Temperature parameter removed.
+    const result = await this.openAiApiClient.chat.completions.create(requestParams);
     return {
       message: result.choices[0].message,
     };
   }
 
   public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
-    const result = await this.openAiApiClient.chat.completions.create({
-      model: 'gpt-4-vision-preview',
+    const visionModel = this.options.visionModel ?? '04-mini';
+    const requestParams: any = {
+      model: visionModel,
       messages: [
         {
           role: 'user',

@@ -211,8 +225,8 @@ export class OpenAiProvider extends MultiModalModel {
         }
       ],
       max_tokens: 300
-    });
+    };
+    const result = await this.openAiApiClient.chat.completions.create(requestParams);
     return result.choices[0].message.content || '';
   }
 }