14 commits, 13 changed files with 5199 additions and 1696 deletions


@ -1,5 +1,52 @@
# Changelog
## 2025-02-25 - 0.5.1 - fix(OpenAiProvider)
Corrected audio model ID in OpenAiProvider
- Fixed audio model identifier from 'o3-mini' to 'tts-1-hd' in the OpenAiProvider's audio method.
- Addressed minor code formatting issues in test suite for better readability.
- Corrected spelling errors in test documentation and comments.
## 2025-02-25 - 0.5.0 - feat(documentation and configuration)
Enhanced package and README documentation
- Expanded the package description to better reflect the library's capabilities.
- Improved README with detailed usage examples for initialization, chat interactions, streaming chat, audio generation, document analysis, and vision processing.
- Provided error handling strategies and advanced streaming customization examples.
## 2025-02-25 - 0.4.2 - fix(core)
Fix OpenAI chat streaming and PDF document processing logic.
- Updated OpenAI chat streaming to handle new async iterable format.
- Improved PDF document processing by filtering out empty image buffers.
- Removed unsupported temperature options from OpenAI requests.
## 2025-02-25 - 0.4.1 - fix(provider)
Fix provider modules for consistency
- Updated TypeScript interfaces and options in provider modules for better type safety.
- Modified transform stream handlers in Exo, Groq, and Ollama providers for consistency.
- Added optional model options to OpenAI provider for custom model usage.
## 2025-02-08 - 0.4.0 - feat(core)
Added support for Exo AI provider
- Introduced ExoProvider with chat functionalities.
- Updated SmartAi class to initialize ExoProvider.
- Extended Conversation class to support ExoProvider.
## 2025-02-05 - 0.3.3 - fix(documentation)
Update readme with detailed license and legal information.
- Added explicit section on License and Legal Information in the README.
- Clarified the use of trademarks and company information.
## 2025-02-05 - 0.3.2 - fix(documentation)
Remove redundant badges from readme
- Removed Build Status badge from the readme file.
- Removed License badge from the readme file.
## 2025-02-05 - 0.3.1 - fix(documentation)
Updated README structure and added detailed usage examples


@ -5,20 +5,33 @@
"githost": "code.foss.global",
"gitscope": "push.rocks",
"gitrepo": "smartai",
"description": "A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.",
"description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
"npmPackagename": "@push.rocks/smartai",
"license": "MIT",
"projectDomain": "push.rocks",
"keywords": [
"AI integration",
"chatbot",
"TypeScript",
"chatbot",
"OpenAI",
"Anthropic",
"multi-model support",
"audio responses",
"multi-model",
"audio generation",
"text-to-speech",
"streaming chat"
"document processing",
"vision processing",
"streaming chat",
"API",
"multiple providers",
"AI models",
"synchronous chat",
"asynchronous chat",
"real-time interaction",
"content analysis",
"image description",
"document classification",
"AI toolkit",
"provider switching"
]
}
},


@ -1,8 +1,8 @@
{
"name": "@push.rocks/smartai",
"version": "0.3.1",
"version": "0.5.1",
"private": false,
"description": "A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.",
"description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
"main": "dist_ts/index.js",
"typings": "dist_ts/index.d.ts",
"type": "module",
@ -14,24 +14,24 @@
"buildDocs": "(tsdoc)"
},
"devDependencies": {
"@git.zone/tsbuild": "^2.1.84",
"@git.zone/tsbundle": "^2.0.5",
"@git.zone/tsrun": "^1.2.49",
"@git.zone/tstest": "^1.0.90",
"@push.rocks/qenv": "^6.0.5",
"@push.rocks/tapbundle": "^5.3.0",
"@types/node": "^22.5.5"
"@git.zone/tsbuild": "^2.2.1",
"@git.zone/tsbundle": "^2.2.5",
"@git.zone/tsrun": "^1.3.3",
"@git.zone/tstest": "^1.0.96",
"@push.rocks/qenv": "^6.1.0",
"@push.rocks/tapbundle": "^5.5.6",
"@types/node": "^22.13.5"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.27.3",
"@push.rocks/smartarray": "^1.0.8",
"@push.rocks/smartfile": "^11.0.21",
"@anthropic-ai/sdk": "^0.37.0",
"@push.rocks/smartarray": "^1.1.0",
"@push.rocks/smartfile": "^11.2.0",
"@push.rocks/smartpath": "^5.0.18",
"@push.rocks/smartpdf": "^3.1.6",
"@push.rocks/smartpromise": "^4.0.4",
"@push.rocks/smartrequest": "^2.0.22",
"@push.rocks/smartpdf": "^3.2.2",
"@push.rocks/smartpromise": "^4.2.3",
"@push.rocks/smartrequest": "^2.0.23",
"@push.rocks/webstream": "^1.0.10",
"openai": "^4.62.1"
"openai": "^4.85.4"
},
"repository": {
"type": "git",
@ -58,13 +58,31 @@
],
"keywords": [
"AI integration",
"chatbot",
"TypeScript",
"chatbot",
"OpenAI",
"Anthropic",
"multi-model support",
"audio responses",
"multi-model",
"audio generation",
"text-to-speech",
"streaming chat"
]
"document processing",
"vision processing",
"streaming chat",
"API",
"multiple providers",
"AI models",
"synchronous chat",
"asynchronous chat",
"real-time interaction",
"content analysis",
"image description",
"document classification",
"AI toolkit",
"provider switching"
],
"pnpm": {
"onlyBuiltDependencies": [
"puppeteer"
]
}
}

pnpm-lock.yaml generated (6084 lines changed); file diff suppressed because it is too large.

readme.md (312 lines changed)

@ -1,164 +1,75 @@
# @push.rocks/smartai
[![npm version](https://badge.fury.io/js/%40push.rocks%2Fsmartai.svg)](https://www.npmjs.com/package/@push.rocks/smartai)
[![Build Status](https://github.com/push.rocks/smartai/workflows/CI/badge.svg)](https://github.com/push.rocks/smartai/actions)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](LICENSE)
SmartAi is a TypeScript library providing a unified interface for integrating and interacting with multiple AI models, supporting chat interactions, audio and document processing, and vision tasks.
SmartAi is a comprehensive TypeScript library that provides a standardized interface for integrating and interacting with multiple AI models. It supports a range of operations from synchronous and streaming chat to audio generation, document processing, and vision tasks.
## Install
## Table of Contents
- [Features](#features)
- [Installation](#installation)
- [Supported AI Providers](#supported-ai-providers)
- [Quick Start](#quick-start)
- [Usage Examples](#usage-examples)
- [Chat Interactions](#chat-interactions)
- [Streaming Chat](#streaming-chat)
- [Audio Generation](#audio-generation)
- [Document Processing](#document-processing)
- [Vision Processing](#vision-processing)
- [Error Handling](#error-handling)
- [Development](#development)
- [Running Tests](#running-tests)
- [Building the Project](#building-the-project)
- [Contributing](#contributing)
- [License](#license)
- [Legal Information](#legal-information)
## Features
- **Unified API:** Seamlessly integrate multiple AI providers with a consistent interface.
- **Chat & Streaming:** Support for both synchronous and real-time streaming chat interactions.
- **Audio & Vision:** Generate audio responses and perform detailed image analysis.
- **Document Processing:** Analyze PDFs and other documents using vision models.
- **Extensible:** Easily extend the library to support additional AI providers (see the sketch after this list).
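Each bundled provider extends the abstract MultiModalModel class, so adding a provider of your own follows the same shape. The sketch below is illustrative only: the method signatures are copied from the existing provider sources in this repository, and the backend call is a stub.
```typescript
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';

// Hypothetical custom provider: wires an arbitrary chat backend into SmartAi.
export class MyCustomProvider extends MultiModalModel {
  public async start() { /* initialize API clients or connections here */ }
  public async stop() { /* release resources here */ }

  public async chat(options: ChatOptions): Promise<ChatResponse> {
    // Forward options.systemMessage, options.messageHistory and options.userMessage
    // to your backend, then map its reply onto the shared response shape.
    return { role: 'assistant', message: 'stubbed response' };
  }

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    throw new Error('Streaming is not implemented in this sketch');
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('Audio generation is not implemented in this sketch');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('Vision processing is not implemented in this sketch');
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    throw new Error('Document processing is not implemented in this sketch');
  }
}
```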
## Installation
To install SmartAi, run the following command:
To install SmartAi into your project, run the following command in your terminal:
```bash
npm install @push.rocks/smartai
```
This will add the package to your projects dependencies.
This command will add the SmartAi library to your project's dependencies, making it available for use in your TypeScript application.
## Supported AI Providers
## Usage
SmartAi supports multiple AI providers. Configure each provider with its corresponding token or settings:
SmartAi provides a comprehensive, unified API for working with multiple AI providers such as OpenAI, Anthropic, and Perplexity. The examples below walk through setup and everyday use, covering synchronous and streaming chat, audio generation, document handling, and vision tasks across the supported providers.
### OpenAI
### Initialization
- **Models:** GPT-4, GPT-3.5-turbo, GPT-4-vision-preview
- **Features:** Chat, Streaming, Audio Generation, Vision, Document Processing
- **Configuration Example:**
```typescript
openaiToken: 'your-openai-token'
```
### X.AI
- **Models:** Grok-2-latest
- **Features:** Chat, Streaming, Document Processing
- **Configuration Example:**
```typescript
xaiToken: 'your-xai-token'
```
### Anthropic
- **Models:** Claude-3-opus-20240229
- **Features:** Chat, Streaming, Vision, Document Processing
- **Configuration Example:**
```typescript
anthropicToken: 'your-anthropic-token'
```
### Perplexity
- **Models:** Mixtral-8x7b-instruct
- **Features:** Chat, Streaming
- **Configuration Example:**
```typescript
perplexityToken: 'your-perplexity-token'
```
### Groq
- **Models:** Llama-3.3-70b-versatile
- **Features:** Chat, Streaming
- **Configuration Example:**
```typescript
groqToken: 'your-groq-token'
```
### Ollama
- **Models:** Configurable (default: llama2; use llava for vision/document tasks)
- **Features:** Chat, Streaming, Vision, Document Processing
- **Configuration Example:**
```typescript
ollama: {
baseUrl: 'http://localhost:11434', // Optional
model: 'llama2', // Optional
visionModel: 'llava' // Optional for vision and document tasks
}
```
## Quick Start
Initialize SmartAi with the provider configurations you plan to use:
Initialization is the first step before using any AI functionality. Provide API tokens for each provider you plan to use.
```typescript
import { SmartAi } from '@push.rocks/smartai';
const smartAi = new SmartAi({
openaiToken: 'your-openai-token',
xaiToken: 'your-xai-token',
anthropicToken: 'your-anthropic-token',
perplexityToken: 'your-perplexity-token',
xaiToken: 'your-xai-token',
groqToken: 'your-groq-token',
ollama: {
baseUrl: 'http://localhost:11434',
model: 'llama2'
model: 'llama2',
visionModel: 'llava'
},
exo: {
baseUrl: 'http://localhost:8080/v1',
apiKey: 'your-api-key'
}
});
await smartAi.start();
```
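With the instance started, you can either call a provider directly (as the examples below do) or ask SmartAi for a provider-bound conversation. The createConversation helper shown in ts/classes.smartai.ts accepts 'openai', 'anthropic', 'perplexity', 'ollama', 'exo', 'groq', or 'xai'; only the creation step is sketched here.
```typescript
// Create a conversation bound to a specific provider (assumes smartAi.start() has run).
const conversation = await smartAi.createConversation('openai');
```
Direct provider access via smartAi.openaiProvider, smartAi.anthropicProvider, and so on remains available for the one-off calls used throughout the rest of this document.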
## Usage Examples
### Chat Interactions
**Synchronous Chat:**
Interaction through chat is a key feature. SmartAi caters to both synchronous and asynchronous (streaming) chats across several AI models.
#### Regular Synchronous Chat
Connect with AI models via straightforward request-response interactions.
```typescript
const response = await smartAi.openaiProvider.chat({
const syncResponse = await smartAi.openaiProvider.chat({
systemMessage: 'You are a helpful assistant.',
userMessage: 'What is the capital of France?',
messageHistory: [] // Include previous conversation messages if applicable
messageHistory: [] // Could include context or preceding messages
});
console.log(response.message);
console.log(syncResponse.message); // Outputs: "The capital of France is Paris."
```
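For multi-turn exchanges, earlier turns are passed back through messageHistory. The role values below ('user' and 'assistant') mirror the types used in the provider implementations and should be treated as an assumption for any given provider:
```typescript
// Hypothetical follow-up question that reuses the previous exchange as context.
const followUp = await smartAi.openaiProvider.chat({
  systemMessage: 'You are a helpful assistant.',
  userMessage: 'And roughly how many people live there?',
  messageHistory: [
    { role: 'user', content: 'What is the capital of France?' },
    { role: 'assistant', content: 'The capital of France is Paris.' },
  ],
});
console.log(followUp.message);
```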
### Streaming Chat
#### Real-Time Streaming Chat
**Real-Time Streaming:**
For continuous interaction and lower latency, engage in streaming chat.
```typescript
const textEncoder = new TextEncoder();
const textDecoder = new TextDecoder();
// Create a transform stream for sending and receiving data
// Establish a transform stream
const { writable, readable } = new TransformStream();
const writer = writable.getWriter();
@ -169,7 +80,7 @@ const message = {
writer.write(textEncoder.encode(JSON.stringify(message) + '\n'));
// Start streaming the response
// Initiate streaming
const stream = await smartAi.openaiProvider.chatStream(readable);
const reader = stream.getReader();
@ -182,152 +93,153 @@ while (true) {
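Reading from the returned stream follows the standard Web Streams reader pattern; a minimal sketch (each chunk arrives as an already-decoded string):
```typescript
// Drain the response stream chunk by chunk as it arrives.
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  process.stdout.write(value); // each value is a string fragment of the reply
}
```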
### Audio Generation
Generate audio (supported by providers like OpenAI):
Audio generation from textual input is possible using providers like OpenAI.
```typescript
const audioStream = await smartAi.openaiProvider.audio({
message: 'Hello, this is a test of text-to-speech'
message: 'This is a test message for generating speech.'
});
// Process the audio stream, for example, play it or save to a file.
// Use the audioStream, e.g. play it back or save it to a file.
```
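One way to persist the stream, mirroring what the test suite does, is to collect its chunks into a single Buffer and write that to disk; Node's fs/promises is assumed here:
```typescript
import { writeFile } from 'fs/promises';

// Gather the streamed audio chunks and save them as an MP3 file.
const chunks: Uint8Array[] = [];
for await (const chunk of audioStream) {
  chunks.push(chunk as Uint8Array);
}
await writeFile('./speech-output.mp3', Buffer.concat(chunks));
```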
### Document Processing
### Document Analysis
Analyze and extract key information from documents:
SmartAi can ingest and process documents, extracting meaningful information or performing classifications.
```typescript
// Example using OpenAI
const documentResult = await smartAi.openaiProvider.document({
systemMessage: 'Classify the document type',
userMessage: 'What type of document is this?',
messageHistory: [],
pdfDocuments: [pdfBuffer] // Uint8Array containing the PDF content
});
```
Other providers (e.g., Ollama and Anthropic) follow a similar pattern:
```typescript
// Using Ollama for document processing
const ollamaResult = await smartAi.ollamaProvider.document({
systemMessage: 'You are a document analysis assistant',
userMessage: 'Extract key information from this document',
const pdfBuffer = await fetchPdf('https://example.com/document.pdf');
const documentRes = await smartAi.openaiProvider.document({
systemMessage: 'Determine the nature of the document.',
userMessage: 'Classify this document.',
messageHistory: [],
pdfDocuments: [pdfBuffer]
});
console.log(documentRes.message); // Outputs: classified document type
```
SmartAi allows easy switching between providers, thus giving developers flexibility:
```typescript
// Using Anthropic for document processing
const anthropicResult = await smartAi.anthropicProvider.document({
systemMessage: 'Analyze the document',
userMessage: 'Please extract the main points',
const anthropicRes = await smartAi.anthropicProvider.document({
systemMessage: 'Analyze this document.',
userMessage: 'Extract core points.',
messageHistory: [],
pdfDocuments: [pdfBuffer]
});
console.log(anthropicRes.message); // Outputs: summarized core points
```
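The fetchPdf helper used above is not part of SmartAi; a minimal stand-in for a Node 18+ runtime with a global fetch could look like this:
```typescript
// Hypothetical helper: download a PDF and hand it to document() as a Uint8Array.
async function fetchPdf(url: string): Promise<Uint8Array> {
  const response = await fetch(url);
  if (!response.ok) {
    throw new Error(`Failed to fetch PDF (${response.status} ${response.statusText})`);
  }
  return new Uint8Array(await response.arrayBuffer());
}
```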
### Vision Processing
Analyze images with vision capabilities:
Engage AI models in analyzing and describing images:
```typescript
// Using OpenAI GPT-4 Vision
const imageDescription = await smartAi.openaiProvider.vision({
image: imageBuffer, // Uint8Array containing image data
prompt: 'What do you see in this image?'
const imageBuffer = await fetchImage('path/to/image.jpg');
// Using OpenAI's vision capabilities
const visionOutput = await smartAi.openaiProvider.vision({
image: imageBuffer,
prompt: 'Describe the image.'
});
// Using Ollama for vision tasks
const ollamaImageAnalysis = await smartAi.ollamaProvider.vision({
image: imageBuffer,
prompt: 'Analyze this image in detail'
});
// Using Anthropic for vision analysis
const anthropicImageAnalysis = await smartAi.anthropicProvider.vision({
image: imageBuffer,
prompt: 'Describe the contents of this image'
});
console.log(visionOutput); // Outputs: image description
```
## Error Handling
Use other providers for more varied analysis:
Always wrap API calls in try-catch blocks to manage errors effectively:
```typescript
const ollamaOutput = await smartAi.ollamaProvider.vision({
image: imageBuffer,
prompt: 'Detailed analysis required.'
});
console.log(ollamaOutput); // Outputs: detailed analysis results
```
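fetchImage is likewise only a placeholder; reading a local file into the Buffer that vision() expects works with Node's fs/promises:
```typescript
import { readFile } from 'fs/promises';

// Hypothetical replacement for fetchImage: load the image from disk instead.
const localImageBuffer: Buffer = await readFile('path/to/image.jpg');

const localDescription = await smartAi.openaiProvider.vision({
  image: localImageBuffer,
  prompt: 'Describe the image.',
});
```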
### Error Handling
Because AI calls go out to external services, be sure to wrap them in try-catch blocks.
```typescript
try {
const response = await smartAi.openaiProvider.chat({
systemMessage: 'You are a helpful assistant.',
userMessage: 'Hello!',
const response = await smartAi.anthropicProvider.chat({
systemMessage: 'You are a helpful assistant.',
userMessage: 'Help me out.',
messageHistory: []
});
console.log(response.message);
} catch (error: any) {
console.error('AI provider error:', error.message);
console.error('Encountered an error:', error.message);
}
```
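Because every provider shares the same chat signature, error handling can also double as a fallback strategy; the pairing of OpenAI and Anthropic below is just an example:
```typescript
// Hypothetical fallback: try OpenAI first and fall back to Anthropic on failure.
async function chatWithFallback(userMessage: string) {
  const request = {
    systemMessage: 'You are a helpful assistant.',
    userMessage,
    messageHistory: [],
  };
  try {
    return await smartAi.openaiProvider.chat(request);
  } catch (error: any) {
    console.error('OpenAI call failed, retrying with Anthropic:', error.message);
    return await smartAi.anthropicProvider.chat(request);
  }
}

const fallbackResponse = await chatWithFallback('Summarize the MIT license in one sentence.');
console.log(fallbackResponse.message);
```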
## Development
### Providers and Customization
### Running Tests
The library supports provider-specific customization, enabling tailored interactions:
To run the test suite, use the following command:
```typescript
const smartAi = new SmartAi({
openaiToken: 'your-openai-token',
anthropicToken: 'your-anthropic-token',
ollama: {
baseUrl: 'http://localhost:11434',
model: 'llama2',
visionModel: 'llava'
}
});
```bash
npm run test
await smartAi.start();
```
Ensure your environment is configured with the appropriate tokens and settings for the providers you are testing.
### Advanced Streaming Customization
### Building the Project
Developers can implement real-time processing pipelines with custom transformations:
Compile the TypeScript code and build the package using:
```typescript
const customProcessingStream = new TransformStream({
transform(chunk, controller) {
const processed = chunk.toUpperCase(); // Example transformation
controller.enqueue(processed);
}
});
```bash
npm run build
const processedStream = stream.pipeThrough(customProcessingStream);
const processedReader = processedStream.getReader();
while (true) {
const { done, value } = await processedReader.read();
if (done) break;
console.log('Processed Output:', value);
}
```
This command prepares the library for distribution.
This approach makes it easy to post-process streamed output on the fly, for example to normalize, filter, or annotate chunks before they are displayed.
## Contributing
### Conclusion
Contributions are welcome! Please follow these steps:
SmartAi is a powerful toolkit for multi-faceted AI integration, offering robust solutions for chat, media, and document processing. Developers can enjoy a consistent API experience while leveraging the strengths of each supported AI model.
1. Fork the repository.
2. Create a feature branch:
```bash
git checkout -b feature/my-feature
```
3. Commit your changes with clear messages:
```bash
git commit -m 'Add new feature'
```
4. Push your branch to your fork:
```bash
git push origin feature/my-feature
```
5. Open a Pull Request with a detailed description of your changes.
For further exploration, consult each provider's documentation to understand its specific capabilities and limitations.
## License
This project is licensed under the [MIT License](LICENSE).
## License and Legal Information
## Legal Information
This repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository.
**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
### Trademarks
This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and its related products or services are trademarks of Task Venture Capital GmbH and are not covered by the MIT License. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines.
This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.
### Company Information
Task Venture Capital GmbH
Registered at District Court Bremen HRB 35230 HB, Germany
Contact: hello@task.vc
Registered at District Court Bremen HRB 35230 HB, Germany
By using this repository, you agree to the terms outlined in this section.
For any legal inquiries or if you require further information, please contact us via email at hello@task.vc.
---
Happy coding with SmartAi!
By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.


@ -21,8 +21,7 @@ tap.test('should create chat response with openai', async () => {
const response = await testSmartai.openaiProvider.chat({
systemMessage: 'Hello',
userMessage: userMessage,
messageHistory: [
],
messageHistory: [],
});
console.log(`userMessage: ${userMessage}`);
console.log(response.message);
@ -55,7 +54,7 @@ tap.test('should recognize companies in a pdf', async () => {
address: string;
city: string;
country: string;
EU: boolean; // wether the entity is within EU
EU: boolean; // whether the entity is within EU
};
entityReceiver: {
type: 'official state entity' | 'company' | 'person';
@ -63,7 +62,7 @@ tap.test('should recognize companies in a pdf', async () => {
address: string;
city: string;
country: string;
EU: boolean; // wether the entity is within EU
EU: boolean; // whether the entity is within EU
};
date: string; // the date of the document as YYYY-MM-DD
title: string; // a short title, suitable for a filename
@ -75,10 +74,27 @@ tap.test('should recognize companies in a pdf', async () => {
pdfDocuments: [pdfBuffer],
});
console.log(result);
})
});
tap.test('should create audio response with openai', async () => {
// Call the audio method with a sample message.
const audioStream = await testSmartai.openaiProvider.audio({
message: 'This is a test of audio generation.',
});
// Read all chunks from the stream.
const chunks: Uint8Array[] = [];
for await (const chunk of audioStream) {
chunks.push(chunk as Uint8Array);
}
const audioBuffer = Buffer.concat(chunks);
await smartfile.fs.toFs(audioBuffer, './.nogit/testoutput.mp3');
console.log(`Audio Buffer length: ${audioBuffer.length}`);
// Assert that the resulting buffer is not empty.
expect(audioBuffer.length).toBeGreaterThan(0);
});
tap.test('should stop the smartai instance', async () => {
await testSmartai.stop();
});
export default tap.start();
export default tap.start();


@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@push.rocks/smartai',
version: '0.3.1',
description: 'A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.'
version: '0.5.1',
description: 'SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.'
}


@ -48,6 +48,18 @@ export class Conversation {
return conversation;
}
public static async createWithExo(smartaiRefArg: SmartAi) {
if (!smartaiRefArg.exoProvider) {
throw new Error('Exo provider not available');
}
const conversation = new Conversation(smartaiRefArg, {
processFunction: async (input) => {
return '' // TODO implement proper streaming
}
});
return conversation;
}
public static async createWithOllama(smartaiRefArg: SmartAi) {
if (!smartaiRefArg.ollamaProvider) {
throw new Error('Ollama provider not available');
@ -60,6 +72,30 @@ export class Conversation {
return conversation;
}
public static async createWithGroq(smartaiRefArg: SmartAi) {
if (!smartaiRefArg.groqProvider) {
throw new Error('Groq provider not available');
}
const conversation = new Conversation(smartaiRefArg, {
processFunction: async (input) => {
return '' // TODO implement proper streaming
}
});
return conversation;
}
public static async createWithXai(smartaiRefArg: SmartAi) {
if (!smartaiRefArg.xaiProvider) {
throw new Error('XAI provider not available');
}
const conversation = new Conversation(smartaiRefArg, {
processFunction: async (input) => {
return '' // TODO implement proper streaming
}
});
return conversation;
}
// INSTANCE
smartaiRef: SmartAi
private systemMessage: string;


@ -1,18 +1,32 @@
import { Conversation } from './classes.conversation.js';
import * as plugins from './plugins.js';
import { AnthropicProvider } from './provider.anthropic.js';
import type { OllamaProvider } from './provider.ollama.js';
import { OllamaProvider } from './provider.ollama.js';
import { OpenAiProvider } from './provider.openai.js';
import type { PerplexityProvider } from './provider.perplexity.js';
import { PerplexityProvider } from './provider.perplexity.js';
import { ExoProvider } from './provider.exo.js';
import { GroqProvider } from './provider.groq.js';
import { XAIProvider } from './provider.xai.js';
export interface ISmartAiOptions {
openaiToken?: string;
anthropicToken?: string;
perplexityToken?: string;
groqToken?: string;
xaiToken?: string;
exo?: {
baseUrl?: string;
apiKey?: string;
};
ollama?: {
baseUrl?: string;
model?: string;
visionModel?: string;
};
}
export type TProvider = 'openai' | 'anthropic' | 'perplexity' | 'ollama';
export type TProvider = 'openai' | 'anthropic' | 'perplexity' | 'ollama' | 'exo' | 'groq' | 'xai';
export class SmartAi {
public options: ISmartAiOptions;
@ -21,6 +35,9 @@ export class SmartAi {
public anthropicProvider: AnthropicProvider;
public perplexityProvider: PerplexityProvider;
public ollamaProvider: OllamaProvider;
public exoProvider: ExoProvider;
public groqProvider: GroqProvider;
public xaiProvider: XAIProvider;
constructor(optionsArg: ISmartAiOptions) {
this.options = optionsArg;
@ -37,6 +54,40 @@ export class SmartAi {
this.anthropicProvider = new AnthropicProvider({
anthropicToken: this.options.anthropicToken,
});
await this.anthropicProvider.start();
}
if (this.options.perplexityToken) {
this.perplexityProvider = new PerplexityProvider({
perplexityToken: this.options.perplexityToken,
});
await this.perplexityProvider.start();
}
if (this.options.groqToken) {
this.groqProvider = new GroqProvider({
groqToken: this.options.groqToken,
});
await this.groqProvider.start();
}
if (this.options.xaiToken) {
this.xaiProvider = new XAIProvider({
xaiToken: this.options.xaiToken,
});
await this.xaiProvider.start();
}
if (this.options.ollama) {
this.ollamaProvider = new OllamaProvider({
baseUrl: this.options.ollama.baseUrl,
model: this.options.ollama.model,
visionModel: this.options.ollama.visionModel,
});
await this.ollamaProvider.start();
}
if (this.options.exo) {
this.exoProvider = new ExoProvider({
exoBaseUrl: this.options.exo.baseUrl,
apiKey: this.options.exo.apiKey,
});
await this.exoProvider.start();
}
}
@ -47,6 +98,8 @@ export class SmartAi {
*/
createConversation(provider: TProvider) {
switch (provider) {
case 'exo':
return Conversation.createWithExo(this);
case 'openai':
return Conversation.createWithOpenAi(this);
case 'anthropic':
@ -55,6 +108,10 @@ export class SmartAi {
return Conversation.createWithPerplexity(this);
case 'ollama':
return Conversation.createWithOllama(this);
case 'groq':
return Conversation.createWithGroq(this);
case 'xai':
return Conversation.createWithXai(this);
default:
throw new Error('Provider not available');
}

ts/provider.exo.ts (new file, 128 lines)

@ -0,0 +1,128 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';
export interface IExoProviderOptions {
exoBaseUrl?: string;
apiKey?: string;
}
export class ExoProvider extends MultiModalModel {
private options: IExoProviderOptions;
public openAiApiClient: plugins.openai.default;
constructor(optionsArg: IExoProviderOptions = {}) {
super();
this.options = {
exoBaseUrl: 'http://localhost:8080/v1', // Default Exo API endpoint
...optionsArg
};
}
public async start() {
this.openAiApiClient = new plugins.openai.default({
apiKey: this.options.apiKey || 'not-needed', // Exo might not require an API key for local deployment
baseURL: this.options.exoBaseUrl,
});
}
public async stop() {}
public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
// Create a TextDecoder to handle incoming chunks
const decoder = new TextDecoder();
let buffer = '';
let currentMessage: { role: string; content: string; } | null = null;
// Create a TransformStream to process the input
const transform = new TransformStream<Uint8Array, string>({
transform: async (chunk, controller) => {
buffer += decoder.decode(chunk, { stream: true });
// Try to parse complete JSON messages from the buffer
while (true) {
const newlineIndex = buffer.indexOf('\n');
if (newlineIndex === -1) break;
const line = buffer.slice(0, newlineIndex);
buffer = buffer.slice(newlineIndex + 1);
if (line.trim()) {
try {
const message = JSON.parse(line);
currentMessage = message;
// Process the message based on its type
if (message.type === 'message') {
const response = await this.chat({
systemMessage: '',
userMessage: message.content,
messageHistory: [{ role: message.role as 'user' | 'assistant' | 'system', content: message.content }]
});
controller.enqueue(JSON.stringify(response) + '\n');
}
} catch (error) {
console.error('Error processing message:', error);
}
}
}
},
flush(controller) {
if (buffer) {
try {
const message = JSON.parse(buffer);
currentMessage = message;
} catch (error) {
console.error('Error processing remaining buffer:', error);
}
}
}
});
return input.pipeThrough(transform);
}
public async chat(options: ChatOptions): Promise<ChatResponse> {
const messages: ChatCompletionMessageParam[] = [
{ role: 'system', content: options.systemMessage },
...options.messageHistory,
{ role: 'user', content: options.userMessage }
];
try {
const response = await this.openAiApiClient.chat.completions.create({
model: 'local-model', // Exo uses local models
messages: messages,
stream: false
});
return {
role: 'assistant',
message: response.choices[0]?.message?.content || ''
};
} catch (error) {
console.error('Error in chat completion:', error);
throw error;
}
}
public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
throw new Error('Audio generation is not supported by Exo provider');
}
public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
throw new Error('Vision processing is not supported by Exo provider');
}
public async document(optionsArg: {
systemMessage: string;
userMessage: string;
pdfDocuments: Uint8Array[];
messageHistory: ChatMessage[];
}): Promise<{ message: any }> {
throw new Error('Document processing is not supported by Exo provider');
}
}


@ -32,7 +32,7 @@ export class GroqProvider extends MultiModalModel {
// Create a TransformStream to process the input
const transform = new TransformStream<Uint8Array, string>({
async transform(chunk, controller) {
transform: async (chunk, controller) => {
buffer += decoder.decode(chunk, { stream: true });
// Try to parse complete JSON messages from the buffer


@ -45,7 +45,7 @@ export class OllamaProvider extends MultiModalModel {
// Create a TransformStream to process the input
const transform = new TransformStream<Uint8Array, string>({
async transform(chunk, controller) {
transform: async (chunk, controller) => {
buffer += decoder.decode(chunk, { stream: true });
// Try to parse complete JSON messages from the buffer


@ -1,10 +1,20 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
// Custom type definition for chat completion messages
export type TChatCompletionRequestMessage = {
role: "system" | "user" | "assistant";
content: string;
};
import { MultiModalModel } from './abstract.classes.multimodal.js';
export interface IOpenaiProviderOptions {
openaiToken: string;
chatModel?: string;
audioModel?: string;
visionModel?: string;
// Optionally add more model options (e.g., documentModel) if needed.
}
export class OpenAiProvider extends MultiModalModel {
@ -31,11 +41,14 @@ export class OpenAiProvider extends MultiModalModel {
// Create a TextDecoder to handle incoming chunks
const decoder = new TextDecoder();
let buffer = '';
let currentMessage: { role: string; content: string; } | null = null;
let currentMessage: {
role: "function" | "user" | "system" | "assistant" | "tool" | "developer";
content: string;
} | null = null;
// Create a TransformStream to process the input
const transform = new TransformStream<Uint8Array, string>({
async transform(chunk, controller) {
transform: async (chunk, controller) => {
buffer += decoder.decode(chunk, { stream: true });
// Try to parse complete JSON messages from the buffer
@ -50,7 +63,7 @@ export class OpenAiProvider extends MultiModalModel {
try {
const message = JSON.parse(line);
currentMessage = {
role: message.role || 'user',
role: (message.role || 'user') as "function" | "user" | "system" | "assistant" | "tool" | "developer",
content: message.content || '',
};
} catch (e) {
@ -61,20 +74,24 @@ export class OpenAiProvider extends MultiModalModel {
// If we have a complete message, send it to OpenAI
if (currentMessage) {
const stream = await this.openAiApiClient.chat.completions.create({
model: 'gpt-4',
messages: [{ role: currentMessage.role, content: currentMessage.content }],
const messageToSend = { role: "user" as const, content: currentMessage.content };
const chatModel = this.options.chatModel ?? 'o3-mini';
const requestParams: any = {
model: chatModel,
messages: [messageToSend],
stream: true,
});
};
// Temperature is omitted since the model does not support it.
const stream = await this.openAiApiClient.chat.completions.create(requestParams);
// Explicitly cast the stream as an async iterable to satisfy TypeScript.
const streamAsyncIterable = stream as unknown as AsyncIterableIterator<any>;
// Process each chunk from OpenAI
for await (const chunk of stream) {
for await (const chunk of streamAsyncIterable) {
const content = chunk.choices[0]?.delta?.content;
if (content) {
controller.enqueue(content);
}
}
currentMessage = null;
}
},
@ -104,15 +121,17 @@ export class OpenAiProvider extends MultiModalModel {
content: string;
}[];
}) {
const result = await this.openAiApiClient.chat.completions.create({
model: 'gpt-4o',
const chatModel = this.options.chatModel ?? 'o3-mini';
const requestParams: any = {
model: chatModel,
messages: [
{ role: 'system', content: optionsArg.systemMessage },
...optionsArg.messageHistory,
{ role: 'user', content: optionsArg.userMessage },
],
});
};
// Temperature parameter removed to avoid unsupported error.
const result = await this.openAiApiClient.chat.completions.create(requestParams);
return {
role: result.choices[0].message.role as 'assistant',
message: result.choices[0].message.content,
@ -122,7 +141,7 @@ export class OpenAiProvider extends MultiModalModel {
public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
const done = plugins.smartpromise.defer<NodeJS.ReadableStream>();
const result = await this.openAiApiClient.audio.speech.create({
model: 'tts-1-hd',
model: this.options.audioModel ?? 'tts-1-hd',
input: optionsArg.message,
voice: 'nova',
response_format: 'mp3',
@ -144,27 +163,30 @@ export class OpenAiProvider extends MultiModalModel {
}) {
let pdfDocumentImageBytesArray: Uint8Array[] = [];
// Convert each PDF into one or more image byte arrays.
const smartpdfInstance = new plugins.smartpdf.SmartPdf();
await smartpdfInstance.start();
for (const pdfDocument of optionsArg.pdfDocuments) {
const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
const documentImageArray = await smartpdfInstance.convertPDFToPngBytes(pdfDocument);
pdfDocumentImageBytesArray = pdfDocumentImageBytesArray.concat(documentImageArray);
}
await smartpdfInstance.stop();
console.log(`image smartfile array`);
console.log(pdfDocumentImageBytesArray.map((smartfile) => smartfile.length));
const smartfileArray = await plugins.smartarray.map(
pdfDocumentImageBytesArray,
async (pdfDocumentImageBytes) => {
return plugins.smartfile.SmartFile.fromBuffer(
'pdfDocumentImage.jpg',
Buffer.from(pdfDocumentImageBytes)
);
}
);
// Filter out any empty buffers to avoid sending invalid image URLs.
const validImageBytesArray = pdfDocumentImageBytesArray.filter(imageBytes => imageBytes && imageBytes.length > 0);
const imageAttachments = validImageBytesArray.map(imageBytes => ({
type: 'image_url',
image_url: {
url: 'data:image/png;base64,' + Buffer.from(imageBytes).toString('base64'),
},
}));
const result = await this.openAiApiClient.chat.completions.create({
model: 'gpt-4o',
// response_format: { type: "json_object" }, // not supported for now
const chatModel = this.options.chatModel ?? 'gpt-4o';
const requestParams: any = {
model: chatModel,
messages: [
{ role: 'system', content: optionsArg.systemMessage },
...optionsArg.messageHistory,
@ -172,30 +194,22 @@ export class OpenAiProvider extends MultiModalModel {
role: 'user',
content: [
{ type: 'text', text: optionsArg.userMessage },
...(() => {
const returnArray = [];
for (const imageBytes of pdfDocumentImageBytesArray) {
returnArray.push({
type: 'image_url',
image_url: {
url: 'data:image/png;base64,' + Buffer.from(imageBytes).toString('base64'),
},
});
}
return returnArray;
})(),
...imageAttachments,
],
},
],
});
};
// Temperature parameter removed.
const result = await this.openAiApiClient.chat.completions.create(requestParams);
return {
message: result.choices[0].message,
};
}
public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
const result = await this.openAiApiClient.chat.completions.create({
model: 'gpt-4-vision-preview',
const visionModel = this.options.visionModel ?? 'gpt-4o';
const requestParams: any = {
model: visionModel,
messages: [
{
role: 'user',
@ -211,8 +225,8 @@ export class OpenAiProvider extends MultiModalModel {
}
],
max_tokens: 300
});
};
const result = await this.openAiApiClient.chat.completions.create(requestParams);
return result.choices[0].message.content || '';
}
}
}