feat(core): Added support for Exo AI provider

parent d93b198b09
commit f89888a542
changelog.md

@@ -1,5 +1,12 @@
 # Changelog
 
+## 2025-02-08 - 0.4.0 - feat(core)
+Added support for Exo AI provider
+
+- Introduced ExoProvider with chat functionality.
+- Updated SmartAi class to initialize ExoProvider.
+- Extended Conversation class to support ExoProvider.
+
 ## 2025-02-05 - 0.3.3 - fix(documentation)
 Update readme with detailed license and legal information.
readme.md (+17)
@@ -110,6 +110,19 @@ SmartAi supports multiple AI providers. Configure each provider with its corresp
 }
 ```
 
+### Exo
+
+- **Models:** Configurable (supports LLaMA, Mistral, LLaVA, Qwen, and DeepSeek)
+- **Features:** Chat, Streaming
+- **Configuration Example:**
+
+```typescript
+exo: {
+  baseUrl: 'http://localhost:8080/v1', // Optional
+  apiKey: 'your-api-key' // Optional for local deployments
+}
+```
+
 ## Quick Start
 
 Initialize SmartAi with the provider configurations you plan to use:

@@ -126,6 +139,10 @@ const smartAi = new SmartAi({
   ollama: {
     baseUrl: 'http://localhost:11434',
     model: 'llama2'
   },
+  exo: {
+    baseUrl: 'http://localhost:8080/v1',
+    apiKey: 'your-api-key'
+  }
 });
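A minimal end-to-end sketch of the new Exo path, assuming SmartAi exposes an async start() that performs the provider initialization shown in the classes.smartai.ts hunk below (the diff does not show that method's signature):

```typescript
import { SmartAi } from '@push.rocks/smartai';

const smartAi = new SmartAi({
  exo: {
    baseUrl: 'http://localhost:8080/v1',
    apiKey: 'your-api-key' // optional for local deployments
  }
});

// Assumed entry point: the provider wiring added in classes.smartai.ts
// (new ExoProvider(...) followed by exoProvider.start()) runs here.
await smartAi.start();

// createConversation('exo') is added by this commit and returns a
// Conversation backed by the ExoProvider instance.
const conversation = await smartAi.createConversation('exo');
```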
ts/00_commitinfo_data.ts

@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@push.rocks/smartai',
-  version: '0.3.3',
+  version: '0.4.0',
   description: 'A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.'
 }
ts/classes.conversation.ts

@@ -48,6 +48,18 @@ export class Conversation {
     return conversation;
   }
 
+  public static async createWithExo(smartaiRefArg: SmartAi) {
+    if (!smartaiRefArg.exoProvider) {
+      throw new Error('Exo provider not available');
+    }
+    const conversation = new Conversation(smartaiRefArg, {
+      processFunction: async (input) => {
+        return '' // TODO implement proper streaming
+      }
+    });
+    return conversation;
+  }
+
   public static async createWithOllama(smartaiRefArg: SmartAi) {
     if (!smartaiRefArg.ollamaProvider) {
       throw new Error('Ollama provider not available');

@@ -60,6 +72,30 @@ export class Conversation {
     return conversation;
   }
 
+  public static async createWithGroq(smartaiRefArg: SmartAi) {
+    if (!smartaiRefArg.groqProvider) {
+      throw new Error('Groq provider not available');
+    }
+    const conversation = new Conversation(smartaiRefArg, {
+      processFunction: async (input) => {
+        return '' // TODO implement proper streaming
+      }
+    });
+    return conversation;
+  }
+
+  public static async createWithXai(smartaiRefArg: SmartAi) {
+    if (!smartaiRefArg.xaiProvider) {
+      throw new Error('XAI provider not available');
+    }
+    const conversation = new Conversation(smartaiRefArg, {
+      processFunction: async (input) => {
+        return '' // TODO implement proper streaming
+      }
+    });
+    return conversation;
+  }
+
   // INSTANCE
   smartaiRef: SmartAi
   private systemMessage: string;
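The factory methods above stub out processFunction with an empty string pending streaming support. As a rough sketch of where this could go (an assumption, not part of the commit), a non-streaming processFunction might delegate to the provider's chat() method added in ts/provider.exo.ts:

```typescript
import { SmartAi } from './classes.smartai.js';
import { Conversation } from './classes.conversation.js';

// Hypothetical variant of createWithExo without the empty-string TODO.
// Assumes processFunction receives the user's message as a plain string
// and that the Conversation constructor is callable here; message-history
// plumbing is omitted from this sketch.
export async function createWithExoChat(smartaiRefArg: SmartAi) {
  if (!smartaiRefArg.exoProvider) {
    throw new Error('Exo provider not available');
  }
  return new Conversation(smartaiRefArg, {
    processFunction: async (input: string) => {
      const response = await smartaiRefArg.exoProvider.chat({
        systemMessage: '',
        userMessage: input,
        messageHistory: [],
      });
      return response.message;
    },
  });
}
```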
ts/classes.smartai.ts

@@ -1,18 +1,32 @@
 import { Conversation } from './classes.conversation.js';
 import * as plugins from './plugins.js';
 import { AnthropicProvider } from './provider.anthropic.js';
-import type { OllamaProvider } from './provider.ollama.js';
+import { OllamaProvider } from './provider.ollama.js';
 import { OpenAiProvider } from './provider.openai.js';
-import type { PerplexityProvider } from './provider.perplexity.js';
+import { PerplexityProvider } from './provider.perplexity.js';
+import { ExoProvider } from './provider.exo.js';
+import { GroqProvider } from './provider.groq.js';
+import { XAIProvider } from './provider.xai.js';
 
 
 export interface ISmartAiOptions {
   openaiToken?: string;
   anthropicToken?: string;
   perplexityToken?: string;
+  groqToken?: string;
+  xaiToken?: string;
+  exo?: {
+    baseUrl?: string;
+    apiKey?: string;
+  };
+  ollama?: {
+    baseUrl?: string;
+    model?: string;
+    visionModel?: string;
+  };
 }
 
-export type TProvider = 'openai' | 'anthropic' | 'perplexity' | 'ollama';
+export type TProvider = 'openai' | 'anthropic' | 'perplexity' | 'ollama' | 'exo' | 'groq' | 'xai';
 
 export class SmartAi {
   public options: ISmartAiOptions;

@@ -21,6 +35,9 @@ export class SmartAi {
   public anthropicProvider: AnthropicProvider;
   public perplexityProvider: PerplexityProvider;
   public ollamaProvider: OllamaProvider;
+  public exoProvider: ExoProvider;
+  public groqProvider: GroqProvider;
+  public xaiProvider: XAIProvider;
 
   constructor(optionsArg: ISmartAiOptions) {
     this.options = optionsArg;

@@ -37,6 +54,40 @@ export class SmartAi {
       this.anthropicProvider = new AnthropicProvider({
         anthropicToken: this.options.anthropicToken,
       });
       await this.anthropicProvider.start();
     }
+    if (this.options.perplexityToken) {
+      this.perplexityProvider = new PerplexityProvider({
+        perplexityToken: this.options.perplexityToken,
+      });
+      await this.perplexityProvider.start();
+    }
+    if (this.options.groqToken) {
+      this.groqProvider = new GroqProvider({
+        groqToken: this.options.groqToken,
+      });
+      await this.groqProvider.start();
+    }
+    if (this.options.xaiToken) {
+      this.xaiProvider = new XAIProvider({
+        xaiToken: this.options.xaiToken,
+      });
+      await this.xaiProvider.start();
+    }
+    if (this.options.ollama) {
+      this.ollamaProvider = new OllamaProvider({
+        baseUrl: this.options.ollama.baseUrl,
+        model: this.options.ollama.model,
+        visionModel: this.options.ollama.visionModel,
+      });
+      await this.ollamaProvider.start();
+    }
+    if (this.options.exo) {
+      this.exoProvider = new ExoProvider({
+        exoBaseUrl: this.options.exo.baseUrl,
+        apiKey: this.options.exo.apiKey,
+      });
+      await this.exoProvider.start();
+    }
   }

@@ -47,6 +98,8 @@ export class SmartAi {
    */
   createConversation(provider: TProvider) {
     switch (provider) {
+      case 'exo':
+        return Conversation.createWithExo(this);
       case 'openai':
         return Conversation.createWithOpenAi(this);
       case 'anthropic':

@@ -55,6 +108,10 @@ export class SmartAi {
         return Conversation.createWithPerplexity(this);
       case 'ollama':
         return Conversation.createWithOllama(this);
+      case 'groq':
+        return Conversation.createWithGroq(this);
+      case 'xai':
+        return Conversation.createWithXai(this);
       default:
         throw new Error('Provider not available');
     }
ts/provider.exo.ts (new file, +128)

@@ -0,0 +1,128 @@
+import * as plugins from './plugins.js';
+import * as paths from './paths.js';
+import { MultiModalModel } from './abstract.classes.multimodal.js';
+import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
+import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';
+
+export interface IExoProviderOptions {
+  exoBaseUrl?: string;
+  apiKey?: string;
+}
+
+export class ExoProvider extends MultiModalModel {
+  private options: IExoProviderOptions;
+  public openAiApiClient: plugins.openai.default;
+
+  constructor(optionsArg: IExoProviderOptions = {}) {
+    super();
+    this.options = {
+      exoBaseUrl: 'http://localhost:8080/v1', // Default Exo API endpoint
+      ...optionsArg
+    };
+  }
+
+  public async start() {
+    this.openAiApiClient = new plugins.openai.default({
+      apiKey: this.options.apiKey || 'not-needed', // Exo might not require an API key for local deployment
+      baseURL: this.options.exoBaseUrl,
+    });
+  }
+
+  public async stop() {}
+
+  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
+    // Create a TextDecoder to handle incoming chunks
+    const decoder = new TextDecoder();
+    let buffer = '';
+    let currentMessage: { role: string; content: string; } | null = null;
+
+    // Create a TransformStream to process the input; arrow callbacks keep
+    // `this` bound to the provider so the chat() call below works
+    const transform = new TransformStream<Uint8Array, string>({
+      transform: async (chunk, controller) => {
+        buffer += decoder.decode(chunk, { stream: true });
+
+        // Try to parse complete JSON messages from the buffer
+        while (true) {
+          const newlineIndex = buffer.indexOf('\n');
+          if (newlineIndex === -1) break;
+
+          const line = buffer.slice(0, newlineIndex);
+          buffer = buffer.slice(newlineIndex + 1);
+
+          if (line.trim()) {
+            try {
+              const message = JSON.parse(line);
+              currentMessage = message;
+
+              // Process the message based on its type
+              if (message.type === 'message') {
+                const response = await this.chat({
+                  systemMessage: '',
+                  userMessage: message.content,
+                  messageHistory: [{ role: message.role as 'user' | 'assistant' | 'system', content: message.content }]
+                });
+
+                controller.enqueue(JSON.stringify(response) + '\n');
+              }
+            } catch (error) {
+              console.error('Error processing message:', error);
+            }
+          }
+        }
+      },
+      flush: (controller) => {
+        if (buffer) {
+          try {
+            const message = JSON.parse(buffer);
+            currentMessage = message;
+          } catch (error) {
+            console.error('Error processing remaining buffer:', error);
+          }
+        }
+      }
+    });
+
+    return input.pipeThrough(transform);
+  }
+
+  public async chat(options: ChatOptions): Promise<ChatResponse> {
+    const messages: ChatCompletionMessageParam[] = [
+      { role: 'system', content: options.systemMessage },
+      ...options.messageHistory,
+      { role: 'user', content: options.userMessage }
+    ];
+
+    try {
+      const response = await this.openAiApiClient.chat.completions.create({
+        model: 'local-model', // Exo uses local models
+        messages: messages,
+        stream: false
+      });
+
+      return {
+        role: 'assistant',
+        message: response.choices[0]?.message?.content || ''
+      };
+    } catch (error) {
+      console.error('Error in chat completion:', error);
+      throw error;
+    }
+  }
+
+  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
+    throw new Error('Audio generation is not supported by Exo provider');
+  }
+
+  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
+    throw new Error('Vision processing is not supported by Exo provider');
+  }
+
+  public async document(optionsArg: {
+    systemMessage: string;
+    userMessage: string;
+    pdfDocuments: Uint8Array[];
+    messageHistory: ChatMessage[];
+  }): Promise<{ message: any }> {
+    throw new Error('Document processing is not supported by Exo provider');
+  }
+}
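For orientation, a hedged usage sketch of ExoProvider on its own (not shown in the commit); the prompts are illustrative and the endpoint assumes a local Exo node:

```typescript
import { ExoProvider } from './provider.exo.js';

// Both options are optional; exoBaseUrl defaults to http://localhost:8080/v1
// as set in the constructor above.
const exo = new ExoProvider({});
await exo.start();

// chat() forwards to the OpenAI-compatible /chat/completions endpoint that
// an Exo node exposes and resolves to { role, message }.
const reply = await exo.chat({
  systemMessage: 'You are a helpful assistant.',
  userMessage: 'Say hello in one short sentence.',
  messageHistory: [],
});
console.log(reply.message);

// chatStream() consumes newline-delimited JSON of the shape
// {"type":"message","role":"user","content":"..."} (see the transform above)
// and emits one JSON-encoded ChatResponse per line.
```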