fix(provider): Fix provider modules for consistency

Philipp Kunz 2025-02-25 13:01:23 +00:00
parent 6916dd9e2a
commit 5dead05324
8 changed files with 4302 additions and 1272 deletions


@@ -1,5 +1,12 @@
# Changelog
+ ## 2025-02-25 - 0.4.1 - fix(provider)
+ Fix provider modules for consistency
+ - Updated TypeScript interfaces and options in provider modules for better type safety.
+ - Modified transform stream handlers in Exo, Groq, and Ollama providers for consistency.
+ - Added optional model options to OpenAI provider for custom model usage.
## 2025-02-08 - 0.4.0 - feat(core)
Added support for Exo AI provider


@@ -14,24 +14,24 @@
"buildDocs": "(tsdoc)"
},
"devDependencies": {
"@git.zone/tsbuild": "^2.1.84",
"@git.zone/tsbundle": "^2.0.5",
"@git.zone/tsrun": "^1.2.49",
"@git.zone/tstest": "^1.0.90",
"@push.rocks/qenv": "^6.0.5",
"@push.rocks/tapbundle": "^5.3.0",
"@types/node": "^22.5.5"
"@git.zone/tsbuild": "^2.2.1",
"@git.zone/tsbundle": "^2.2.5",
"@git.zone/tsrun": "^1.3.3",
"@git.zone/tstest": "^1.0.96",
"@push.rocks/qenv": "^6.1.0",
"@push.rocks/tapbundle": "^5.5.6",
"@types/node": "^22.13.5"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.27.3",
"@push.rocks/smartarray": "^1.0.8",
"@push.rocks/smartfile": "^11.0.21",
"@anthropic-ai/sdk": "^0.37.0",
"@push.rocks/smartarray": "^1.1.0",
"@push.rocks/smartfile": "^11.2.0",
"@push.rocks/smartpath": "^5.0.18",
"@push.rocks/smartpdf": "^3.1.6",
"@push.rocks/smartpromise": "^4.0.4",
"@push.rocks/smartrequest": "^2.0.22",
"@push.rocks/smartpdf": "^3.1.8",
"@push.rocks/smartpromise": "^4.2.3",
"@push.rocks/smartrequest": "^2.0.23",
"@push.rocks/webstream": "^1.0.10",
"openai": "^4.62.1"
"openai": "^4.85.4"
},
"repository": {
"type": "git",

pnpm-lock.yaml (generated, 5491 lines changed): file diff suppressed because it is too large.


@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@push.rocks/smartai',
- version: '0.4.0',
+ version: '0.4.1',
description: 'A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.'
}


@@ -38,7 +38,7 @@ export class ExoProvider extends MultiModalModel {
// Create a TransformStream to process the input
const transform = new TransformStream<Uint8Array, string>({
- async transform(chunk, controller) {
+ transform: async (chunk, controller) => {
buffer += decoder.decode(chunk, { stream: true });
// Try to parse complete JSON messages from the buffer
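
Note: the same handler change appears in the Groq and Ollama providers below. Switching from the method shorthand "async transform(chunk, controller)" to a "transform:" arrow-function property keeps the enclosing "this" (the provider instance) instead of rebinding it to the transformer object. A minimal self-contained sketch of the resulting pattern; the buffer handling here is illustrative, not the file's exact code:

// Sketch: an arrow-function transform captures the surrounding scope and `this`.
const decoder = new TextDecoder();
let buffer = '';
const transform = new TransformStream<Uint8Array, string>({
  transform: async (chunk, controller) => {
    buffer += decoder.decode(chunk, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop() ?? ''; // keep any trailing partial line for the next chunk
    for (const line of lines) {
      if (line.trim()) controller.enqueue(line); // forward complete lines downstream
    }
  },
});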


@@ -32,7 +32,7 @@ export class GroqProvider extends MultiModalModel {
// Create a TransformStream to process the input
const transform = new TransformStream<Uint8Array, string>({
- async transform(chunk, controller) {
+ transform: async (chunk, controller) => {
buffer += decoder.decode(chunk, { stream: true });
// Try to parse complete JSON messages from the buffer


@@ -45,7 +45,7 @@ export class OllamaProvider extends MultiModalModel {
// Create a TransformStream to process the input
const transform = new TransformStream<Uint8Array, string>({
- async transform(chunk, controller) {
+ transform: async (chunk, controller) => {
buffer += decoder.decode(chunk, { stream: true });
// Try to parse complete JSON messages from the buffer


@@ -1,10 +1,20 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
- // Custom type definition for chat completion messages
- export type TChatCompletionRequestMessage = {
- role: "system" | "user" | "assistant";
- content: string;
- };
import { MultiModalModel } from './abstract.classes.multimodal.js';
+ export interface IOpenaiProviderOptions {
+ openaiToken: string;
+ chatModel?: string;
+ audioModel?: string;
+ visionModel?: string;
+ // Optionally add more model options (e.g., documentModel) if needed.
+ }
export class OpenAiProvider extends MultiModalModel {
@@ -31,11 +41,14 @@ export class OpenAiProvider extends MultiModalModel {
// Create a TextDecoder to handle incoming chunks
const decoder = new TextDecoder();
let buffer = '';
- let currentMessage: { role: string; content: string; } | null = null;
+ let currentMessage: {
+ role: "function" | "user" | "system" | "assistant" | "tool" | "developer";
+ content: string;
+ } | null = null;
// Create a TransformStream to process the input
const transform = new TransformStream<Uint8Array, string>({
- async transform(chunk, controller) {
+ transform: async (chunk, controller) => {
buffer += decoder.decode(chunk, { stream: true });
// Try to parse complete JSON messages from the buffer
@@ -50,7 +63,7 @@ export class OpenAiProvider extends MultiModalModel {
try {
const message = JSON.parse(line);
currentMessage = {
- role: message.role || 'user',
+ role: (message.role || 'user') as "function" | "user" | "system" | "assistant" | "tool" | "developer",
content: message.content || '',
};
} catch (e) {
@@ -61,9 +74,11 @@ export class OpenAiProvider extends MultiModalModel {
// If we have a complete message, send it to OpenAI
if (currentMessage) {
+ const messageToSend = { role: "user" as const, content: currentMessage.content };
const stream = await this.openAiApiClient.chat.completions.create({
- model: 'gpt-4',
- messages: [{ role: currentMessage.role, content: currentMessage.content }],
+ model: this.options.chatModel ?? 'o3-mini',
+ temperature: 0,
+ messages: [messageToSend],
stream: true,
});
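
Note: the code that relays this SDK stream back into the transform's output sits outside the hunk. With stream: true, the openai v4 SDK returns an async iterable of chunks; a typical relay loop looks like the sketch below (an assumption about the surrounding code, not a quote from it):

// Sketch: forward streamed completion deltas to the transform controller.
for await (const chunk of stream) {
  const delta = chunk.choices[0]?.delta?.content;
  if (delta) {
    controller.enqueue(delta); // emit partial text as it arrives
  }
}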
@@ -105,8 +120,8 @@
}[];
}) {
const result = await this.openAiApiClient.chat.completions.create({
- model: 'gpt-4o',
+ model: this.options.chatModel ?? 'o3-mini',
+ temperature: 0,
messages: [
{ role: 'system', content: optionsArg.systemMessage },
...optionsArg.messageHistory,
@@ -122,7 +137,7 @@
public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
const done = plugins.smartpromise.defer<NodeJS.ReadableStream>();
const result = await this.openAiApiClient.audio.speech.create({
- model: 'tts-1-hd',
+ model: this.options.audioModel ?? 'o3-mini',
input: optionsArg.message,
voice: 'nova',
response_format: 'mp3',
@@ -163,8 +178,8 @@
);
const result = await this.openAiApiClient.chat.completions.create({
- model: 'gpt-4o',
- // response_format: { type: "json_object" }, // not supported for now
+ model: this.options.chatModel ?? 'o3-mini',
+ temperature: 0,
messages: [
{ role: 'system', content: optionsArg.systemMessage },
...optionsArg.messageHistory,
@@ -195,7 +210,8 @@
public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
const result = await this.openAiApiClient.chat.completions.create({
- model: 'gpt-4-vision-preview',
+ model: this.options.visionModel ?? 'o3-mini',
+ temperature: 0,
messages: [
{
role: 'user',
@@ -215,4 +231,4 @@
return result.choices[0].message.content || '';
}
- }
+ }
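
Taken together, the new IOpenaiProviderOptions makes the models configurable per provider instance. A hedged usage sketch follows; it assumes the options are stored on this.options, that the provider exposes a start() initializer like the package's other providers, and that chat() accepts a userMessage field. Those names are inferred, not confirmed by this diff:

import { OpenAiProvider } from '@push.rocks/smartai';

const provider = new OpenAiProvider({
  openaiToken: process.env.OPENAI_TOKEN ?? '', // token sourcing is illustrative
  chatModel: 'gpt-4o', // overrides the 'o3-mini' fallback used by chat()/document()
  visionModel: 'gpt-4o',
  // audioModel left unset: audio() falls back to its default
});

await provider.start(); // assumed initializer
const reply = await provider.chat({
  systemMessage: 'You are a terse assistant.',
  userMessage: 'Summarize the 0.4.1 provider changes in one line.', // assumed field name
  messageHistory: [],
});
console.log(reply);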