Compare commits

4 Commits

| Author | SHA1 | Date |
|---|---|---|
| | ae8d3ccf33 | |
| | 3b900d0ba9 | |
| | d49152390f | |
| | d615ec9227 | |

```diff
@@ -1,5 +1,12 @@
 # Changelog
 
+## 2026-01-20 - 0.12.0 - feat(ollama)
+add support for base64-encoded images in chat messages and forward them to the Ollama provider
+
+- Add optional images?: string[] to ChatMessage and ChatOptions interfaces (multimodal/vision support)
+- Propagate images from messageHistory and ChatOptions to the Ollama API payload in chat, chatStreaming, and streaming handlers
+- Changes are non-breaking: images are optional and existing behavior is preserved when absent
+
 ## 2026-01-20 - 0.11.0 - feat(ollama)
 support defaultOptions and defaultTimeout for ollama provider
 
```

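For orientation, a minimal usage sketch of the new `images` field from a caller's perspective. It is not part of this changeset: the `ollama` provider instance, the `describeImage` helper, and the file path are illustrative assumptions, while the option and response field names follow the interfaces modified below.

```ts
// Sketch only: assumes a started OllamaProvider-compatible instance `ollama`
// whose target model is vision-capable (e.g. llava); construction is outside
// this diff. Field names mirror ChatOptions/ChatResponse from this changeset.
import { readFile } from 'node:fs/promises';

interface ChatCapable {
  chat(optionsArg: {
    systemMessage: string;
    userMessage: string;
    messageHistory: { role: 'assistant' | 'user' | 'system'; content: string; images?: string[] }[];
    images?: string[];
  }): Promise<{ role: 'assistant'; message: string }>;
}

async function describeImage(ollama: ChatCapable): Promise<string> {
  // Ollama expects plain base64 strings (no "data:image/..." prefix)
  const imageBase64 = (await readFile('./photo.jpg')).toString('base64');

  const response = await ollama.chat({
    systemMessage: 'You are a helpful vision assistant.',
    userMessage: 'What is shown in this picture?',
    messageHistory: [],
    images: [imageBase64], // optional field introduced in 0.12.0
  });

  return response.message;
}
```
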
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@push.rocks/smartai",
-  "version": "0.11.0",
+  "version": "0.12.0",
   "private": false,
   "description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
   "main": "dist_ts/index.js",
```

```diff
@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@push.rocks/smartai',
-  version: '0.11.0',
+  version: '0.12.0',
   description: 'SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.'
 }
```

```diff
@@ -6,6 +6,8 @@ import * as plugins from './plugins.js';
 export interface ChatMessage {
   role: 'assistant' | 'user' | 'system';
   content: string;
+  /** Base64-encoded images for vision-capable models */
+  images?: string[];
 }
 
 /**
```

```diff
@@ -15,6 +17,16 @@ export interface ChatOptions {
   systemMessage: string;
   userMessage: string;
   messageHistory: ChatMessage[];
+  /** Base64-encoded images for the current message (vision-capable models) */
+  images?: string[];
+}
+
+/**
+ * Options for streaming chat interactions
+ */
+export interface StreamingChatOptions extends ChatOptions {
+  /** Callback fired for each token during generation */
+  onToken?: (token: string) => void;
 }
 
 /**
```

```diff
@@ -161,6 +173,14 @@ export abstract class MultiModalModel {
    */
  public abstract chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>>;
+
+  /**
+   * Streaming chat with token callback
+   * Calls onToken for each token generated, returns final response
+   * @param optionsArg Options containing system message, user message, message history, and onToken callback
+   * @returns Promise resolving to the assistant's response
+   */
+  public chatStreaming?(optionsArg: StreamingChatOptions): Promise<ChatResponse>;
 
   /**
    * Text-to-speech conversion
    * @param optionsArg Options containing the message to convert to speech
```

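Because `chatStreaming` is declared as an optional member (`chatStreaming?`), code written against the abstract `MultiModalModel` should feature-detect it before calling. A minimal sketch, assuming only the shapes visible in the hunk above; the `streamToConsole` helper and its prompts are illustrative.

```ts
// Sketch: feature-detect the optional chatStreaming method before use.
// `model` stands for any MultiModalModel-like provider; types mirror the hunk above.
interface StreamingCapable {
  chatStreaming?(optionsArg: {
    systemMessage: string;
    userMessage: string;
    messageHistory: { role: 'assistant' | 'user' | 'system'; content: string; images?: string[] }[];
    images?: string[];
    onToken?: (token: string) => void; // fired for each generated token
  }): Promise<{ role: 'assistant'; message: string }>;
}

async function streamToConsole(model: StreamingCapable): Promise<string | undefined> {
  if (!model.chatStreaming) {
    return undefined; // provider does not implement token-level streaming
  }
  const result = await model.chatStreaming({
    systemMessage: 'You are a concise assistant.',
    userMessage: 'Summarize the 0.12.0 release in one sentence.',
    messageHistory: [],
    onToken: (token) => process.stdout.write(token), // print tokens as they arrive
  });
  return result.message; // full assistant response once streaming completes
}
```
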
```diff
@@ -9,7 +9,8 @@ import type {
   ResearchResponse,
   ImageGenerateOptions,
   ImageEditOptions,
-  ImageResponse
+  ImageResponse,
+  StreamingChatOptions
 } from './abstract.classes.multimodal.js';
 
 /**
```

```diff
@@ -42,6 +43,7 @@ export interface IOllamaChatOptions extends ChatOptions {
   options?: IOllamaModelOptions; // Per-request model options
   timeout?: number; // Per-request timeout in ms
   model?: string; // Per-request model override
+  // images is inherited from ChatOptions
 }
 
 /**
```

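For reference, a sketch of a fully populated `IOllamaChatOptions` value. The field names come from this hunk and from `ChatOptions`; the concrete values, including the `temperature` key inside `options`, are illustrative assumptions rather than settings taken from this diff.

```ts
// Illustrative values only; field names follow IOllamaChatOptions / ChatOptions.
const ollamaChatOptions = {
  // Inherited from ChatOptions
  systemMessage: 'You are a helpful vision assistant.',
  userMessage: 'Describe the attached screenshot.',
  messageHistory: [],
  images: ['<base64-encoded image>'], // inherited images field, new in 0.12.0

  // Ollama-specific per-request settings
  options: { temperature: 0.2 }, // per-request model options (key name assumed)
  timeout: 60_000,               // per-request timeout in ms
  model: 'llava',                // per-request model override (assumed vision model)
};
```
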
```diff
@@ -202,10 +204,30 @@ export class OllamaProvider extends MultiModalModel {
   // Implementing the synchronous chat interaction
   public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
     // Format messages for Ollama
+    const historyMessages = optionsArg.messageHistory.map((msg) => {
+      const formatted: { role: string; content: string; images?: string[] } = {
+        role: msg.role,
+        content: msg.content,
+      };
+      if (msg.images && msg.images.length > 0) {
+        formatted.images = msg.images;
+      }
+      return formatted;
+    });
+
+    // Build user message with optional images
+    const userMessage: { role: string; content: string; images?: string[] } = {
+      role: 'user',
+      content: optionsArg.userMessage,
+    };
+    if (optionsArg.images && optionsArg.images.length > 0) {
+      userMessage.images = optionsArg.images;
+    }
+
     const messages = [
       { role: 'system', content: optionsArg.systemMessage },
-      ...optionsArg.messageHistory,
-      { role: 'user', content: optionsArg.userMessage }
+      ...historyMessages,
+      userMessage,
     ];
 
     // Make API call to Ollama with defaultOptions and timeout
```

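The practical effect of the mapping above: an entry in `messages` only carries an `images` key when images were actually supplied, so image-free requests reach Ollama exactly as before. A sketch of the resulting `messages` array for a single-image turn (values illustrative):

```ts
// Sketch of the messages array assembled by chat() above; values illustrative.
// Only entries that carried images gain an `images` key.
const messages = [
  { role: 'system', content: 'You are a helpful vision assistant.' },
  { role: 'user', content: 'An earlier, text-only turn' }, // history entry: no images key
  {
    role: 'user',
    content: 'What is shown in this picture?',
    images: ['<base64-encoded JPEG or PNG>'], // forwarded to Ollama unchanged
  },
];
```
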
```diff
@@ -235,6 +257,35 @@ export class OllamaProvider extends MultiModalModel {
     };
   }
+
+  /**
+   * Streaming chat with token callback (implements MultiModalModel interface)
+   * Calls onToken for each token generated during the response
+   */
+  public async chatStreaming(optionsArg: StreamingChatOptions): Promise<ChatResponse> {
+    const onToken = optionsArg.onToken;
+
+    // Use existing collectStreamResponse with callback, including images
+    const response = await this.collectStreamResponse(
+      {
+        systemMessage: optionsArg.systemMessage,
+        userMessage: optionsArg.userMessage,
+        messageHistory: optionsArg.messageHistory,
+        images: optionsArg.images,
+      },
+      (chunk) => {
+        if (onToken) {
+          if (chunk.thinking) onToken(chunk.thinking);
+          if (chunk.content) onToken(chunk.content);
+        }
+      }
+    );
+
+    return {
+      role: 'assistant' as const,
+      message: response.message,
+    };
+  }
 
   /**
    * Streaming chat with async iteration and options support
    */
```

```diff
@@ -245,10 +296,31 @@ export class OllamaProvider extends MultiModalModel {
     const timeout = optionsArg.timeout || this.defaultTimeout;
     const modelOptions = { ...this.defaultOptions, ...optionsArg.options };
 
+    // Format history messages with optional images
+    const historyMessages = optionsArg.messageHistory.map((msg) => {
+      const formatted: { role: string; content: string; images?: string[] } = {
+        role: msg.role,
+        content: msg.content,
+      };
+      if (msg.images && msg.images.length > 0) {
+        formatted.images = msg.images;
+      }
+      return formatted;
+    });
+
+    // Build user message with optional images
+    const userMessage: { role: string; content: string; images?: string[] } = {
+      role: 'user',
+      content: optionsArg.userMessage,
+    };
+    if (optionsArg.images && optionsArg.images.length > 0) {
+      userMessage.images = optionsArg.images;
+    }
+
     const messages = [
       { role: 'system', content: optionsArg.systemMessage },
-      ...optionsArg.messageHistory,
-      { role: 'user', content: optionsArg.userMessage }
+      ...historyMessages,
+      userMessage,
     ];
 
     const response = await fetch(`${this.baseUrl}/api/chat`, {
```

```diff
@@ -338,10 +410,31 @@ export class OllamaProvider extends MultiModalModel {
     const timeout = optionsArg.timeout || this.defaultTimeout;
     const modelOptions = { ...this.defaultOptions, ...optionsArg.options };
 
+    // Format history messages with optional images
+    const historyMessages = optionsArg.messageHistory.map((msg) => {
+      const formatted: { role: string; content: string; images?: string[] } = {
+        role: msg.role,
+        content: msg.content,
+      };
+      if (msg.images && msg.images.length > 0) {
+        formatted.images = msg.images;
+      }
+      return formatted;
+    });
+
+    // Build user message with optional images
+    const userMessage: { role: string; content: string; images?: string[] } = {
+      role: 'user',
+      content: optionsArg.userMessage,
+    };
+    if (optionsArg.images && optionsArg.images.length > 0) {
+      userMessage.images = optionsArg.images;
+    }
+
     const messages = [
       { role: 'system', content: optionsArg.systemMessage },
-      ...optionsArg.messageHistory,
-      { role: 'user', content: optionsArg.userMessage }
+      ...historyMessages,
+      userMessage,
     ];
 
     const response = await fetch(`${this.baseUrl}/api/chat`, {
```