diff --git a/ts/abstract.classes.multimodal.ts b/ts/abstract.classes.multimodal.ts
index 3b80d35..eca8091 100644
--- a/ts/abstract.classes.multimodal.ts
+++ b/ts/abstract.classes.multimodal.ts
@@ -17,6 +17,14 @@ export interface ChatOptions {
   messageHistory: ChatMessage[];
 }
 
+/**
+ * Options for streaming chat interactions
+ */
+export interface StreamingChatOptions extends ChatOptions {
+  /** Callback fired for each token during generation */
+  onToken?: (token: string) => void;
+}
+
 /**
  * Response format for chat interactions
  */
@@ -152,7 +160,7 @@ export abstract class MultiModalModel {
    * @returns Promise resolving to the assistant's response
    */
   public abstract chat(optionsArg: ChatOptions): Promise<ChatResponse>;
-  
+
   /**
    * Streaming interface for chat interactions
    * Allows for real-time responses from the model
@@ -161,6 +169,14 @@ export abstract class MultiModalModel {
    */
   public abstract chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>>;
 
+  /**
+   * Streaming chat with token callback
+   * Calls onToken for each token generated, returns final response
+   * @param optionsArg Options containing system message, user message, message history, and onToken callback
+   * @returns Promise resolving to the assistant's response
+   */
+  public chatStreaming?(optionsArg: StreamingChatOptions): Promise<ChatResponse>;
+
   /**
    * Text-to-speech conversion
    * @param optionsArg Options containing the message to convert to speech
diff --git a/ts/provider.ollama.ts b/ts/provider.ollama.ts
index aa59b09..169e286 100644
--- a/ts/provider.ollama.ts
+++ b/ts/provider.ollama.ts
@@ -9,7 +9,8 @@ import type {
   ResearchResponse,
   ImageGenerateOptions,
   ImageEditOptions,
-  ImageResponse
+  ImageResponse,
+  StreamingChatOptions
 } from './abstract.classes.multimodal.js';
 
 /**
@@ -235,6 +236,34 @@ export class OllamaProvider extends MultiModalModel {
     };
   }
 
+  /**
+   * Streaming chat with token callback (implements MultiModalModel interface)
+   * Calls onToken for each token generated during the response
+   */
+  public async chatStreaming(optionsArg: StreamingChatOptions): Promise<ChatResponse> {
+    const onToken = optionsArg.onToken;
+
+    // Use existing collectStreamResponse with callback
+    const response = await this.collectStreamResponse(
+      {
+        systemMessage: optionsArg.systemMessage,
+        userMessage: optionsArg.userMessage,
+        messageHistory: optionsArg.messageHistory,
+      },
+      (chunk) => {
+        if (onToken) {
+          if (chunk.thinking) onToken(chunk.thinking);
+          if (chunk.content) onToken(chunk.content);
+        }
+      }
+    );
+
+    return {
+      role: 'assistant' as const,
+      message: response.message,
+    };
+  }
+
   /**
    * Streaming chat with async iteration and options support
    */
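
Usage sketch for the new callback API (not part of the patch): the import paths, constructor options, and model name below are illustrative assumptions. Since `chatStreaming` is declared optional on `MultiModalModel`, consumers holding the abstract type should guard before calling it:

```typescript
import { OllamaProvider } from './ts/provider.ollama.js';
import type { MultiModalModel } from './ts/abstract.classes.multimodal.js';

async function main() {
  // Constructor options are assumed for illustration.
  const provider: MultiModalModel = new OllamaProvider({ model: 'llama3' });

  // chatStreaming is optional on the abstract class, so guard the call.
  if (provider.chatStreaming) {
    const response = await provider.chatStreaming({
      systemMessage: 'You are a helpful assistant.',
      userMessage: 'Summarize this PR in one sentence.',
      messageHistory: [],
      // Receives thinking and content tokens as each chunk arrives.
      onToken: (token) => process.stdout.write(token),
    });
    console.log('\nFinal message:', response.message);
  }
}

main().catch(console.error);
```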