feat(streaming): add chatStreaming method with token callback for real-time generation progress

- Add StreamingChatOptions interface with onToken callback
- Add optional chatStreaming method to MultiModalModel abstract class
- Implement chatStreaming in OllamaProvider using collectStreamResponse
2026-01-20 00:37:49 +00:00
parent dfa863ee7d
commit d615ec9227
2 changed files with 47 additions and 2 deletions
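
For orientation, a minimal sketch of how a caller would use the new method. The import path and provider construction below are assumptions (neither is shown in this commit); only chatStreaming and its options object come from this change:

// NOTE: hypothetical import path and constructor; the provider module's
// file name and constructor signature are not shown in this diff.
import { OllamaProvider } from './provider.ollama.js';

const provider = new OllamaProvider();

const response = await provider.chatStreaming({
  systemMessage: 'You are a helpful assistant.',
  userMessage: 'Explain token streaming in one sentence.',
  messageHistory: [],
  // Fired for every generated token (thinking and content chunks alike).
  onToken: (token) => process.stdout.write(token),
});

console.log('\nfinal message:', response.message);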


@@ -17,6 +17,14 @@ export interface ChatOptions {
  messageHistory: ChatMessage[];
}

/**
 * Options for streaming chat interactions
 */
export interface StreamingChatOptions extends ChatOptions {
  /** Callback fired for each token during generation */
  onToken?: (token: string) => void;
}

/**
 * Response format for chat interactions
 */
@@ -152,7 +160,7 @@ export abstract class MultiModalModel {
   * @returns Promise resolving to the assistant's response
   */
  public abstract chat(optionsArg: ChatOptions): Promise<ChatResponse>;

  /**
   * Streaming interface for chat interactions
   * Allows for real-time responses from the model
@@ -161,6 +169,14 @@ export abstract class MultiModalModel {
   */
  public abstract chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>>;

  /**
   * Streaming chat with token callback
   * Calls onToken for each token generated and resolves with the final response
   * @param optionsArg Options containing the system message, user message, message history, and onToken callback
   * @returns Promise resolving to the assistant's response
   */
  public chatStreaming?(optionsArg: StreamingChatOptions): Promise<ChatResponse>;

  /**
   * Text-to-speech conversion
   * @param optionsArg Options containing the message to convert to speech
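
Since chatStreaming is declared as an optional member (chatStreaming?) rather than abstract, providers that have not implemented it still compile, and callers must feature-check before invoking it. A minimal sketch of that pattern, assuming a fallback to the non-streaming chat() is acceptable:

import { MultiModalModel } from './abstract.classes.multimodal.js';
import type {
  ChatResponse,
  StreamingChatOptions,
} from './abstract.classes.multimodal.js';

// Prefer token-level progress when the provider supports it,
// otherwise fall back to the plain chat() call.
async function chatWithProgress(
  model: MultiModalModel,
  options: StreamingChatOptions
): Promise<ChatResponse> {
  if (model.chatStreaming) {
    return model.chatStreaming(options);
  }
  // onToken is simply never fired on this path.
  return model.chat(options);
}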


@@ -9,7 +9,8 @@ import type {
  ResearchResponse,
  ImageGenerateOptions,
  ImageEditOptions,
  ImageResponse,
  StreamingChatOptions
} from './abstract.classes.multimodal.js';
/**
@@ -235,6 +236,34 @@ export class OllamaProvider extends MultiModalModel {
    };
  }

  /**
   * Streaming chat with token callback (implements the MultiModalModel interface)
   * Calls onToken for each token generated during the response
   */
  public async chatStreaming(optionsArg: StreamingChatOptions): Promise<ChatResponse> {
    const onToken = optionsArg.onToken;
    // Use existing collectStreamResponse with callback
    const response = await this.collectStreamResponse(
      {
        systemMessage: optionsArg.systemMessage,
        userMessage: optionsArg.userMessage,
        messageHistory: optionsArg.messageHistory,
      },
      (chunk) => {
        if (onToken) {
          if (chunk.thinking) onToken(chunk.thinking);
          if (chunk.content) onToken(chunk.content);
        }
      }
    );
    return {
      role: 'assistant' as const,
      message: response.message,
    };
  }

  /**
   * Streaming chat with async iteration and options support
   */
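
Note that the implementation forwards both chunk.thinking and chunk.content through the same onToken callback, so a consumer cannot distinguish reasoning tokens from answer tokens. A usage sketch under that constraint (provider construction as in the earlier example, still hypothetical):

let streamed = '';

const response = await provider.chatStreaming({
  systemMessage: 'You are a helpful assistant.',
  userMessage: 'Summarize this change in one line.',
  messageHistory: [],
  onToken: (token) => {
    streamed += token;           // accumulates thinking AND content text
    process.stdout.write(token); // live progress display
  },
});

// Because thinking chunks are forwarded too, `streamed` may contain more
// text than the final assistant message.
console.log('\nfinal message:', response.message);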