feat(streaming): add chatStreaming method with token callback for real-time generation progress

- Add StreamingChatOptions interface with onToken callback
- Add optional chatStreaming method to MultiModalModel abstract class
- Implement chatStreaming in OllamaProvider using collectStreamResponse
This commit is contained in:
2026-01-20 00:37:49 +00:00
parent dfa863ee7d
commit d615ec9227
2 changed files with 47 additions and 2 deletions

View File

@@ -17,6 +17,14 @@ export interface ChatOptions {
messageHistory: ChatMessage[];
}
/**
 * Options for streaming chat interactions.
 *
 * Extends ChatOptions with an optional per-token progress callback so callers
 * can observe generation in real time while still receiving the final
 * assembled response from the chat method.
 */
export interface StreamingChatOptions extends ChatOptions {
/** Optional callback invoked once per token as it is generated; omit to stream without progress updates. */
onToken?: (token: string) => void;
}
/**
* Response format for chat interactions
*/
@@ -152,7 +160,7 @@ export abstract class MultiModalModel {
* @returns Promise resolving to the assistant's response
*/
public abstract chat(optionsArg: ChatOptions): Promise<ChatResponse>;
/**
* Streaming interface for chat interactions
* Allows for real-time responses from the model
@@ -161,6 +169,14 @@ export abstract class MultiModalModel {
*/
public abstract chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>>;
/**
 * Streaming chat with a per-token callback.
 *
 * Implementations invoke optionsArg.onToken for each token as it is generated
 * and resolve with the final, complete assistant response. This member is
 * optional (note the `?`): providers that do not support token-level streaming
 * may leave it undefined, so callers should check for its presence before
 * invoking it.
 *
 * @param optionsArg Options containing system message, user message, message history, and the optional onToken callback
 * @returns Promise resolving to the assistant's final response
 */
public chatStreaming?(optionsArg: StreamingChatOptions): Promise<ChatResponse>;
/**
* Text-to-speech conversion
* @param optionsArg Options containing the message to convert to speech