feat(streaming): add chatStreaming method with token callback for real-time generation progress

- Add StreamingChatOptions interface with onToken callback
- Add optional chatStreaming method to MultiModalModel abstract class
- Implement chatStreaming in OllamaProvider using collectStreamResponse
This commit is contained in:
2026-01-20 00:37:49 +00:00
parent dfa863ee7d
commit d615ec9227
2 changed files with 47 additions and 2 deletions

View File

@@ -9,7 +9,8 @@ import type {
ResearchResponse,
ImageGenerateOptions,
ImageEditOptions,
ImageResponse
ImageResponse,
StreamingChatOptions
} from './abstract.classes.multimodal.js';
/**
@@ -235,6 +236,34 @@ export class OllamaProvider extends MultiModalModel {
};
}
/**
 * Streaming chat that forwards each generated token to an optional callback
 * (implements the MultiModalModel streaming interface).
 *
 * Delegates to the existing collectStreamResponse helper and relays every
 * chunk — reasoning ("thinking") text first, then answer content — to
 * optionsArg.onToken as it arrives.
 */
public async chatStreaming(optionsArg: StreamingChatOptions): Promise<ChatResponse> {
  const { onToken } = optionsArg;
  const streamResult = await this.collectStreamResponse(
    {
      systemMessage: optionsArg.systemMessage,
      userMessage: optionsArg.userMessage,
      messageHistory: optionsArg.messageHistory,
    },
    (chunk) => {
      // Relay thinking tokens before content tokens, matching chunk order.
      if (chunk.thinking) onToken?.(chunk.thinking);
      if (chunk.content) onToken?.(chunk.content);
    }
  );
  return {
    role: 'assistant' as const,
    message: streamResult.message,
  };
}
/**
* Streaming chat with async iteration and options support
*/