feat(streaming): add streaming support to DriverAgent and DualAgentOrchestrator

- Add onToken callback option to IDualAgentOptions interface
- Add onToken property and setOnToken method to DriverAgent
- Wire up streaming in startTask and continueWithMessage methods
- Pass source identifier ('driver'/'guardian') to onToken callback
commit 37d4069806 (parent fe0de36b1a)
Date: 2026-01-20 00:38:36 +00:00
5 changed files with 64 additions and 17 deletions
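
The new option lets callers observe generation token-by-token. A minimal consumer-side sketch (the construction shape and the `startTask` call are assumptions for illustration; only the `onToken` option and its `(token, source)` signature come from this commit):

const orchestrator = new DualAgentOrchestrator({
  // ...provider configuration elided...
  onToken: (token, source) => {
    // Write driver tokens straight through; prefix guardian tokens.
    process.stdout.write(source === 'driver' ? token : `[guardian] ${token}`);
  },
});
await orchestrator.startTask('Summarize the repository README.');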


@@ -10,6 +10,8 @@ export interface IDriverAgentOptions {
systemMessage?: string;
/** Maximum history messages to pass to API (default: 20). Set to 0 for unlimited. */
maxHistoryMessages?: number;
/** Callback fired for each token during LLM generation */
onToken?: (token: string) => void;
}
/**
@@ -22,6 +24,7 @@ export class DriverAgent {
private maxHistoryMessages: number;
private messageHistory: plugins.smartai.ChatMessage[] = [];
private tools: Map<string, BaseToolWrapper> = new Map();
private onToken?: (token: string) => void;
constructor(
provider: plugins.smartai.MultiModalModel,
@@ -36,9 +39,18 @@ export class DriverAgent {
} else {
this.systemMessage = options?.systemMessage || this.getDefaultSystemMessage();
this.maxHistoryMessages = options?.maxHistoryMessages ?? 20;
this.onToken = options?.onToken;
}
}
/**
* Set the token callback for streaming mode
* @param callback Function to call for each generated token
*/
public setOnToken(callback: (token: string) => void): void {
this.onToken = callback;
}
/**
* Register a tool for use by the driver
*/
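
The setOnToken setter above allows attaching the callback after construction, e.g. when streaming is toggled at runtime. A sketch (`provider` is a placeholder for any initialized plugins.smartai.MultiModalModel instance):

// Hypothetical usage; `provider` stands in for a real model instance.
const driver = new DriverAgent(provider, { maxHistoryMessages: 50 });
driver.setOnToken((token) => process.stdout.write(token));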
@@ -85,12 +97,25 @@ export class DriverAgent {
fullSystemMessage = this.getNoToolsSystemMessage();
}
// Get response from provider
const response = await this.provider.chat({
systemMessage: fullSystemMessage,
userMessage: userMessage,
messageHistory: [],
});
// Get response from provider - use streaming if available and callback is set
let response: plugins.smartai.ChatResponse;
if (this.onToken && typeof (this.provider as any).chatStreaming === 'function') {
// Use streaming mode with token callback
response = await (this.provider as any).chatStreaming({
systemMessage: fullSystemMessage,
userMessage: userMessage,
messageHistory: [],
onToken: this.onToken,
});
} else {
// Fallback to non-streaming mode
response = await this.provider.chat({
systemMessage: fullSystemMessage,
userMessage: userMessage,
messageHistory: [],
});
}
// Add assistant response to history
this.messageHistory.push({
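
The `as any` cast duck-types the provider: the streaming path is taken only when the instance actually exposes a `chatStreaming` method (the same branch appears again in continueWithMessage below). The contract this call site assumes looks roughly like the following sketch, inferred from the arguments passed, not a documented plugins.smartai interface:

// Inferred from the call site above; the interface name is hypothetical.
interface IStreamingCapableProvider {
  chatStreaming(options: {
    systemMessage: string;
    userMessage: string;
    messageHistory: plugins.smartai.ChatMessage[];
    onToken: (token: string) => void;
  }): Promise<plugins.smartai.ChatResponse>;
}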
@@ -139,11 +164,25 @@ export class DriverAgent {
historyForChat = fullHistory;
}
const response = await this.provider.chat({
systemMessage: fullSystemMessage,
userMessage: message,
messageHistory: historyForChat,
});
// Get response from provider - use streaming if available and callback is set
let response: plugins.smartai.ChatResponse;
if (this.onToken && typeof (this.provider as any).chatStreaming === 'function') {
// Use streaming mode with token callback
response = await (this.provider as any).chatStreaming({
systemMessage: fullSystemMessage,
userMessage: message,
messageHistory: historyForChat,
onToken: this.onToken,
});
} else {
// Fallback to non-streaming mode
response = await this.provider.chat({
systemMessage: fullSystemMessage,
userMessage: message,
messageHistory: historyForChat,
});
}
// Add assistant response to history
this.messageHistory.push({


@@ -181,9 +181,15 @@ export class DualAgentOrchestrator {
: this.driverProvider;
// NOW create agents with initialized providers
// Set up token callback wrapper if streaming is enabled
const driverOnToken = this.options.onToken
? (token: string) => this.options.onToken!(token, 'driver')
: undefined;
this.driver = new DriverAgent(this.driverProvider, {
systemMessage: this.options.driverSystemMessage,
maxHistoryMessages: this.options.maxHistoryMessages,
onToken: driverOnToken,
});
this.guardian = new GuardianAgent(this.guardianProvider, this.options.guardianPolicyPrompt);


@@ -34,6 +34,8 @@ export interface IDualAgentOptions extends plugins.smartai.ISmartAiOptions {
onProgress?: (event: IProgressEvent) => void;
/** Prefix for log messages (e.g., "[README]", "[Commit]"). Default: empty */
logPrefix?: string;
/** Callback fired for each token during LLM generation (streaming mode) */
onToken?: (token: string, source: 'driver' | 'guardian') => void;
}
// ================================
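
Because each token arrives tagged with its source, a consumer can demultiplex the two streams, for example to render driver and guardian output in separate panes. A sketch (the buffering strategy is illustrative only):

// Accumulate per-agent output as it streams in.
const buffers: Record<'driver' | 'guardian', string> = { driver: '', guardian: '' };
const onToken: IDualAgentOptions['onToken'] = (token, source) => {
  buffers[source] += token;
};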