feat(streaming): add streaming support to DriverAgent and DualAgentOrchestrator

- Add onToken callback option to IDualAgentOptions interface
- Add onToken property and setOnToken method to DriverAgent
- Wire up streaming in startTask and continueWithMessage methods
- Pass source identifier ('driver'/'guardian') to onToken callback
2026-01-20 00:38:36 +00:00
parent fe0de36b1a
commit 37d4069806
5 changed files with 64 additions and 17 deletions
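
A minimal usage sketch of the new streaming option (construction details below are illustrative; only onToken and its source argument come from this commit):

// Sketch: provider configuration elided; import path assumed.
import { DualAgentOrchestrator } from './classes.dualagentorchestrator.js';

const orchestrator = new DualAgentOrchestrator({
  // ...provider options as before...
  onToken: (token, source) => {
    // Driver tokens stream straight to stdout; guardian tokens get tagged.
    if (source === 'driver') {
      process.stdout.write(token);
    } else {
      process.stdout.write(`[guardian] ${token}`);
    }
  },
});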

package.json

@@ -21,7 +21,7 @@
     "@types/node": "^25.0.2"
   },
   "dependencies": {
-    "@push.rocks/smartai": "^0.11.0",
+    "@push.rocks/smartai": "^0.11.1",
     "@push.rocks/smartbrowser": "^2.0.8",
     "@push.rocks/smartdeno": "^1.2.0",
     "@push.rocks/smartfs": "^1.2.0",

pnpm-lock.yaml (generated)

@@ -9,8 +9,8 @@ importers:
   .:
     dependencies:
       '@push.rocks/smartai':
-        specifier: ^0.11.0
-        version: 0.11.0(typescript@5.9.3)(ws@8.18.3)(zod@3.25.76)
+        specifier: ^0.11.1
+        version: 0.11.1(typescript@5.9.3)(ws@8.18.3)(zod@3.25.76)
       '@push.rocks/smartbrowser':
         specifier: ^2.0.8
         version: 2.0.8(typescript@5.9.3)
@@ -844,8 +844,8 @@ packages:
   '@push.rocks/qenv@6.1.3':
     resolution: {integrity: sha512-+z2hsAU/7CIgpYLFqvda8cn9rUBMHqLdQLjsFfRn5jPoD7dJ5rFlpkbhfM4Ws8mHMniwWaxGKo+q/YBhtzRBLg==}

-  '@push.rocks/smartai@0.11.0':
-    resolution: {integrity: sha512-Fm10tPY/UBLgQD/i0cuUPx3oHpl/VeefBm9uANNvwKXbNmWrgSBxl0EjyqXxzLvQ+Q5hpVkp+D53pyIh7tFDcA==}
+  '@push.rocks/smartai@0.11.1':
+    resolution: {integrity: sha512-GPOfy4h0ItHfQabfpmzmZYiSYAtXHpGdY6IJkjyW+IN10sgMQEbCpr/2u7MkgEDJHJYQZ49BWZH7/7GdtSxwrg==}

   '@push.rocks/smartarchive@4.2.4':
     resolution: {integrity: sha512-uiqVAXPxmr8G5rv3uZvZFMOCt8l7cZC3nzvsy4YQqKf/VkPhKIEX+b7LkAeNlxPSYUiBQUkNRoawg9+5BaMcHg==}
@@ -5172,7 +5172,7 @@ snapshots:
       '@push.rocks/smartlog': 3.1.10
       '@push.rocks/smartpath': 6.0.0

-  '@push.rocks/smartai@0.11.0(typescript@5.9.3)(ws@8.18.3)(zod@3.25.76)':
+  '@push.rocks/smartai@0.11.1(typescript@5.9.3)(ws@8.18.3)(zod@3.25.76)':
     dependencies:
       '@anthropic-ai/sdk': 0.71.2(zod@3.25.76)
       '@mistralai/mistralai': 1.12.0


@@ -10,6 +10,8 @@ export interface IDriverAgentOptions {
   systemMessage?: string;
   /** Maximum history messages to pass to API (default: 20). Set to 0 for unlimited. */
   maxHistoryMessages?: number;
+  /** Callback fired for each token during LLM generation */
+  onToken?: (token: string) => void;
 }

 /**
@@ -22,6 +24,7 @@ export class DriverAgent {
   private maxHistoryMessages: number;
   private messageHistory: plugins.smartai.ChatMessage[] = [];
   private tools: Map<string, BaseToolWrapper> = new Map();
+  private onToken?: (token: string) => void;

   constructor(
     provider: plugins.smartai.MultiModalModel,
@@ -36,9 +39,18 @@
     } else {
       this.systemMessage = options?.systemMessage || this.getDefaultSystemMessage();
       this.maxHistoryMessages = options?.maxHistoryMessages ?? 20;
+      this.onToken = options?.onToken;
     }
   }

+  /**
+   * Set the token callback for streaming mode
+   * @param callback Function to call for each generated token
+   */
+  public setOnToken(callback: (token: string) => void): void {
+    this.onToken = callback;
+  }
+
   /**
    * Register a tool for use by the driver
    */
@@ -85,12 +97,25 @@
       fullSystemMessage = this.getNoToolsSystemMessage();
     }

-    // Get response from provider
-    const response = await this.provider.chat({
-      systemMessage: fullSystemMessage,
-      userMessage: userMessage,
-      messageHistory: [],
-    });
+    // Get response from provider - use streaming if available and callback is set
+    let response: plugins.smartai.ChatResponse;
+    if (this.onToken && typeof (this.provider as any).chatStreaming === 'function') {
+      // Use streaming mode with token callback
+      response = await (this.provider as any).chatStreaming({
+        systemMessage: fullSystemMessage,
+        userMessage: userMessage,
+        messageHistory: [],
+        onToken: this.onToken,
+      });
+    } else {
+      // Fallback to non-streaming mode
+      response = await this.provider.chat({
+        systemMessage: fullSystemMessage,
+        userMessage: userMessage,
+        messageHistory: [],
+      });
+    }

     // Add assistant response to history
     this.messageHistory.push({
@@ -139,11 +164,25 @@
       historyForChat = fullHistory;
     }

-    const response = await this.provider.chat({
-      systemMessage: fullSystemMessage,
-      userMessage: message,
-      messageHistory: historyForChat,
-    });
+    // Get response from provider - use streaming if available and callback is set
+    let response: plugins.smartai.ChatResponse;
+    if (this.onToken && typeof (this.provider as any).chatStreaming === 'function') {
+      // Use streaming mode with token callback
+      response = await (this.provider as any).chatStreaming({
+        systemMessage: fullSystemMessage,
+        userMessage: message,
+        messageHistory: historyForChat,
+        onToken: this.onToken,
+      });
+    } else {
+      // Fallback to non-streaming mode
+      response = await this.provider.chat({
+        systemMessage: fullSystemMessage,
+        userMessage: message,
+        messageHistory: historyForChat,
+      });
+    }

     // Add assistant response to history
     this.messageHistory.push({
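
Because setOnToken is public, the callback can also be attached after construction, e.g. when a UI toggles streaming at runtime (a sketch; the driver instance and the stdout writer are illustrative):

// Attach or replace the token callback on an existing agent.
driver.setOnToken((token: string) => process.stdout.write(token));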


@@ -181,9 +181,15 @@ export class DualAgentOrchestrator {
       : this.driverProvider;

     // NOW create agents with initialized providers
+    // Set up token callback wrapper if streaming is enabled
+    const driverOnToken = this.options.onToken
+      ? (token: string) => this.options.onToken!(token, 'driver')
+      : undefined;
+
     this.driver = new DriverAgent(this.driverProvider, {
       systemMessage: this.options.driverSystemMessage,
       maxHistoryMessages: this.options.maxHistoryMessages,
+      onToken: driverOnToken,
     });

     this.guardian = new GuardianAgent(this.guardianProvider, this.options.guardianPolicyPrompt);


@@ -34,6 +34,8 @@ export interface IDualAgentOptions extends plugins.smartai.ISmartAiOptions {
   onProgress?: (event: IProgressEvent) => void;
   /** Prefix for log messages (e.g., "[README]", "[Commit]"). Default: empty */
   logPrefix?: string;
+  /** Callback fired for each token during LLM generation (streaming mode) */
+  onToken?: (token: string, source: 'driver' | 'guardian') => void;
 }

 // ================================
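
Note that DriverAgent duck-types the provider: any object whose chatStreaming is a function takes the streaming path, and everything else falls back to chat. A hypothetical test stub exercising the streaming branch (field names are taken from the diff above; the response shape is an assumption):

// Hypothetical stub; satisfies the typeof chatStreaming === 'function' check.
const stubProvider: any = {
  async chat(opts: { systemMessage: string; userMessage: string; messageHistory: any[] }) {
    return { role: 'assistant', message: 'full reply' }; // response shape assumed
  },
  async chatStreaming(opts: {
    systemMessage: string;
    userMessage: string;
    messageHistory: any[];
    onToken: (token: string) => void;
  }) {
    for (const token of ['hello', ' ', 'world']) opts.onToken(token); // emit incrementally
    return { role: 'assistant', message: 'hello world' }; // aggregated final response
  },
};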