22 Commits

Author SHA1 Message Date
c1b269f301 v1.6.2 2026-01-20 03:56:44 +00:00
7cb970f9e2 fix(release): bump version to 1.6.2 2026-01-20 03:56:44 +00:00
1fbcf8bb8b fix(driveragent): save tool_calls in message history for native tool calling
When using native tool calling, the assistant's tool_calls must be saved
in message history. Without this, the model doesn't know it already called
a tool and loops indefinitely calling the same tool.

This fix saves tool_calls in both startTaskWithNativeTools and
continueWithNativeTools methods.

Also updates @push.rocks/smartai to v0.13.3 for tool_calls forwarding support.
2026-01-20 03:56:10 +00:00
4a8789019a v1.6.1 2026-01-20 03:38:07 +00:00
0da85a5dcd fix(driveragent): include full message history for tool results and use a continuation prompt when invoking provider.collectStreamResponse 2026-01-20 03:38:07 +00:00
121e216eea v1.6.0 2026-01-20 03:28:59 +00:00
eb1058bfb5 feat(smartagent): record native tool results in message history by adding optional toolName to continueWithNativeTools and passing tool identifier from DualAgent 2026-01-20 03:28:59 +00:00
ecdc125a43 v1.5.4 2026-01-20 03:16:02 +00:00
73657be550 fix(driveragent): prevent duplicate thinking/output markers during token streaming and mark transitions 2026-01-20 03:16:02 +00:00
4e4d3c0e08 v1.5.3 2026-01-20 03:10:53 +00:00
79efe8f6b8 fix(driveragent): prefix thinking tokens with [THINKING] when forwarding streaming chunks to onToken 2026-01-20 03:10:53 +00:00
8bcf3257e2 v1.5.2 2026-01-20 02:54:58 +00:00
6753553394 fix(): no changes in this diff; nothing to release 2026-01-20 02:54:58 +00:00
a46dbd0da6 fix(driveragent): enable streaming for native tool calling methods 2026-01-20 02:54:45 +00:00
7379daf4c5 v1.5.1 2026-01-20 02:45:41 +00:00
83422b4b0e fix(smartagent): bump patch version to 1.5.1 (no changes in diff) 2026-01-20 02:45:41 +00:00
4310c8086b feat(native-tools): add native tool calling support for Ollama models
- Add INativeToolCall interface for native tool call format
- Add useNativeToolCalling option to IDualAgentOptions
- Add getToolsAsJsonSchema() to convert tools to Ollama JSON Schema format
- Add parseNativeToolCalls() to convert native tool calls to proposals
- Add startTaskWithNativeTools() and continueWithNativeTools() to DriverAgent
- Update DualAgentOrchestrator to support both XML parsing and native tool calling modes

Native tool calling is more efficient for models like GPT-OSS that use Harmony format,
as it activates Ollama's built-in tool parser instead of requiring XML generation.
2026-01-20 02:44:54 +00:00
472a8ed7f8 v1.5.0 2026-01-20 02:05:12 +00:00
44137a8710 feat(driveragent): preserve assistant reasoning in message history and update @push.rocks/smartai dependency to ^0.13.0 2026-01-20 02:05:12 +00:00
c12a6a7be9 v1.4.2 2026-01-20 01:41:18 +00:00
49dcc7a1a1 fix(repo): no changes detected in diff 2026-01-20 01:41:18 +00:00
e649e9caab fix(driver): make tool call format instructions explicit about literal XML output
The system message now clearly states that the <tool_call> XML tags MUST
be literally written in the response, not just described. Includes examples
of CORRECT vs WRONG usage to help smaller models understand.
2026-01-20 01:40:57 +00:00
7 changed files with 541 additions and 45 deletions


@@ -1,5 +1,68 @@
# Changelog
## 2026-01-20 - 1.6.2 - fix(release)
bump version to 1.6.2
- No source changes detected in the diff
- Current package.json version is 1.6.1
- Recommend a patch bump to 1.6.2 for a release
## 2026-01-20 - 1.6.1 - fix(driveragent)
include full message history for tool results and use a continuation prompt when invoking provider.collectStreamResponse
- When toolName is provided, include the full messageHistory (do not slice off the last message) so tool result messages are preserved.
- Set userMessage to a continuation prompt ('Continue with the task. The tool result has been provided above.') when handling tool results to avoid repeating the tool output.
- Keeps existing maxHistoryMessages trimming and validates provider.collectStreamResponse is available before streaming.
## 2026-01-20 - 1.6.0 - feat(smartagent)
record native tool results in message history by adding optional toolName to continueWithNativeTools and passing tool identifier from DualAgent
- continueWithNativeTools(message, toolName?) now accepts an optional toolName; when provided the message is stored with role 'tool' and includes a toolName property (cast to ChatMessage)
- DualAgent constructs a toolNameForHistory as `${proposal.toolName}_${proposal.action}` and forwards it to continueWithNativeTools in both normal and error flows
- Preserves tool-origin information in the conversation history to support native tool calling and tracking
## 2026-01-20 - 1.5.4 - fix(driveragent)
prevent duplicate thinking/output markers during token streaming and mark transitions
- Add isInThinkingMode flag to track thinking vs output state
- Emit "\n[THINKING] " only when transitioning into thinking mode (avoids repeated thinking markers)
- Emit "\n[OUTPUT] " when transitioning out of thinking mode to mark content output
- Reset thinking state after response completes to ensure correct markers for subsequent responses
- Applied the same streaming marker logic to both response handling paths
## 2026-01-20 - 1.5.3 - fix(driveragent)
prefix thinking tokens with [THINKING] when forwarding streaming chunks to onToken
- Wraps chunk.thinking with '[THINKING] ' before calling onToken to mark thinking tokens
- Forwards chunk.content unchanged
- Change applied in ts/smartagent.classes.driveragent.ts for both initial and subsequent assistant streaming responses
- No API signature changes; only the token payloads sent to onToken are altered
## 2026-01-20 - 1.5.2 - fix()
no changes in this diff; nothing to release
- No files changed; no release required
- No code or dependency changes detected
## 2026-01-20 - 1.5.1 - fix(smartagent)
bump patch version to 1.5.1 (no changes in diff)
- No code changes detected in the provided diff
- Current package.json version is 1.5.0
- Recommended semantic version bump: patch -> 1.5.1
## 2026-01-20 - 1.5.0 - feat(driveragent)
preserve assistant reasoning in message history and update @push.rocks/smartai dependency to ^0.13.0
- Store response.reasoning in messageHistory for assistant responses (two places in driveragent)
- Bump dependency @push.rocks/smartai from ^0.12.0 to ^0.13.0
## 2026-01-20 - 1.4.2 - fix(repo)
no changes detected in diff
- No files changed in diff; no code or metadata updates were made.
- No version bump required.
## 2026-01-20 - 1.4.1 - fix()
no changes detected (empty diff)
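Taken together, the 1.5.x and 1.6.x entries above describe the new native tool calling mode and the streamed [THINKING]/[OUTPUT] markers. A minimal consumer-side sketch follows; only the useNativeToolCalling flag and the onToken signature are confirmed by the IDualAgentOptions diff further down this page. The import path, the constructor taking the options object directly, and the runTask method name are assumptions, and the provider configuration inherited from ISmartAiOptions is omitted.

// Sketch only. Confirmed by this page: useNativeToolCalling, the onToken signature,
// and DualAgentOrchestrator.start(). Assumed: import path, constructor shape, runTask name.
import { DualAgentOrchestrator } from '@push.rocks/smartagent';

const orchestrator = new DualAgentOrchestrator({
  // ...ISmartAiOptions fields (provider credentials, model selection) would go here...
  useNativeToolCalling: true, // use Ollama's native tool API instead of <tool_call> XML
  onToken: (token: string, source: 'driver' | 'guardian') => {
    // Tokens arrive with "\n[THINKING] " / "\n[OUTPUT] " transition markers (v1.5.3/1.5.4).
    process.stdout.write(`[${source}] ${token}`);
  },
});

await orchestrator.start();
// const result = await orchestrator.runTask('Validate this JSON: {"key": "value"}');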


@@ -1,6 +1,6 @@
{
"name": "@push.rocks/smartagent",
"version": "1.4.1",
"version": "1.6.2",
"private": false,
"description": "an agentic framework built on top of @push.rocks/smartai",
"main": "dist_ts/index.js",
@@ -21,7 +21,7 @@
"@types/node": "^25.0.2"
},
"dependencies": {
"@push.rocks/smartai": "^0.12.0",
"@push.rocks/smartai": "^0.13.3",
"@push.rocks/smartbrowser": "^2.0.8",
"@push.rocks/smartdeno": "^1.2.0",
"@push.rocks/smartfs": "^1.2.0",

pnpm-lock.yaml generated

@@ -9,8 +9,8 @@ importers:
.:
dependencies:
'@push.rocks/smartai':
specifier: ^0.12.0
version: 0.12.0(typescript@5.9.3)(ws@8.18.3)(zod@3.25.76)
specifier: ^0.13.3
version: 0.13.3(typescript@5.9.3)(ws@8.18.3)(zod@3.25.76)
'@push.rocks/smartbrowser':
specifier: ^2.0.8
version: 2.0.8(typescript@5.9.3)
@@ -243,6 +243,10 @@ packages:
resolution: {integrity: sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==}
engines: {node: '>=6.9.0'}
'@babel/runtime@7.28.6':
resolution: {integrity: sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA==}
engines: {node: '>=6.9.0'}
'@borewit/text-codec@0.1.1':
resolution: {integrity: sha512-5L/uBxmjaCIX5h8Z+uu+kA9BQLkc/Wl06UGR5ajNRxu+/XjonB5i8JpgFMrPj3LXTCPA0pv8yxUvbUi+QthGGA==}
@@ -267,6 +271,9 @@ packages:
'@emnapi/runtime@1.7.1':
resolution: {integrity: sha512-PVtJr5CmLwYAU9PZDMITZoR5iAOShYREoR45EyyLrbntV50mdePTgUn4AmOw90Ifcj+x2kRjdzr1HP3RrNiHGA==}
'@emnapi/runtime@1.8.1':
resolution: {integrity: sha512-mehfKSMWjjNol8659Z8KxEMrdSJDDot5SXMq00dM8BN4o+CLNXQ0xH2V7EchNHV4RmbZLmmPdEaXZc5H2FXmDg==}
'@emnapi/wasi-threads@1.1.0':
resolution: {integrity: sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==}
@@ -837,8 +844,8 @@ packages:
'@push.rocks/qenv@6.1.3':
resolution: {integrity: sha512-+z2hsAU/7CIgpYLFqvda8cn9rUBMHqLdQLjsFfRn5jPoD7dJ5rFlpkbhfM4Ws8mHMniwWaxGKo+q/YBhtzRBLg==}
'@push.rocks/smartai@0.12.0':
resolution: {integrity: sha512-T4HRaSSxO6TQGGXlQeswX2eYkB+gMu0FbKF9qCUri6FdRlYzmPDn19jgPrPJxyg5m3oj6TzflvfYwcBCFlWo/A==}
'@push.rocks/smartai@0.13.3':
resolution: {integrity: sha512-VDZzHs101hpGMmUaectuLfcME4kHpuOS7o5ffuGk5lYl383foyAN71+5v441jpk/gLDNf2KhDACR/d2O4n90Ag==}
'@push.rocks/smartarchive@4.2.4':
resolution: {integrity: sha512-uiqVAXPxmr8G5rv3uZvZFMOCt8l7cZC3nzvsy4YQqKf/VkPhKIEX+b7LkAeNlxPSYUiBQUkNRoawg9+5BaMcHg==}
@@ -4331,6 +4338,8 @@ snapshots:
'@babel/runtime@7.28.4': {}
'@babel/runtime@7.28.6': {}
'@borewit/text-codec@0.1.1': {}
'@cloudflare/workers-types@4.20251202.0': {}
@@ -4395,6 +4404,11 @@ snapshots:
tslib: 2.8.1
optional: true
'@emnapi/runtime@1.8.1':
dependencies:
tslib: 2.8.1
optional: true
'@emnapi/wasi-threads@1.1.0':
dependencies:
tslib: 2.8.1
@@ -4682,7 +4696,7 @@ snapshots:
'@img/sharp-wasm32@0.34.5':
dependencies:
'@emnapi/runtime': 1.7.1
'@emnapi/runtime': 1.8.1
optional: true
'@img/sharp-win32-arm64@0.34.5':
@@ -5158,7 +5172,7 @@ snapshots:
'@push.rocks/smartlog': 3.1.10
'@push.rocks/smartpath': 6.0.0
'@push.rocks/smartai@0.12.0(typescript@5.9.3)(ws@8.18.3)(zod@3.25.76)':
'@push.rocks/smartai@0.13.3(typescript@5.9.3)(ws@8.18.3)(zod@3.25.76)':
dependencies:
'@anthropic-ai/sdk': 0.71.2(zod@3.25.76)
'@mistralai/mistralai': 1.12.0
@@ -7588,7 +7602,7 @@ snapshots:
json-schema-to-ts@3.1.1:
dependencies:
'@babel/runtime': 7.28.4
'@babel/runtime': 7.28.6
ts-algebra: 2.0.0
jsonfile@6.2.0:


@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@push.rocks/smartagent',
version: '1.4.1',
version: '1.6.2',
description: 'an agentic framework built on top of @push.rocks/smartai'
}


@@ -25,6 +25,7 @@ export class DriverAgent {
private messageHistory: plugins.smartai.ChatMessage[] = [];
private tools: Map<string, BaseToolWrapper> = new Map();
private onToken?: (token: string) => void;
private isInThinkingMode = false; // Track thinking/content state for markers
constructor(
provider: plugins.smartai.MultiModalModel,
@@ -121,10 +122,11 @@ export class DriverAgent {
});
}
// Add assistant response to history (store images if provided)
// Add assistant response to history (store images if provided, preserve reasoning for GPT-OSS)
const historyMessage: plugins.smartai.ChatMessage = {
role: 'assistant',
content: response.message,
reasoning: response.reasoning,
};
this.messageHistory.push(historyMessage);
@@ -189,10 +191,11 @@ export class DriverAgent {
});
}
// Add assistant response to history
// Add assistant response to history (preserve reasoning for GPT-OSS)
this.messageHistory.push({
role: 'assistant',
content: response.message,
reasoning: response.reasoning,
});
return {
@@ -375,33 +378,33 @@ export class DriverAgent {
## Your Role
You analyze tasks, break them down into steps, and use tools to accomplish goals.
## Tool Usage Format
When you need to use a tool, output a tool call proposal in this format:
## CRITICAL: Tool Usage Format
To use a tool, you MUST literally write out the XML tags in your response. The system parses your output looking for these exact tags. Do NOT just describe or mention the tool call - you must OUTPUT the actual XML.
CORRECT (the XML is in the output):
<tool_call>
<tool>tool_name</tool>
<action>action_name</action>
<params>
{"param1": "value1", "param2": "value2"}
</params>
<reasoning>Brief explanation of why this action is needed</reasoning>
<tool>json</tool>
<action>validate</action>
<params>{"jsonString": "{\\"key\\":\\"value\\"}"}</params>
</tool_call>
WRONG (just describing, no actual XML):
"I will call json.validate now" or "Let me use the tool"
## Guidelines
1. Think step by step about what needs to be done
2. Use only the tools that are available to you
3. Provide clear reasoning for each tool call
4. If a tool call is rejected, adapt your approach based on the feedback
5. When the task is complete, indicate this clearly:
2. When you need a tool, OUTPUT the <tool_call> XML tags - do not just mention them
3. Only propose ONE tool call at a time
4. Wait for the result before proposing the next action
5. When the task is complete, OUTPUT:
<task_complete>
Brief summary of what was accomplished
Your final result here
</task_complete>
## Important
- Only propose ONE tool call at a time
- Wait for the result before proposing the next action
- If you encounter an error, analyze it and try an alternative approach
- The <tool_call> and <task_complete> tags MUST appear literally in your response
- If you just say "I'll call the tool" without the actual XML, it will NOT work
- If you need clarification, ask using <needs_clarification>your question</needs_clarification>`;
}
@@ -440,4 +443,333 @@ Your complete output here
public reset(): void {
this.messageHistory = [];
}
// ================================
// Native Tool Calling Support
// ================================
/**
* Start a task with native tool calling support
* Uses Ollama's native tool calling API instead of XML parsing
* @param task The task description
* @param images Optional base64-encoded images for vision tasks
* @returns Response with content, reasoning, and any tool calls
*/
public async startTaskWithNativeTools(
task: string,
images?: string[]
): Promise<{ message: interfaces.IAgentMessage; toolCalls?: interfaces.INativeToolCall[] }> {
// Reset message history
this.messageHistory = [];
// Build simple user message (no XML instructions needed for native tool calling)
const userMessage = `TASK: ${task}\n\nComplete this task using the available tools. When done, provide your final output.`;
// Add to history
this.messageHistory.push({
role: 'user',
content: userMessage,
});
// Build system message for native tool calling
const fullSystemMessage = this.getNativeToolsSystemMessage();
// Get tools in JSON schema format
const tools = this.getToolsAsJsonSchema();
// Check if provider supports native tool calling (Ollama)
const provider = this.provider as any;
if (typeof provider.collectStreamResponse !== 'function') {
throw new Error('Provider does not support native tool calling. Use startTask() instead.');
}
// Use collectStreamResponse for streaming support with tools
const response = await provider.collectStreamResponse(
{
systemMessage: fullSystemMessage,
userMessage: userMessage,
messageHistory: [],
images: images,
tools: tools.length > 0 ? tools : undefined,
},
// Pass onToken callback through onChunk for streaming with thinking markers
this.onToken ? (chunk: any) => {
if (chunk.thinking && this.onToken) {
// Add marker only when transitioning INTO thinking mode
if (!this.isInThinkingMode) {
this.onToken('\n[THINKING] ');
this.isInThinkingMode = true;
}
this.onToken(chunk.thinking);
}
if (chunk.content && this.onToken) {
// Add marker when transitioning OUT of thinking mode
if (this.isInThinkingMode) {
this.onToken('\n[OUTPUT] ');
this.isInThinkingMode = false;
}
this.onToken(chunk.content);
}
} : undefined
);
// Reset thinking state after response completes
this.isInThinkingMode = false;
// Add assistant response to history
const historyMessage: any = {
role: 'assistant',
content: response.message || '',
reasoning: response.thinking || response.reasoning,
};
// CRITICAL: Preserve tool_calls in history for native tool calling
// Without this, the model doesn't know it already called a tool and loops forever
if (response.toolCalls && response.toolCalls.length > 0) {
historyMessage.tool_calls = response.toolCalls.map((tc: any) => ({
function: {
name: tc.function.name,
arguments: tc.function.arguments,
},
}));
}
this.messageHistory.push(historyMessage as unknown as plugins.smartai.ChatMessage);
// Convert Ollama tool calls to our format
let toolCalls: interfaces.INativeToolCall[] | undefined;
if (response.toolCalls && response.toolCalls.length > 0) {
toolCalls = response.toolCalls.map((tc: any) => ({
function: {
name: tc.function.name,
arguments: tc.function.arguments,
index: tc.function.index,
},
}));
}
return {
message: {
role: 'assistant',
content: response.message || '',
},
toolCalls,
};
}
/**
* Continue conversation with native tool calling support
* @param message The message to continue with (e.g., tool result)
* @param toolName Optional tool name - when provided, message is added as role: 'tool' instead of 'user'
* @returns Response with content, reasoning, and any tool calls
*/
public async continueWithNativeTools(
message: string,
toolName?: string
): Promise<{ message: interfaces.IAgentMessage; toolCalls?: interfaces.INativeToolCall[] }> {
// Add the new message to history
if (toolName) {
// Tool result - must use role: 'tool' for native tool calling
// The 'tool' role is supported by providers but not in the ChatMessage type
this.messageHistory.push({
role: 'tool',
content: message,
toolName: toolName,
} as unknown as plugins.smartai.ChatMessage);
} else {
// Regular user message
this.messageHistory.push({
role: 'user',
content: message,
});
}
// Build system message
const fullSystemMessage = this.getNativeToolsSystemMessage();
// Get tools in JSON schema format
const tools = this.getToolsAsJsonSchema();
// Get response from provider with history windowing
// For tool results, include the full history (with tool message)
// For regular user messages, exclude the last message (it becomes userMessage)
let historyForChat: plugins.smartai.ChatMessage[];
const fullHistory = toolName
? this.messageHistory // Include tool result in history
: this.messageHistory.slice(0, -1); // Exclude last user message
if (this.maxHistoryMessages > 0 && fullHistory.length > this.maxHistoryMessages) {
historyForChat = [
fullHistory[0],
...fullHistory.slice(-(this.maxHistoryMessages - 1)),
];
} else {
historyForChat = fullHistory;
}
// Check if provider supports native tool calling
const provider = this.provider as any;
if (typeof provider.collectStreamResponse !== 'function') {
throw new Error('Provider does not support native tool calling. Use continueWithMessage() instead.');
}
// For tool results, use a continuation prompt instead of repeating the result
const userMessage = toolName
? 'Continue with the task. The tool result has been provided above.'
: message;
// Use collectStreamResponse for streaming support with tools
const response = await provider.collectStreamResponse(
{
systemMessage: fullSystemMessage,
userMessage: userMessage,
messageHistory: historyForChat,
tools: tools.length > 0 ? tools : undefined,
},
// Pass onToken callback through onChunk for streaming with thinking markers
this.onToken ? (chunk: any) => {
if (chunk.thinking && this.onToken) {
// Add marker only when transitioning INTO thinking mode
if (!this.isInThinkingMode) {
this.onToken('\n[THINKING] ');
this.isInThinkingMode = true;
}
this.onToken(chunk.thinking);
}
if (chunk.content && this.onToken) {
// Add marker when transitioning OUT of thinking mode
if (this.isInThinkingMode) {
this.onToken('\n[OUTPUT] ');
this.isInThinkingMode = false;
}
this.onToken(chunk.content);
}
} : undefined
);
// Reset thinking state after response completes
this.isInThinkingMode = false;
// Add assistant response to history
const historyMessage: any = {
role: 'assistant',
content: response.message || '',
reasoning: response.thinking || response.reasoning,
};
// CRITICAL: Preserve tool_calls in history for native tool calling
// Without this, the model doesn't know it already called a tool and loops forever
if (response.toolCalls && response.toolCalls.length > 0) {
historyMessage.tool_calls = response.toolCalls.map((tc: any) => ({
function: {
name: tc.function.name,
arguments: tc.function.arguments,
},
}));
}
this.messageHistory.push(historyMessage as unknown as plugins.smartai.ChatMessage);
// Convert Ollama tool calls to our format
let toolCalls: interfaces.INativeToolCall[] | undefined;
if (response.toolCalls && response.toolCalls.length > 0) {
toolCalls = response.toolCalls.map((tc: any) => ({
function: {
name: tc.function.name,
arguments: tc.function.arguments,
index: tc.function.index,
},
}));
}
return {
message: {
role: 'assistant',
content: response.message || '',
},
toolCalls,
};
}
/**
* Get system message for native tool calling mode
* Simplified prompt that lets the model use tools naturally
*/
private getNativeToolsSystemMessage(): string {
return `You are an AI assistant that executes tasks by using available tools.
## Your Role
You analyze tasks, break them down into steps, and use tools to accomplish goals.
## Guidelines
1. Think step by step about what needs to be done
2. Use the available tools to complete the task
3. Process tool results and continue until the task is complete
4. When the task is complete, provide a final summary
## Important
- Use tools when needed to gather information or perform actions
- If you need clarification, ask the user
- Always verify your work before marking the task complete`;
}
/**
* Convert registered tools to Ollama JSON Schema format for native tool calling
* Each tool action becomes a separate function with name format: "toolName_actionName"
* @returns Array of IOllamaTool compatible tool definitions
*/
public getToolsAsJsonSchema(): plugins.smartai.IOllamaTool[] {
const tools: plugins.smartai.IOllamaTool[] = [];
for (const tool of this.tools.values()) {
for (const action of tool.actions) {
// Build the tool definition in Ollama format
const toolDef: plugins.smartai.IOllamaTool = {
type: 'function',
function: {
name: `${tool.name}_${action.name}`, // e.g., "json_validate"
description: `[${tool.name}] ${action.description}`,
parameters: action.parameters as plugins.smartai.IOllamaTool['function']['parameters'],
},
};
tools.push(toolDef);
}
}
return tools;
}
/**
* Parse native tool calls from provider response into IToolCallProposal format
* @param toolCalls Array of native tool calls from the provider
* @returns Array of IToolCallProposal ready for execution
*/
public parseNativeToolCalls(
toolCalls: interfaces.INativeToolCall[]
): interfaces.IToolCallProposal[] {
return toolCalls.map(tc => {
// Split "json_validate" -> toolName="json", action="validate"
const fullName = tc.function.name;
const underscoreIndex = fullName.indexOf('_');
let toolName: string;
let action: string;
if (underscoreIndex > 0) {
toolName = fullName.substring(0, underscoreIndex);
action = fullName.substring(underscoreIndex + 1);
} else {
// Fallback: treat entire name as tool name with empty action
toolName = fullName;
action = '';
}
return {
proposalId: this.generateProposalId(),
toolName,
action,
params: tc.function.arguments,
};
});
}
}
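The v1.6.0 through v1.6.2 fixes all concern what ends up in messageHistory when native tool calling is active. Purely for illustration (field values invented, field names taken from the code above), the history after one json_validate round-trip looks roughly like this:

// Illustrative history shape only; the real entries are built by the methods above.
const exampleHistory = [
  {
    role: 'user',
    content: 'TASK: Validate this JSON: {"key": "value"}\n\n' +
      'Complete this task using the available tools. When done, provide your final output.',
  },
  {
    role: 'assistant',
    content: '',
    reasoning: 'I should validate the JSON with the json_validate tool.', // invented example text
    // Preserved since v1.6.2; without it the model forgets it already called the tool and loops.
    tool_calls: [{ function: { name: 'json_validate', arguments: { jsonString: '{"key": "value"}' } } }],
  },
  // Tool result, stored with role 'tool' and the combined toolName_actionName id (v1.6.0).
  { role: 'tool', content: '{"valid": true}', toolName: 'json_validate' },
  // The follow-up call then sends this full history plus the continuation prompt
  // 'Continue with the task. The tool result has been provided above.' (v1.6.1).
];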


@@ -242,12 +242,18 @@ export class DualAgentOrchestrator {
throw new Error('Orchestrator not started. Call start() first.');
}
// Use native tool calling if enabled
const useNativeTools = this.options.useNativeToolCalling === true;
this.conversationHistory = [];
let iterations = 0;
let consecutiveRejections = 0;
let completed = false;
let finalResult: string | null = null;
// Track pending native tool calls
let pendingNativeToolCalls: interfaces.INativeToolCall[] | undefined;
// Extract images from options
const images = options?.images;
@@ -258,7 +264,17 @@ export class DualAgentOrchestrator {
});
// Start the driver with the task and optional images
let driverResponse = await this.driver.startTask(task, images);
let driverResponse: interfaces.IAgentMessage;
if (useNativeTools) {
// Native tool calling mode
const result = await this.driver.startTaskWithNativeTools(task, images);
driverResponse = result.message;
pendingNativeToolCalls = result.toolCalls;
} else {
// XML parsing mode
driverResponse = await this.driver.startTask(task, images);
}
this.conversationHistory.push(driverResponse);
// Emit task started event
@@ -281,10 +297,16 @@ export class DualAgentOrchestrator {
maxIterations: this.options.maxIterations,
});
// Check if task is complete
if (this.driver.isTaskComplete(driverResponse.content)) {
// Check if task is complete (for native mode, no pending tool calls and has content)
const isComplete = useNativeTools
? (!pendingNativeToolCalls || pendingNativeToolCalls.length === 0) && driverResponse.content.length > 0
: this.driver.isTaskComplete(driverResponse.content);
if (isComplete) {
completed = true;
finalResult = this.driver.extractTaskResult(driverResponse.content) || driverResponse.content;
finalResult = useNativeTools
? driverResponse.content
: (this.driver.extractTaskResult(driverResponse.content) || driverResponse.content);
// Emit task completed event
this.emitProgress({
@@ -315,13 +337,34 @@ export class DualAgentOrchestrator {
};
}
// Parse tool call proposals
const proposals = this.driver.parseToolCallProposals(driverResponse.content);
// Parse tool call proposals - native mode uses pendingNativeToolCalls, XML mode parses content
let proposals: interfaces.IToolCallProposal[];
if (useNativeTools && pendingNativeToolCalls && pendingNativeToolCalls.length > 0) {
// Native tool calling mode - convert native tool calls to proposals
proposals = this.driver.parseNativeToolCalls(pendingNativeToolCalls);
pendingNativeToolCalls = undefined; // Clear after processing
} else if (!useNativeTools) {
// XML parsing mode
proposals = this.driver.parseToolCallProposals(driverResponse.content);
} else {
proposals = [];
}
if (proposals.length === 0) {
// No tool calls found - remind the model of the exact XML format
driverResponse = await this.driver.continueWithMessage(
`No valid tool call was found in your response. To use a tool, you MUST output the exact XML format:
if (useNativeTools) {
// Native mode: no tool calls and no content means we should continue
const result = await this.driver.continueWithNativeTools(
'Please continue with the task. Use the available tools or provide your final output.'
);
driverResponse = result.message;
pendingNativeToolCalls = result.toolCalls;
this.conversationHistory.push(driverResponse);
continue;
} else {
// XML mode: remind the model of the exact XML format
driverResponse = await this.driver.continueWithMessage(
`No valid tool call was found in your response. To use a tool, you MUST output the exact XML format:
<tool_call>
<tool>tool_name</tool>
@@ -340,9 +383,10 @@ Or to complete the task:
<task_complete>your final JSON output here</task_complete>
Please output the exact XML format above.`
);
this.conversationHistory.push(driverResponse);
continue;
);
this.conversationHistory.push(driverResponse);
continue;
}
}
// Process the first proposal (one at a time)
@@ -449,13 +493,31 @@ Please output the exact XML format above.`
toolResult: result,
});
driverResponse = await this.driver.continueWithMessage(resultMessage);
// Continue with appropriate method based on mode
if (useNativeTools) {
const toolNameForHistory = `${proposal.toolName}_${proposal.action}`;
const continueResult = await this.driver.continueWithNativeTools(resultMessage, toolNameForHistory);
driverResponse = continueResult.message;
pendingNativeToolCalls = continueResult.toolCalls;
} else {
driverResponse = await this.driver.continueWithMessage(resultMessage);
}
this.conversationHistory.push(driverResponse);
} catch (error) {
const errorMessage = `Tool execution failed: ${error instanceof Error ? error.message : String(error)}`;
driverResponse = await this.driver.continueWithMessage(
`TOOL ERROR: ${errorMessage}\n\nPlease try a different approach.`
);
if (useNativeTools) {
const toolNameForHistory = `${proposal.toolName}_${proposal.action}`;
const continueResult = await this.driver.continueWithNativeTools(
`TOOL ERROR: ${errorMessage}\n\nPlease try a different approach.`,
toolNameForHistory
);
driverResponse = continueResult.message;
pendingNativeToolCalls = continueResult.toolCalls;
} else {
driverResponse = await this.driver.continueWithMessage(
`TOOL ERROR: ${errorMessage}\n\nPlease try a different approach.`
);
}
this.conversationHistory.push(driverResponse);
}
} else {
@@ -492,7 +554,14 @@ Please output the exact XML format above.`
guardianDecision: decision,
});
driverResponse = await this.driver.continueWithMessage(feedback);
// Continue with appropriate method based on mode
if (useNativeTools) {
const continueResult = await this.driver.continueWithNativeTools(feedback);
driverResponse = continueResult.message;
pendingNativeToolCalls = continueResult.toolCalls;
} else {
driverResponse = await this.driver.continueWithMessage(feedback);
}
this.conversationHistory.push(driverResponse);
}
}
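The completion check introduced above differs between the two modes. Restated as a self-contained sketch (same rule as in the diff, not the literal implementation):

// Sketch of the completion rule added in the diff above.
function isDriverDone(
  useNativeTools: boolean,
  content: string,
  pendingNativeToolCalls: Array<{ function: { name: string } }> | undefined,
  xmlIsTaskComplete: (content: string) => boolean, // DriverAgent.isTaskComplete in XML mode
): boolean {
  if (useNativeTools) {
    // Native mode: done once the model returns content and proposes no further tool calls.
    return (!pendingNativeToolCalls || pendingNativeToolCalls.length === 0) && content.length > 0;
  }
  // XML mode: done when the response contains a <task_complete> block.
  return xmlIsTaskComplete(content);
}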


@@ -48,6 +48,12 @@ export interface IDualAgentOptions extends plugins.smartai.ISmartAiOptions {
logPrefix?: string;
/** Callback fired for each token during LLM generation (streaming mode) */
onToken?: (token: string, source: 'driver' | 'guardian') => void;
/**
* Enable native tool calling mode (default: false)
* When enabled, uses Ollama's native tool calling API instead of XML parsing
* This is more efficient for models that support it (e.g., GPT-OSS with Harmony format)
*/
useNativeToolCalling?: boolean;
}
// ================================
@@ -83,6 +89,18 @@ export interface IToolAction {
parameters: Record<string, unknown>;
}
/**
* Native tool call from provider (matches Ollama's tool calling format)
* Format: function name is "toolName_actionName" (e.g., "json_validate")
*/
export interface INativeToolCall {
function: {
name: string; // Format: "toolName_actionName"
arguments: Record<string, unknown>;
index?: number;
};
}
/**
* Proposed tool call from the Driver
*/
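To close the loop on the "toolName_actionName" convention used throughout: below is a native tool call matching INativeToolCall, and the proposal that DriverAgent.parseNativeToolCalls derives from it by splitting the combined name at the first underscore (values illustrative):

// Illustrative values; proposalId is generated internally by DriverAgent.
const nativeCall = {
  function: { name: 'json_validate', arguments: { jsonString: '{"key": "value"}' } },
};
// driver.parseNativeToolCalls([nativeCall]) yields roughly:
// [{ proposalId: '<generated>', toolName: 'json', action: 'validate',
//    params: { jsonString: '{"key": "value"}' } }]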