// File: smartai/test/test.ollama.ts
// Tests for the Ollama LanguageModelV3 provider (391 lines, 13 KiB, TypeScript).
import { tap, expect } from '@git.zone/tstest/tapbundle';
import { createOllamaModel } from '../ts/smartai.provider.ollama.js';
import type { ISmartAiOptions } from '../ts/smartai.interfaces.js';
tap.test('createOllamaModel returns valid LanguageModelV3', async () => {
  const ollamaModel = createOllamaModel({
    provider: 'ollama',
    model: 'qwen3:8b',
    ollamaOptions: { think: true, num_ctx: 4096 },
  });
  // The factory must produce an object matching the LanguageModelV3 surface.
  expect(ollamaModel.specificationVersion).toEqual('v3');
  expect(ollamaModel.provider).toEqual('ollama');
  expect(ollamaModel.modelId).toEqual('qwen3:8b');
  // Both generation entry points must be present.
  for (const method of ['doGenerate', 'doStream'] as const) {
    expect(ollamaModel).toHaveProperty(method);
  }
});
tap.test('Qwen models get default temperature 0.55', async () => {
  // Swap out global fetch so the outgoing request body can be inspected.
  const realFetch = globalThis.fetch;
  let requestBody: Record<string, unknown> | undefined;
  globalThis.fetch = async (_input: RequestInfo | URL, init?: RequestInit) => {
    requestBody = JSON.parse(init?.body as string);
    const payload = {
      message: { content: 'test response', role: 'assistant' },
      done: true,
      prompt_eval_count: 10,
      eval_count: 5,
    };
    return new Response(JSON.stringify(payload), { status: 200 });
  };
  try {
    const model = createOllamaModel({ provider: 'ollama', model: 'qwen3:8b' });
    await model.doGenerate({
      prompt: [{ role: 'user', content: [{ type: 'text', text: 'hello' }] }],
      inputFormat: 'prompt',
    } as any);
    expect(requestBody).toBeTruthy();
    // Qwen models should get the 0.55 default inside the options object.
    const requestOptions = requestBody!.options as Record<string, unknown>;
    expect(requestOptions.temperature).toEqual(0.55);
  } finally {
    // Always restore the real fetch, even if an assertion throws.
    globalThis.fetch = realFetch;
  }
});
tap.test('think option is passed at top level of request body', async () => {
  // Capture the request body by intercepting global fetch.
  const realFetch = globalThis.fetch;
  let requestBody: Record<string, unknown> | undefined;
  globalThis.fetch = async (_input: RequestInfo | URL, init?: RequestInit) => {
    requestBody = JSON.parse(init?.body as string);
    const payload = {
      message: { content: 'test', role: 'assistant', thinking: 'let me think...' },
      done: true,
      prompt_eval_count: 10,
      eval_count: 5,
    };
    return new Response(JSON.stringify(payload), { status: 200 });
  };
  try {
    const model = createOllamaModel({
      provider: 'ollama',
      model: 'qwen3:8b',
      ollamaOptions: { think: true, num_ctx: 4096 },
    });
    await model.doGenerate({
      prompt: [{ role: 'user', content: [{ type: 'text', text: 'hello' }] }],
      inputFormat: 'prompt',
    } as any);
    expect(requestBody).toBeTruthy();
    // `think` belongs at the top level of the request, not nested in options...
    expect(requestBody!.think).toEqual(true);
    // ...while num_ctx must land inside the options object.
    const requestOptions = requestBody!.options as Record<string, unknown>;
    expect(requestOptions.num_ctx).toEqual(4096);
  } finally {
    globalThis.fetch = realFetch;
  }
});
tap.test('Non-qwen models do not get default temperature', async () => {
  // Intercept global fetch to capture the outgoing request body.
  const originalFetch = globalThis.fetch;
  let capturedBody: Record<string, unknown> | undefined;
  globalThis.fetch = async (_input: RequestInfo | URL, init?: RequestInit) => {
    capturedBody = JSON.parse(init?.body as string);
    return new Response(JSON.stringify({
      message: { content: 'test', role: 'assistant' },
      done: true,
    }), { status: 200 });
  };
  try {
    const model = createOllamaModel({
      provider: 'ollama',
      model: 'llama3:8b',
    });
    await model.doGenerate({
      prompt: [{ role: 'user', content: [{ type: 'text', text: 'hello' }] }],
      inputFormat: 'prompt',
    } as any);
    expect(capturedBody).toBeTruthy();
    // No temperature should be set. Use optional access: if the provider
    // omits `options` entirely (legitimate when no settings apply), the
    // old non-optional cast would throw a TypeError instead of passing.
    const options = capturedBody!.options as Record<string, unknown> | undefined;
    expect(options?.temperature).toBeUndefined();
  } finally {
    globalThis.fetch = originalFetch;
  }
});
tap.test('doGenerate parses reasoning/thinking from response', async () => {
  // Stub fetch with a canned response that carries a `thinking` field.
  const realFetch = globalThis.fetch;
  globalThis.fetch = async (_input: RequestInfo | URL, _init?: RequestInit) => {
    const payload = {
      message: {
        content: 'The answer is 42.',
        role: 'assistant',
        thinking: 'Let me reason about this carefully...',
      },
      done: true,
      prompt_eval_count: 20,
      eval_count: 15,
    };
    return new Response(JSON.stringify(payload), { status: 200 });
  };
  try {
    const model = createOllamaModel({
      provider: 'ollama',
      model: 'qwen3:8b',
      ollamaOptions: { think: true },
    });
    const result = await model.doGenerate({
      prompt: [{ role: 'user', content: [{ type: 'text', text: 'What is the meaning of life?' }] }],
    } as any);
    // The response must be split into one reasoning part and one text part.
    const byType = (wanted: string) => result.content.filter(c => c.type === wanted);
    const reasoningParts = byType('reasoning');
    const textParts = byType('text');
    expect(reasoningParts.length).toEqual(1);
    expect((reasoningParts[0] as any).text).toEqual('Let me reason about this carefully...');
    expect(textParts.length).toEqual(1);
    expect((textParts[0] as any).text).toEqual('The answer is 42.');
    expect(result.finishReason.unified).toEqual('stop');
  } finally {
    globalThis.fetch = realFetch;
  }
});
tap.test('doGenerate parses tool calls from response', async () => {
  // Stub fetch with a response whose message contains a tool_calls array.
  const realFetch = globalThis.fetch;
  globalThis.fetch = async (_input: RequestInfo | URL, _init?: RequestInit) => {
    const payload = {
      message: {
        content: '',
        role: 'assistant',
        tool_calls: [
          {
            function: {
              name: 'get_weather',
              arguments: { location: 'London', unit: 'celsius' },
            },
          },
        ],
      },
      done: true,
      prompt_eval_count: 30,
      eval_count: 10,
    };
    return new Response(JSON.stringify(payload), { status: 200 });
  };
  try {
    const model = createOllamaModel({ provider: 'ollama', model: 'qwen3:8b' });
    const weatherTool = {
      type: 'function' as const,
      name: 'get_weather',
      description: 'Get weather for a location',
      inputSchema: {
        type: 'object',
        properties: {
          location: { type: 'string' },
          unit: { type: 'string' },
        },
      },
    };
    const result = await model.doGenerate({
      prompt: [{ role: 'user', content: [{ type: 'text', text: 'What is the weather in London?' }] }],
      tools: [weatherTool],
    } as any);
    // Exactly one tool-call part, with JSON-encoded arguments in `input`.
    const toolCalls = result.content.filter(c => c.type === 'tool-call');
    expect(toolCalls.length).toEqual(1);
    expect((toolCalls[0] as any).toolName).toEqual('get_weather');
    expect(JSON.parse((toolCalls[0] as any).input)).toEqual({ location: 'London', unit: 'celsius' });
    expect(result.finishReason.unified).toEqual('tool-calls');
  } finally {
    globalThis.fetch = realFetch;
  }
});
tap.test('doStream produces correct stream parts', async () => {
  const realFetch = globalThis.fetch;
  // Ollama streams newline-delimited JSON objects; simulate three chunks.
  const ndjsonLines = [
    { message: { content: 'Hello', role: 'assistant' }, done: false },
    { message: { content: ' world', role: 'assistant' }, done: false },
    { message: { content: '!', role: 'assistant' }, done: true, prompt_eval_count: 5, eval_count: 3 },
  ].map(obj => JSON.stringify(obj) + '\n');
  globalThis.fetch = async (_input: RequestInfo | URL, _init?: RequestInit) => {
    const encoder = new TextEncoder();
    const body = new ReadableStream({
      start(controller) {
        ndjsonLines.forEach(line => controller.enqueue(encoder.encode(line)));
        controller.close();
      },
    });
    return new Response(body, { status: 200 });
  };
  try {
    const model = createOllamaModel({ provider: 'ollama', model: 'llama3:8b' });
    const result = await model.doStream({
      prompt: [{ role: 'user', content: [{ type: 'text', text: 'hello' }] }],
    } as any);
    // Drain the whole stream into an array of parts.
    const parts: any[] = [];
    const reader = result.stream.getReader();
    for (let step = await reader.read(); !step.done; step = await reader.read()) {
      parts.push(step.value);
    }
    // Expected sequence: text-start, 3x text-delta, text-end, finish.
    const ofType = (wanted: string) => parts.filter(p => p.type === wanted);
    expect(ofType('text-start').length).toEqual(1);
    const textDeltas = ofType('text-delta');
    expect(textDeltas.length).toEqual(3);
    expect(textDeltas.map((d: any) => d.delta).join('')).toEqual('Hello world!');
    expect(ofType('text-end').length).toEqual(1);
    const finishParts = ofType('finish');
    expect(finishParts.length).toEqual(1);
    expect(finishParts[0].finishReason.unified).toEqual('stop');
  } finally {
    globalThis.fetch = realFetch;
  }
});
tap.test('doStream handles thinking/reasoning in stream', async () => {
  const realFetch = globalThis.fetch;
  // Simulate a stream that emits thinking deltas first, then normal text.
  const ndjsonLines = [
    { message: { thinking: 'Let me think...', content: '', role: 'assistant' }, done: false },
    { message: { thinking: ' about this.', content: '', role: 'assistant' }, done: false },
    { message: { content: 'The answer.', role: 'assistant' }, done: false },
    { message: { content: '', role: 'assistant' }, done: true, prompt_eval_count: 10, eval_count: 8 },
  ].map(obj => JSON.stringify(obj) + '\n');
  globalThis.fetch = async (_input: RequestInfo | URL, _init?: RequestInit) => {
    const encoder = new TextEncoder();
    const body = new ReadableStream({
      start(controller) {
        ndjsonLines.forEach(line => controller.enqueue(encoder.encode(line)));
        controller.close();
      },
    });
    return new Response(body, { status: 200 });
  };
  try {
    const model = createOllamaModel({
      provider: 'ollama',
      model: 'qwen3:8b',
      ollamaOptions: { think: true },
    });
    const result = await model.doStream({
      prompt: [{ role: 'user', content: [{ type: 'text', text: 'think about this' }] }],
    } as any);
    // Drain the stream fully before asserting.
    const parts: any[] = [];
    const reader = result.stream.getReader();
    for (let step = await reader.read(); !step.done; step = await reader.read()) {
      parts.push(step.value);
    }
    const ofType = (wanted: string) => parts.filter(p => p.type === wanted);
    // Reasoning must be bracketed by start/end and carry both deltas in order.
    expect(ofType('reasoning-start').length).toEqual(1);
    const reasoningDeltas = ofType('reasoning-delta');
    expect(reasoningDeltas.length).toEqual(2);
    expect(reasoningDeltas.map((d: any) => d.delta).join('')).toEqual('Let me think... about this.');
    expect(ofType('reasoning-end').length).toEqual(1);
    const textDeltas = ofType('text-delta');
    expect(textDeltas.length).toEqual(1);
    expect(textDeltas[0].delta).toEqual('The answer.');
  } finally {
    globalThis.fetch = realFetch;
  }
});
tap.test('message conversion handles system, assistant, and tool messages', async () => {
const originalFetch = globalThis.fetch;
let capturedBody: Record<string, unknown> | undefined;
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
capturedBody = JSON.parse(init?.body as string);
return new Response(JSON.stringify({
message: { content: 'response', role: 'assistant' },
done: true,
}), { status: 200 });
};
try {
const model = createOllamaModel({
provider: 'ollama',
model: 'llama3:8b',
});
await model.doGenerate({
prompt: [
{ role: 'system', content: 'You are helpful.' },
{ role: 'user', content: [{ type: 'text', text: 'Hi' }] },
{
role: 'assistant',
content: [
{ type: 'text', text: 'Let me check.' },
{ type: 'tool-call', toolCallId: 'tc1', toolName: 'search', input: '{"q":"test"}' },
],
},
{
role: 'tool',
content: [
{ type: 'tool-result', toolCallId: 'tc1', output: { type: 'text', value: 'result data' } },
],
},
{ role: 'user', content: [{ type: 'text', text: 'What did you find?' }] },
],
} as any);
const messages = capturedBody!.messages as Array<Record<string, unknown>>;
expect(messages.length).toEqual(5);
expect(messages[0].role).toEqual('system');
expect(messages[0].content).toEqual('You are helpful.');
expect(messages[1].role).toEqual('user');
expect(messages[1].content).toEqual('Hi');
expect(messages[2].role).toEqual('assistant');
expect(messages[2].content).toEqual('Let me check.');
expect((messages[2].tool_calls as any[]).length).toEqual(1);
expect((messages[2].tool_calls as any[])[0].function.name).toEqual('search');
expect(messages[3].role).toEqual('tool');
expect(messages[3].content).toEqual('result data');
expect(messages[4].role).toEqual('user');
expect(messages[4].content).toEqual('What did you find?');
} finally {
globalThis.fetch = originalFetch;
}
});
// Run all registered tests; the runner awaits this default-exported promise.
export default tap.start();