// smartai/test/test.smartai.ts — TypeScript test suite (261 lines, 7.1 KiB)
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as qenv from '@push.rocks/qenv';
import { simulateReadableStream } from 'ai';
import { MockLanguageModelV3 } from 'ai/test';
import * as smartai from '../ts/index.js';
// Environment-variable loader: reads from the project root, with overrides in ./.nogit/.
const testQenv = new qenv.Qenv('./', './.nogit/');
tap.test('getModel should return a LanguageModelV3 for anthropic', async () => {
  const anthropicKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
  if (!anthropicKey) {
    console.log('ANTHROPIC_TOKEN not set, skipping test');
    return;
  }
  const anthropicModel = smartai.getModel({
    provider: 'anthropic',
    model: 'claude-sonnet-4-5-20250929',
    apiKey: anthropicKey,
  });
  // The returned object must expose the LanguageModelV3 surface.
  const requiredProps = ['specificationVersion', 'provider', 'modelId', 'doGenerate', 'doStream'];
  for (const prop of requiredProps) {
    expect(anthropicModel).toHaveProperty(prop);
  }
});
tap.test('getModel with anthropic prompt caching returns wrapped model', async () => {
  const token = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
  if (!token) {
    console.log('ANTHROPIC_TOKEN not set, skipping test');
    return;
  }
  const baseOptions = {
    provider: 'anthropic' as const,
    model: 'claude-sonnet-4-5-20250929',
    apiKey: token,
  };
  // Prompt caching is on by default.
  const cachedModel = smartai.getModel(baseOptions);
  // Explicit opt-out of prompt caching.
  const uncachedModel = smartai.getModel({ ...baseOptions, promptCaching: false });
  // Both variants should still be usable language models.
  expect(cachedModel).toHaveProperty('doGenerate');
  expect(uncachedModel).toHaveProperty('doGenerate');
});
tap.test('generateText with anthropic model', async () => {
  const anthropicToken = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
  if (!anthropicToken) {
    console.log('ANTHROPIC_TOKEN not set, skipping test');
    return;
  }
  const claudeModel = smartai.getModel({
    provider: 'anthropic',
    model: 'claude-sonnet-4-5-20250929',
    apiKey: anthropicToken,
  });
  // Fire a tiny live request and check we got a non-empty answer back.
  const generation = await smartai.generateText({
    model: claudeModel,
    prompt: 'Say hello in exactly 3 words.',
  });
  console.log('Anthropic response:', generation.text);
  expect(generation.text).toBeTruthy();
  expect(generation.text.length).toBeGreaterThan(0);
});
tap.test('getModel should return a LanguageModelV3 for openai', async () => {
  const openaiKey = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
  if (!openaiKey) {
    console.log('OPENAI_TOKEN not set, skipping test');
    return;
  }
  const openaiModel = smartai.getModel({
    provider: 'openai',
    model: 'gpt-4o-mini',
    apiKey: openaiKey,
  });
  // Generation and streaming entry points must both be present.
  expect(openaiModel).toHaveProperty('doGenerate');
  expect(openaiModel).toHaveProperty('doStream');
});
tap.test('getModelSetup should return OpenAI providerOptions for GPT-5.5 xhigh', async () => {
  // No network call here: getModelSetup only builds the model + options bundle.
  const modelSetup = smartai.getModelSetup({
    provider: 'openai',
    model: 'gpt-5.5',
    apiKey: 'test-key',
    providerOptions: {
      openai: {
        reasoningEffort: 'xhigh',
        textVerbosity: 'high',
      },
    },
  });
  expect(modelSetup.model.modelId).toEqual('gpt-5.5');
  // The OpenAI-specific options must survive the setup round-trip untouched.
  const openaiOptions = modelSetup.providerOptions?.openai;
  expect(openaiOptions?.reasoningEffort).toEqual('xhigh');
  expect(openaiOptions?.textVerbosity).toEqual('high');
});
tap.test('generateText should pass OpenAI providerOptions through AI SDK', async () => {
  const modelSetup = smartai.getModelSetup({
    provider: 'openai',
    model: 'gpt-5.5',
    apiKey: 'test-key',
    providerOptions: {
      openai: {
        reasoningEffort: 'xhigh',
        textVerbosity: 'high',
      },
    },
  });
  // Stub model that records every doGenerate call so we can inspect what
  // the AI SDK actually forwarded.
  const mockModel = new MockLanguageModelV3({
    doGenerate: async () => ({
      content: [{ type: 'text', text: 'ok' }],
      finishReason: { unified: 'stop', raw: undefined },
      usage: {
        inputTokens: { total: 1, noCache: 1, cacheRead: undefined, cacheWrite: undefined },
        outputTokens: { total: 1, text: 1, reasoning: undefined },
      },
      warnings: [],
    }),
  });
  await smartai.generateText({
    model: mockModel,
    prompt: 'test',
    providerOptions: modelSetup.providerOptions,
  });
  // The recorded call must carry the OpenAI options end-to-end.
  const forwardedOptions = mockModel.doGenerateCalls[0].providerOptions?.openai;
  expect(forwardedOptions?.reasoningEffort).toEqual('xhigh');
  expect(forwardedOptions?.textVerbosity).toEqual('high');
});
tap.test('streamText should pass OpenAI providerOptions through AI SDK', async () => {
  const modelSetup = smartai.getModelSetup({
    provider: 'openai',
    model: 'gpt-5.5',
    apiKey: 'test-key',
    providerOptions: {
      openai: {
        reasoningEffort: 'xhigh',
        textVerbosity: 'high',
      },
    },
  });
  // Minimal simulated stream: one text part ("ok") followed by a finish event.
  const streamChunks = [
    { type: 'text-start', id: 'text-1' },
    { type: 'text-delta', id: 'text-1', delta: 'ok' },
    { type: 'text-end', id: 'text-1' },
    {
      type: 'finish',
      finishReason: { unified: 'stop', raw: undefined },
      logprobs: undefined,
      usage: {
        inputTokens: { total: 1, noCache: 1, cacheRead: undefined, cacheWrite: undefined },
        outputTokens: { total: 1, text: 1, reasoning: undefined },
      },
    },
  ];
  const mockModel = new MockLanguageModelV3({
    doStream: async () => ({
      stream: simulateReadableStream({ chunks: streamChunks }),
    }),
  });
  // streamText returns its result object synchronously; awaiting .text
  // drains the stream so doStreamCalls is populated.
  const streamResult = smartai.streamText({
    model: mockModel,
    prompt: 'test',
    providerOptions: modelSetup.providerOptions,
  });
  await streamResult.text;
  const forwardedOptions = mockModel.doStreamCalls[0].providerOptions?.openai;
  expect(forwardedOptions?.reasoningEffort).toEqual('xhigh');
  expect(forwardedOptions?.textVerbosity).toEqual('high');
});
tap.test('streamText with anthropic model', async () => {
  const anthropicToken = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
  if (!anthropicToken) {
    console.log('ANTHROPIC_TOKEN not set, skipping test');
    return;
  }
  const claudeModel = smartai.getModel({
    provider: 'anthropic',
    model: 'claude-sonnet-4-5-20250929',
    apiKey: anthropicToken,
  });
  const streamResult = await smartai.streamText({
    model: claudeModel,
    prompt: 'Count from 1 to 5.',
  });
  // Collect every streamed chunk so we can assert both content and chunk count.
  const receivedChunks: string[] = [];
  for await (const piece of streamResult.textStream) {
    receivedChunks.push(piece);
  }
  const assembled = receivedChunks.join('');
  console.log('Streamed text:', assembled);
  expect(assembled).toBeTruthy();
  expect(assembled.length).toBeGreaterThan(0);
  expect(receivedChunks.length).toBeGreaterThan(0);
});
tap.test('generateText with openai model', async () => {
  const openaiToken = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
  if (!openaiToken) {
    console.log('OPENAI_TOKEN not set, skipping test');
    return;
  }
  const gptModel = smartai.getModel({
    provider: 'openai',
    model: 'gpt-4o-mini',
    apiKey: openaiToken,
  });
  // A deterministic arithmetic prompt lets us assert on the answer content.
  const generation = await smartai.generateText({
    model: gptModel,
    prompt: 'What is 2+2? Reply with just the number.',
  });
  console.log('OpenAI response:', generation.text);
  expect(generation.text).toBeTruthy();
  expect(generation.text).toInclude('4');
});
tap.test('getModel should throw for unknown provider', async () => {
  let didThrow = false;
  try {
    smartai.getModel({
      provider: 'nonexistent' as any,
      model: 'test',
    });
  } catch (caught) {
    didThrow = true;
    // Normalize to a string before checking the message content.
    const errorText = caught instanceof Error ? caught.message : String(caught);
    expect(errorText).toInclude('Unknown provider');
  }
  // getModel must not silently accept an unsupported provider.
  expect(didThrow).toBeTrue();
});
// Start the test suite; the default export exposes the run to the test runner.
export default tap.start();