// Integration tests for the smartai model helpers:
// getModel / generateText / streamText across anthropic and openai providers.
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as qenv from '@push.rocks/qenv';
import * as smartai from '../ts/index.js';

// Resolves env vars on demand, with './.nogit/' as the secondary lookup path
// (presumably a gitignored directory so API tokens stay out of the repo —
// confirm against the qenv configuration).
const testQenv = new qenv.Qenv('./', './.nogit/');
tap.test('getModel should return a LanguageModelV3 for anthropic', async () => {
|
|
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
|
|
if (!apiKey) {
|
|
console.log('ANTHROPIC_TOKEN not set, skipping test');
|
|
return;
|
|
}
|
|
|
|
const model = smartai.getModel({
|
|
provider: 'anthropic',
|
|
model: 'claude-sonnet-4-5-20250929',
|
|
apiKey,
|
|
});
|
|
|
|
expect(model).toHaveProperty('specificationVersion');
|
|
expect(model).toHaveProperty('provider');
|
|
expect(model).toHaveProperty('modelId');
|
|
expect(model).toHaveProperty('doGenerate');
|
|
expect(model).toHaveProperty('doStream');
|
|
});
|
|
|
|
tap.test('getModel with anthropic prompt caching returns wrapped model', async () => {
|
|
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
|
|
if (!apiKey) {
|
|
console.log('ANTHROPIC_TOKEN not set, skipping test');
|
|
return;
|
|
}
|
|
|
|
// Default: prompt caching enabled
|
|
const model = smartai.getModel({
|
|
provider: 'anthropic',
|
|
model: 'claude-sonnet-4-5-20250929',
|
|
apiKey,
|
|
});
|
|
|
|
// With caching disabled
|
|
const modelNoCaching = smartai.getModel({
|
|
provider: 'anthropic',
|
|
model: 'claude-sonnet-4-5-20250929',
|
|
apiKey,
|
|
promptCaching: false,
|
|
});
|
|
|
|
// Both should be valid models
|
|
expect(model).toHaveProperty('doGenerate');
|
|
expect(modelNoCaching).toHaveProperty('doGenerate');
|
|
});
|
|
|
|
tap.test('generateText with anthropic model', async () => {
|
|
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
|
|
if (!apiKey) {
|
|
console.log('ANTHROPIC_TOKEN not set, skipping test');
|
|
return;
|
|
}
|
|
|
|
const model = smartai.getModel({
|
|
provider: 'anthropic',
|
|
model: 'claude-sonnet-4-5-20250929',
|
|
apiKey,
|
|
});
|
|
|
|
const result = await smartai.generateText({
|
|
model,
|
|
prompt: 'Say hello in exactly 3 words.',
|
|
});
|
|
|
|
console.log('Anthropic response:', result.text);
|
|
expect(result.text).toBeTruthy();
|
|
expect(result.text.length).toBeGreaterThan(0);
|
|
});
|
|
|
|
tap.test('getModel should return a LanguageModelV3 for openai', async () => {
|
|
const apiKey = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
|
|
if (!apiKey) {
|
|
console.log('OPENAI_TOKEN not set, skipping test');
|
|
return;
|
|
}
|
|
|
|
const model = smartai.getModel({
|
|
provider: 'openai',
|
|
model: 'gpt-4o-mini',
|
|
apiKey,
|
|
});
|
|
|
|
expect(model).toHaveProperty('doGenerate');
|
|
expect(model).toHaveProperty('doStream');
|
|
});
|
|
|
|
tap.test('streamText with anthropic model', async () => {
|
|
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
|
|
if (!apiKey) {
|
|
console.log('ANTHROPIC_TOKEN not set, skipping test');
|
|
return;
|
|
}
|
|
|
|
const model = smartai.getModel({
|
|
provider: 'anthropic',
|
|
model: 'claude-sonnet-4-5-20250929',
|
|
apiKey,
|
|
});
|
|
|
|
const result = await smartai.streamText({
|
|
model,
|
|
prompt: 'Count from 1 to 5.',
|
|
});
|
|
|
|
const tokens: string[] = [];
|
|
for await (const chunk of result.textStream) {
|
|
tokens.push(chunk);
|
|
}
|
|
|
|
const fullText = tokens.join('');
|
|
console.log('Streamed text:', fullText);
|
|
expect(fullText).toBeTruthy();
|
|
expect(fullText.length).toBeGreaterThan(0);
|
|
expect(tokens.length).toBeGreaterThan(1); // Should have multiple chunks
|
|
});
|
|
|
|
tap.test('generateText with openai model', async () => {
|
|
const apiKey = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
|
|
if (!apiKey) {
|
|
console.log('OPENAI_TOKEN not set, skipping test');
|
|
return;
|
|
}
|
|
|
|
const model = smartai.getModel({
|
|
provider: 'openai',
|
|
model: 'gpt-4o-mini',
|
|
apiKey,
|
|
});
|
|
|
|
const result = await smartai.generateText({
|
|
model,
|
|
prompt: 'What is 2+2? Reply with just the number.',
|
|
});
|
|
|
|
console.log('OpenAI response:', result.text);
|
|
expect(result.text).toBeTruthy();
|
|
expect(result.text).toInclude('4');
|
|
});
|
|
|
|
tap.test('getModel should throw for unknown provider', async () => {
|
|
let threw = false;
|
|
try {
|
|
smartai.getModel({
|
|
provider: 'nonexistent' as any,
|
|
model: 'test',
|
|
});
|
|
} catch (e) {
|
|
threw = true;
|
|
expect(e.message).toInclude('Unknown provider');
|
|
}
|
|
expect(threw).toBeTrue();
|
|
});
|
|
|
|
// Kick off the test run; the returned promise is the default export
// (presumably consumed by the tstest harness — confirm runner docs).
export default tap.start();