BREAKING CHANGE(vercel-ai-sdk): migrate to Vercel AI SDK v6 and introduce provider registry (getModel) returning LanguageModelV3
This commit is contained in:
@@ -1,55 +0,0 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
const smartfs = new SmartFs(new SmartFsProviderNode());
|
||||
|
||||
import * as smartai from '../ts/index.js';
|
||||
|
||||
let testSmartai: smartai.SmartAi;
|
||||
|
||||
tap.test('ElevenLabs Audio: should create a smartai instance with ElevenLabs provider', async () => {
|
||||
testSmartai = new smartai.SmartAi({
|
||||
elevenlabsToken: await testQenv.getEnvVarOnDemand('ELEVENLABS_TOKEN'),
|
||||
elevenlabs: {
|
||||
defaultVoiceId: '19STyYD15bswVz51nqLf',
|
||||
},
|
||||
});
|
||||
await testSmartai.start();
|
||||
});
|
||||
|
||||
tap.test('ElevenLabs Audio: should create audio response', async () => {
|
||||
const audioStream = await testSmartai.elevenlabsProvider.audio({
|
||||
message: 'Welcome to SmartAI, the unified interface for the world\'s leading artificial intelligence providers. SmartAI brings together OpenAI, Anthropic, Perplexity, and ElevenLabs under a single elegant TypeScript API. Whether you need text generation, vision analysis, document processing, or premium text-to-speech capabilities, SmartAI provides a consistent and powerful interface for all your AI needs. Build intelligent applications at lightning speed without vendor lock-in.',
|
||||
});
|
||||
const chunks: Uint8Array[] = [];
|
||||
for await (const chunk of audioStream) {
|
||||
chunks.push(chunk as Uint8Array);
|
||||
}
|
||||
const audioBuffer = Buffer.concat(chunks);
|
||||
await smartfs.file('./.nogit/testoutput_elevenlabs.mp3').write(audioBuffer);
|
||||
console.log(`Audio Buffer length: ${audioBuffer.length}`);
|
||||
expect(audioBuffer.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
tap.test('ElevenLabs Audio: should create audio with custom voice', async () => {
|
||||
const audioStream = await testSmartai.elevenlabsProvider.audio({
|
||||
message: 'Testing with a different voice.',
|
||||
voiceId: 'JBFqnCBsd6RMkjVDRZzb',
|
||||
});
|
||||
const chunks: Uint8Array[] = [];
|
||||
for await (const chunk of audioStream) {
|
||||
chunks.push(chunk as Uint8Array);
|
||||
}
|
||||
const audioBuffer = Buffer.concat(chunks);
|
||||
await smartfs.file('./.nogit/testoutput_elevenlabs_custom.mp3').write(audioBuffer);
|
||||
console.log(`Audio Buffer length (custom voice): ${audioBuffer.length}`);
|
||||
expect(audioBuffer.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
tap.test('ElevenLabs Audio: should stop the smartai instance', async () => {
|
||||
await testSmartai.stop();
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,40 +0,0 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
const smartfs = new SmartFs(new SmartFsProviderNode());
|
||||
|
||||
import * as smartai from '../ts/index.js';
|
||||
|
||||
let testSmartai: smartai.SmartAi;
|
||||
|
||||
tap.test('OpenAI Audio: should create a smartai instance with OpenAI provider', async () => {
|
||||
testSmartai = new smartai.SmartAi({
|
||||
openaiToken: await testQenv.getEnvVarOnDemand('OPENAI_TOKEN'),
|
||||
});
|
||||
await testSmartai.start();
|
||||
});
|
||||
|
||||
tap.test('OpenAI Audio: should create audio response', async () => {
|
||||
// Call the audio method with a sample message.
|
||||
const audioStream = await testSmartai.openaiProvider.audio({
|
||||
message: 'This is a test of audio generation.',
|
||||
});
|
||||
// Read all chunks from the stream.
|
||||
const chunks: Uint8Array[] = [];
|
||||
for await (const chunk of audioStream) {
|
||||
chunks.push(chunk as Uint8Array);
|
||||
}
|
||||
const audioBuffer = Buffer.concat(chunks);
|
||||
await smartfs.file('./.nogit/testoutput.mp3').write(audioBuffer);
|
||||
console.log(`Audio Buffer length: ${audioBuffer.length}`);
|
||||
// Assert that the resulting buffer is not empty.
|
||||
expect(audioBuffer.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
tap.test('OpenAI Audio: should stop the smartai instance', async () => {
|
||||
await testSmartai.stop();
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,36 +0,0 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
|
||||
import * as smartai from '../ts/index.js';
|
||||
|
||||
let anthropicProvider: smartai.AnthropicProvider;
|
||||
|
||||
tap.test('Audio Stubs: should create Anthropic provider', async () => {
|
||||
anthropicProvider = new smartai.AnthropicProvider({
|
||||
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
|
||||
});
|
||||
await anthropicProvider.start();
|
||||
});
|
||||
|
||||
tap.test('Audio Stubs: Anthropic audio should throw not supported error', async () => {
|
||||
let errorCaught = false;
|
||||
|
||||
try {
|
||||
await anthropicProvider.audio({
|
||||
message: 'This should fail'
|
||||
});
|
||||
} catch (error) {
|
||||
errorCaught = true;
|
||||
expect(error.message).toInclude('not yet supported');
|
||||
}
|
||||
|
||||
expect(errorCaught).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('Audio Stubs: should stop Anthropic provider', async () => {
|
||||
await anthropicProvider.stop();
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
36
test/test.audio.ts
Normal file
36
test/test.audio.ts
Normal file
@@ -0,0 +1,36 @@
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
import { textToSpeech } from '../ts_audio/index.js';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
|
||||
tap.test('textToSpeech should return a readable stream', async () => {
|
||||
const apiKey = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
|
||||
if (!apiKey) {
|
||||
console.log('OPENAI_TOKEN not set, skipping test');
|
||||
return;
|
||||
}
|
||||
|
||||
const stream = await textToSpeech({
|
||||
apiKey,
|
||||
text: 'Hello, this is a test of the text to speech system.',
|
||||
voice: 'alloy',
|
||||
model: 'tts-1',
|
||||
});
|
||||
|
||||
expect(stream).toBeTruthy();
|
||||
expect(stream.readable).toBeTrue();
|
||||
|
||||
// Read some bytes to verify it's actual audio data
|
||||
const chunks: Buffer[] = [];
|
||||
for await (const chunk of stream) {
|
||||
chunks.push(Buffer.from(chunk));
|
||||
if (chunks.length > 2) break; // Just read a few chunks to verify
|
||||
}
|
||||
|
||||
const totalBytes = chunks.reduce((sum, c) => sum + c.length, 0);
|
||||
console.log(`Audio stream produced ${totalBytes} bytes in ${chunks.length} chunks`);
|
||||
expect(totalBytes).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,93 +0,0 @@
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import * as smartai from '../ts/index.js';
|
||||
|
||||
// Basic instantiation tests that don't require API tokens
|
||||
// These tests can run in CI/CD environments without credentials
|
||||
|
||||
tap.test('Basic: should create SmartAi instance', async () => {
|
||||
const testSmartai = new smartai.SmartAi({
|
||||
openaiToken: 'dummy-token-for-testing'
|
||||
});
|
||||
expect(testSmartai).toBeInstanceOf(smartai.SmartAi);
|
||||
// Provider is only created after calling start()
|
||||
expect(testSmartai.options.openaiToken).toEqual('dummy-token-for-testing');
|
||||
});
|
||||
|
||||
tap.test('Basic: should instantiate OpenAI provider', async () => {
|
||||
const openaiProvider = new smartai.OpenAiProvider({
|
||||
openaiToken: 'dummy-token'
|
||||
});
|
||||
expect(openaiProvider).toBeInstanceOf(smartai.OpenAiProvider);
|
||||
expect(typeof openaiProvider.chat).toEqual('function');
|
||||
expect(typeof openaiProvider.audio).toEqual('function');
|
||||
expect(typeof openaiProvider.vision).toEqual('function');
|
||||
expect(typeof openaiProvider.document).toEqual('function');
|
||||
expect(typeof openaiProvider.research).toEqual('function');
|
||||
});
|
||||
|
||||
tap.test('Basic: should instantiate Anthropic provider', async () => {
|
||||
const anthropicProvider = new smartai.AnthropicProvider({
|
||||
anthropicToken: 'dummy-token'
|
||||
});
|
||||
expect(anthropicProvider).toBeInstanceOf(smartai.AnthropicProvider);
|
||||
expect(typeof anthropicProvider.chat).toEqual('function');
|
||||
expect(typeof anthropicProvider.audio).toEqual('function');
|
||||
expect(typeof anthropicProvider.vision).toEqual('function');
|
||||
expect(typeof anthropicProvider.document).toEqual('function');
|
||||
expect(typeof anthropicProvider.research).toEqual('function');
|
||||
});
|
||||
|
||||
tap.test('Basic: should instantiate Perplexity provider', async () => {
|
||||
const perplexityProvider = new smartai.PerplexityProvider({
|
||||
perplexityToken: 'dummy-token'
|
||||
});
|
||||
expect(perplexityProvider).toBeInstanceOf(smartai.PerplexityProvider);
|
||||
expect(typeof perplexityProvider.chat).toEqual('function');
|
||||
expect(typeof perplexityProvider.research).toEqual('function');
|
||||
});
|
||||
|
||||
tap.test('Basic: should instantiate Groq provider', async () => {
|
||||
const groqProvider = new smartai.GroqProvider({
|
||||
groqToken: 'dummy-token'
|
||||
});
|
||||
expect(groqProvider).toBeInstanceOf(smartai.GroqProvider);
|
||||
expect(typeof groqProvider.chat).toEqual('function');
|
||||
expect(typeof groqProvider.research).toEqual('function');
|
||||
});
|
||||
|
||||
tap.test('Basic: should instantiate Ollama provider', async () => {
|
||||
const ollamaProvider = new smartai.OllamaProvider({
|
||||
baseUrl: 'http://localhost:11434'
|
||||
});
|
||||
expect(ollamaProvider).toBeInstanceOf(smartai.OllamaProvider);
|
||||
expect(typeof ollamaProvider.chat).toEqual('function');
|
||||
expect(typeof ollamaProvider.research).toEqual('function');
|
||||
});
|
||||
|
||||
tap.test('Basic: should instantiate xAI provider', async () => {
|
||||
const xaiProvider = new smartai.XAIProvider({
|
||||
xaiToken: 'dummy-token'
|
||||
});
|
||||
expect(xaiProvider).toBeInstanceOf(smartai.XAIProvider);
|
||||
expect(typeof xaiProvider.chat).toEqual('function');
|
||||
expect(typeof xaiProvider.research).toEqual('function');
|
||||
});
|
||||
|
||||
tap.test('Basic: should instantiate Exo provider', async () => {
|
||||
const exoProvider = new smartai.ExoProvider({
|
||||
exoBaseUrl: 'http://localhost:8000'
|
||||
});
|
||||
expect(exoProvider).toBeInstanceOf(smartai.ExoProvider);
|
||||
expect(typeof exoProvider.chat).toEqual('function');
|
||||
expect(typeof exoProvider.research).toEqual('function');
|
||||
});
|
||||
|
||||
tap.test('Basic: all providers should extend MultiModalModel', async () => {
|
||||
const openai = new smartai.OpenAiProvider({ openaiToken: 'test' });
|
||||
const anthropic = new smartai.AnthropicProvider({ anthropicToken: 'test' });
|
||||
|
||||
expect(openai).toBeInstanceOf(smartai.MultiModalModel);
|
||||
expect(anthropic).toBeInstanceOf(smartai.MultiModalModel);
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,72 +0,0 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
|
||||
import * as smartai from '../ts/index.js';
|
||||
|
||||
let anthropicProvider: smartai.AnthropicProvider;
|
||||
|
||||
tap.test('Anthropic Chat: should create and start Anthropic provider', async () => {
|
||||
anthropicProvider = new smartai.AnthropicProvider({
|
||||
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
|
||||
});
|
||||
await anthropicProvider.start();
|
||||
expect(anthropicProvider).toBeInstanceOf(smartai.AnthropicProvider);
|
||||
});
|
||||
|
||||
tap.test('Anthropic Chat: should create chat response', async () => {
|
||||
const userMessage = 'What is the capital of France? Answer in one word.';
|
||||
const response = await anthropicProvider.chat({
|
||||
systemMessage: 'You are a helpful assistant. Be concise.',
|
||||
userMessage: userMessage,
|
||||
messageHistory: [],
|
||||
});
|
||||
console.log(`Anthropic Chat - User: ${userMessage}`);
|
||||
console.log(`Anthropic Chat - Response: ${response.message}`);
|
||||
|
||||
expect(response.role).toEqual('assistant');
|
||||
expect(response.message).toBeTruthy();
|
||||
expect(response.message.toLowerCase()).toInclude('paris');
|
||||
});
|
||||
|
||||
tap.test('Anthropic Chat: should handle message history', async () => {
|
||||
const messageHistory: smartai.ChatMessage[] = [
|
||||
{ role: 'user', content: 'My name is Claude Test' },
|
||||
{ role: 'assistant', content: 'Nice to meet you, Claude Test!' }
|
||||
];
|
||||
|
||||
const response = await anthropicProvider.chat({
|
||||
systemMessage: 'You are a helpful assistant with good memory.',
|
||||
userMessage: 'What is my name?',
|
||||
messageHistory: messageHistory,
|
||||
});
|
||||
|
||||
console.log(`Anthropic Memory Test - Response: ${response.message}`);
|
||||
expect(response.message.toLowerCase()).toInclude('claude test');
|
||||
});
|
||||
|
||||
tap.test('Anthropic Chat: should handle errors gracefully', async () => {
|
||||
// Test with invalid message (empty)
|
||||
let errorCaught = false;
|
||||
|
||||
try {
|
||||
await anthropicProvider.chat({
|
||||
systemMessage: '',
|
||||
userMessage: '',
|
||||
messageHistory: [],
|
||||
});
|
||||
} catch (error) {
|
||||
errorCaught = true;
|
||||
console.log('Expected error caught:', error.message);
|
||||
}
|
||||
|
||||
// Anthropic might handle empty messages, so we don't assert error
|
||||
console.log(`Error handling test - Error caught: ${errorCaught}`);
|
||||
});
|
||||
|
||||
tap.test('Anthropic Chat: should stop the provider', async () => {
|
||||
await anthropicProvider.stop();
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,66 +0,0 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
|
||||
import * as smartai from '../ts/index.js';
|
||||
|
||||
let mistralProvider: smartai.MistralProvider;
|
||||
|
||||
tap.test('Mistral Chat: should create and start Mistral provider', async () => {
|
||||
mistralProvider = new smartai.MistralProvider({
|
||||
mistralToken: await testQenv.getEnvVarOnDemand('MISTRAL_API_KEY'),
|
||||
});
|
||||
await mistralProvider.start();
|
||||
expect(mistralProvider).toBeInstanceOf(smartai.MistralProvider);
|
||||
});
|
||||
|
||||
tap.test('Mistral Chat: should create chat response', async () => {
|
||||
const userMessage = 'What is the capital of France? Answer in one word.';
|
||||
const response = await mistralProvider.chat({
|
||||
systemMessage: 'You are a helpful assistant. Be concise.',
|
||||
userMessage: userMessage,
|
||||
messageHistory: [],
|
||||
});
|
||||
console.log(`Mistral Chat - User: ${userMessage}`);
|
||||
console.log(`Mistral Chat - Response: ${response.message}`);
|
||||
|
||||
expect(response.role).toEqual('assistant');
|
||||
expect(response.message).toBeTruthy();
|
||||
expect(response.message.toLowerCase()).toInclude('paris');
|
||||
});
|
||||
|
||||
tap.test('Mistral Chat: should handle message history', async () => {
|
||||
const messageHistory: smartai.ChatMessage[] = [
|
||||
{ role: 'user', content: 'My name is Claude Test' },
|
||||
{ role: 'assistant', content: 'Nice to meet you, Claude Test!' }
|
||||
];
|
||||
|
||||
const response = await mistralProvider.chat({
|
||||
systemMessage: 'You are a helpful assistant with good memory.',
|
||||
userMessage: 'What is my name?',
|
||||
messageHistory: messageHistory,
|
||||
});
|
||||
|
||||
console.log(`Mistral Memory Test - Response: ${response.message}`);
|
||||
expect(response.message.toLowerCase()).toInclude('claude test');
|
||||
});
|
||||
|
||||
tap.test('Mistral Chat: should handle longer conversations', async () => {
|
||||
const response = await mistralProvider.chat({
|
||||
systemMessage: 'You are a helpful coding assistant.',
|
||||
userMessage: 'Write a simple hello world function in TypeScript. Keep it brief.',
|
||||
messageHistory: [],
|
||||
});
|
||||
|
||||
console.log(`Mistral Coding Test - Response: ${response.message}`);
|
||||
expect(response.message).toBeTruthy();
|
||||
// Should contain some TypeScript/function code
|
||||
expect(response.message).toInclude('function');
|
||||
});
|
||||
|
||||
tap.test('Mistral Chat: should stop the provider', async () => {
|
||||
await mistralProvider.stop();
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,34 +0,0 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
|
||||
import * as smartai from '../ts/index.js';
|
||||
|
||||
let testSmartai: smartai.SmartAi;
|
||||
|
||||
tap.test('OpenAI Chat: should create a smartai instance with OpenAI provider', async () => {
|
||||
testSmartai = new smartai.SmartAi({
|
||||
openaiToken: await testQenv.getEnvVarOnDemand('OPENAI_TOKEN'),
|
||||
});
|
||||
await testSmartai.start();
|
||||
});
|
||||
|
||||
tap.test('OpenAI Chat: should create chat response', async () => {
|
||||
const userMessage = 'How are you?';
|
||||
const response = await testSmartai.openaiProvider.chat({
|
||||
systemMessage: 'Hello',
|
||||
userMessage: userMessage,
|
||||
messageHistory: [],
|
||||
});
|
||||
console.log(`userMessage: ${userMessage}`);
|
||||
console.log(response.message);
|
||||
expect(response.role).toEqual('assistant');
|
||||
expect(response.message).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('OpenAI Chat: should stop the smartai instance', async () => {
|
||||
await testSmartai.stop();
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,79 +0,0 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
import * as smartrequest from '@push.rocks/smartrequest';
|
||||
import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
const smartfs = new SmartFs(new SmartFsProviderNode());
|
||||
|
||||
import * as smartai from '../ts/index.js';
|
||||
|
||||
let anthropicProvider: smartai.AnthropicProvider;
|
||||
|
||||
tap.test('Anthropic Document: should create and start Anthropic provider', async () => {
|
||||
anthropicProvider = new smartai.AnthropicProvider({
|
||||
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
|
||||
});
|
||||
await anthropicProvider.start();
|
||||
expect(anthropicProvider).toBeInstanceOf(smartai.AnthropicProvider);
|
||||
});
|
||||
|
||||
tap.test('Anthropic Document: should document a PDF', async () => {
|
||||
const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
|
||||
const pdfResponse = await smartrequest.SmartRequest.create()
|
||||
.url(pdfUrl)
|
||||
.get();
|
||||
|
||||
const result = await anthropicProvider.document({
|
||||
systemMessage: 'Classify the document. Only the following answers are allowed: "invoice", "bank account statement", "contract", "test document", "other". The answer should only contain the keyword for machine use.',
|
||||
userMessage: 'Classify this document.',
|
||||
messageHistory: [],
|
||||
pdfDocuments: [Buffer.from(await pdfResponse.arrayBuffer())],
|
||||
});
|
||||
|
||||
console.log(`Anthropic Document - Result:`, result);
|
||||
expect(result).toBeTruthy();
|
||||
expect(result.message).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('Anthropic Document: should handle complex document analysis', async () => {
|
||||
// Test with the demo PDF if it exists
|
||||
const pdfPath = './.nogit/demo_without_textlayer.pdf';
|
||||
let pdfBuffer: Uint8Array;
|
||||
|
||||
try {
|
||||
pdfBuffer = await smartfs.file(pdfPath).read();
|
||||
} catch (error) {
|
||||
// If the file doesn't exist, use the dummy PDF
|
||||
console.log('Demo PDF not found, using dummy PDF instead');
|
||||
const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
|
||||
const pdfResponse = await smartrequest.SmartRequest.create()
|
||||
.url(pdfUrl)
|
||||
.get();
|
||||
pdfBuffer = Buffer.from(await pdfResponse.arrayBuffer());
|
||||
}
|
||||
|
||||
const result = await anthropicProvider.document({
|
||||
systemMessage: `
|
||||
Analyze this document and provide a JSON response with the following structure:
|
||||
{
|
||||
"documentType": "string",
|
||||
"hasText": boolean,
|
||||
"summary": "string"
|
||||
}
|
||||
`,
|
||||
userMessage: 'Analyze this document.',
|
||||
messageHistory: [],
|
||||
pdfDocuments: [pdfBuffer],
|
||||
});
|
||||
|
||||
console.log(`Anthropic Complex Document Analysis:`, result);
|
||||
expect(result).toBeTruthy();
|
||||
expect(result.message).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('Anthropic Document: should stop the provider', async () => {
|
||||
await anthropicProvider.stop();
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,100 +0,0 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
import * as smartrequest from '@push.rocks/smartrequest';
|
||||
import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
const smartfs = new SmartFs(new SmartFsProviderNode());
|
||||
|
||||
import * as smartai from '../ts/index.js';
|
||||
|
||||
let mistralProvider: smartai.MistralProvider;
|
||||
|
||||
tap.test('Mistral Document: should create and start Mistral provider', async () => {
|
||||
mistralProvider = new smartai.MistralProvider({
|
||||
mistralToken: await testQenv.getEnvVarOnDemand('MISTRAL_API_KEY'),
|
||||
tableFormat: 'markdown',
|
||||
});
|
||||
await mistralProvider.start();
|
||||
expect(mistralProvider).toBeInstanceOf(smartai.MistralProvider);
|
||||
});
|
||||
|
||||
tap.test('Mistral Document: should process a PDF document', async () => {
|
||||
const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
|
||||
const pdfResponse = await smartrequest.SmartRequest.create()
|
||||
.url(pdfUrl)
|
||||
.get();
|
||||
|
||||
const result = await mistralProvider.document({
|
||||
systemMessage: 'Classify the document. Only the following answers are allowed: "invoice", "bank account statement", "contract", "test document", "other". The answer should only contain the keyword for machine use.',
|
||||
userMessage: 'Classify this document.',
|
||||
messageHistory: [],
|
||||
pdfDocuments: [Buffer.from(await pdfResponse.arrayBuffer())],
|
||||
});
|
||||
|
||||
console.log(`Mistral Document - Result:`, result);
|
||||
expect(result).toBeTruthy();
|
||||
expect(result.message).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('Mistral Document: should handle complex document analysis', async () => {
|
||||
// Test with the demo PDF if it exists
|
||||
const pdfPath = './.nogit/demo_without_textlayer.pdf';
|
||||
let pdfBuffer: Uint8Array;
|
||||
|
||||
try {
|
||||
pdfBuffer = await smartfs.file(pdfPath).read();
|
||||
} catch (error) {
|
||||
// If the file doesn't exist, use the dummy PDF
|
||||
console.log('Demo PDF not found, using dummy PDF instead');
|
||||
const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
|
||||
const pdfResponse = await smartrequest.SmartRequest.create()
|
||||
.url(pdfUrl)
|
||||
.get();
|
||||
pdfBuffer = Buffer.from(await pdfResponse.arrayBuffer());
|
||||
}
|
||||
|
||||
const result = await mistralProvider.document({
|
||||
systemMessage: `
|
||||
Analyze this document and provide a JSON response with the following structure:
|
||||
{
|
||||
"documentType": "string",
|
||||
"hasText": boolean,
|
||||
"summary": "string"
|
||||
}
|
||||
`,
|
||||
userMessage: 'Analyze this document.',
|
||||
messageHistory: [],
|
||||
pdfDocuments: [pdfBuffer],
|
||||
});
|
||||
|
||||
console.log(`Mistral Complex Document Analysis:`, result);
|
||||
expect(result).toBeTruthy();
|
||||
expect(result.message).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('Mistral Document: should process multiple PDF documents', async () => {
|
||||
const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
|
||||
const pdfResponse = await smartrequest.SmartRequest.create()
|
||||
.url(pdfUrl)
|
||||
.get();
|
||||
|
||||
const pdfBuffer = Buffer.from(await pdfResponse.arrayBuffer());
|
||||
|
||||
const result = await mistralProvider.document({
|
||||
systemMessage: 'You are a document comparison assistant.',
|
||||
userMessage: 'Are these two documents the same? Answer yes or no.',
|
||||
messageHistory: [],
|
||||
pdfDocuments: [pdfBuffer, pdfBuffer], // Same document twice for test
|
||||
});
|
||||
|
||||
console.log(`Mistral Multi-Document - Result:`, result);
|
||||
expect(result).toBeTruthy();
|
||||
expect(result.message).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('Mistral Document: should stop the provider', async () => {
|
||||
await mistralProvider.stop();
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,77 +0,0 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
import * as smartrequest from '@push.rocks/smartrequest';
|
||||
import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
const smartfs = new SmartFs(new SmartFsProviderNode());
|
||||
|
||||
import * as smartai from '../ts/index.js';
|
||||
|
||||
let testSmartai: smartai.SmartAi;
|
||||
|
||||
tap.test('OpenAI Document: should create a smartai instance with OpenAI provider', async () => {
|
||||
testSmartai = new smartai.SmartAi({
|
||||
openaiToken: await testQenv.getEnvVarOnDemand('OPENAI_TOKEN'),
|
||||
});
|
||||
await testSmartai.start();
|
||||
});
|
||||
|
||||
tap.test('OpenAI Document: should document a pdf', async () => {
|
||||
const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
|
||||
const pdfResponse = await smartrequest.SmartRequest.create()
|
||||
.url(pdfUrl)
|
||||
.get();
|
||||
const result = await testSmartai.openaiProvider.document({
|
||||
systemMessage: 'Classify the document. Only the following answers are allowed: "invoice", "bank account statement", "contract", "other". The answer should only contain the keyword for machine use.',
|
||||
userMessage: "Classify the document.",
|
||||
messageHistory: [],
|
||||
pdfDocuments: [Buffer.from(await pdfResponse.arrayBuffer())],
|
||||
});
|
||||
console.log(result);
|
||||
expect(result.message).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('OpenAI Document: should recognize companies in a pdf', async () => {
|
||||
const pdfBuffer = await smartfs.file('./.nogit/demo_without_textlayer.pdf').read();
|
||||
const result = await testSmartai.openaiProvider.document({
|
||||
systemMessage: `
|
||||
summarize the document.
|
||||
|
||||
answer in JSON format, adhering to the following schema:
|
||||
\`\`\`typescript
|
||||
type TAnswer = {
|
||||
entitySender: {
|
||||
type: 'official state entity' | 'company' | 'person';
|
||||
name: string;
|
||||
address: string;
|
||||
city: string;
|
||||
country: string;
|
||||
EU: boolean; // whether the entity is within EU
|
||||
};
|
||||
entityReceiver: {
|
||||
type: 'official state entity' | 'company' | 'person';
|
||||
name: string;
|
||||
address: string;
|
||||
city: string;
|
||||
country: string;
|
||||
EU: boolean; // whether the entity is within EU
|
||||
};
|
||||
date: string; // the date of the document as YYYY-MM-DD
|
||||
title: string; // a short title, suitable for a filename
|
||||
}
|
||||
\`\`\`
|
||||
`,
|
||||
userMessage: "Classify the document.",
|
||||
messageHistory: [],
|
||||
pdfDocuments: [pdfBuffer],
|
||||
});
|
||||
console.log(result);
|
||||
expect(result.message).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('OpenAI Document: should stop the smartai instance', async () => {
|
||||
await testSmartai.stop();
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
50
test/test.document.ts
Normal file
50
test/test.document.ts
Normal file
@@ -0,0 +1,50 @@
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
import { getModel } from '../ts/index.js';
|
||||
import { analyzeDocuments, stopSmartpdf } from '../ts_document/index.js';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
|
||||
tap.test('analyzeDocuments should analyze a PDF', async () => {
|
||||
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
|
||||
if (!apiKey) {
|
||||
console.log('ANTHROPIC_TOKEN not set, skipping test');
|
||||
return;
|
||||
}
|
||||
|
||||
// Create a minimal test PDF (this is a valid minimal PDF)
|
||||
const minimalPdf = Buffer.from(
|
||||
'%PDF-1.0\n1 0 obj<</Type/Catalog/Pages 2 0 R>>endobj\n' +
|
||||
'2 0 obj<</Type/Pages/Kids[3 0 R]/Count 1>>endobj\n' +
|
||||
'3 0 obj<</Type/Page/MediaBox[0 0 612 792]/Parent 2 0 R/Contents 4 0 R/Resources<</Font<</F1 5 0 R>>>>>>endobj\n' +
|
||||
'4 0 obj<</Length 44>>stream\nBT /F1 12 Tf 100 700 Td (Hello World) Tj ET\nendstream\nendobj\n' +
|
||||
'5 0 obj<</Type/Font/Subtype/Type1/BaseFont/Helvetica>>endobj\n' +
|
||||
'xref\n0 6\n0000000000 65535 f \n0000000009 00000 n \n0000000058 00000 n \n0000000115 00000 n \n0000000266 00000 n \n0000000360 00000 n \n' +
|
||||
'trailer<</Size 6/Root 1 0 R>>\nstartxref\n434\n%%EOF'
|
||||
);
|
||||
|
||||
const model = getModel({
|
||||
provider: 'anthropic',
|
||||
model: 'claude-sonnet-4-5-20250929',
|
||||
apiKey,
|
||||
promptCaching: false,
|
||||
});
|
||||
|
||||
try {
|
||||
const result = await analyzeDocuments({
|
||||
model,
|
||||
systemMessage: 'You are a document analysis assistant.',
|
||||
userMessage: 'What text is visible in this document?',
|
||||
pdfDocuments: [minimalPdf],
|
||||
});
|
||||
|
||||
console.log('Document analysis result:', result);
|
||||
expect(result).toBeTruthy();
|
||||
} catch (error) {
|
||||
console.log('Document test failed (may need puppeteer):', error.message);
|
||||
} finally {
|
||||
await stopSmartpdf();
|
||||
}
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,203 +0,0 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
import * as smartai from '../ts/index.js';
|
||||
import * as path from 'path';
|
||||
import { promises as fs } from 'fs';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
|
||||
let openaiProvider: smartai.OpenAiProvider;
|
||||
|
||||
// Helper function to save image results
|
||||
async function saveImageResult(testName: string, result: any) {
|
||||
const sanitizedName = testName.replace(/[^a-z0-9]/gi, '_').toLowerCase();
|
||||
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
|
||||
const filename = `openai_${sanitizedName}_${timestamp}.json`;
|
||||
const filepath = path.join('.nogit', 'testresults', 'images', filename);
|
||||
|
||||
await fs.mkdir(path.dirname(filepath), { recursive: true });
|
||||
await fs.writeFile(filepath, JSON.stringify(result, null, 2), 'utf-8');
|
||||
|
||||
console.log(` 💾 Saved to: ${filepath}`);
|
||||
|
||||
// Also save the actual image if b64_json is present
|
||||
if (result.images && result.images[0]?.b64_json) {
|
||||
const imageFilename = `openai_${sanitizedName}_${timestamp}.png`;
|
||||
const imageFilepath = path.join('.nogit', 'testresults', 'images', imageFilename);
|
||||
await fs.writeFile(imageFilepath, Buffer.from(result.images[0].b64_json, 'base64'));
|
||||
console.log(` 🖼️ Image saved to: ${imageFilepath}`);
|
||||
}
|
||||
}
|
||||
|
||||
tap.test('OpenAI Image Generation: should initialize provider', async () => {
|
||||
const openaiToken = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
|
||||
expect(openaiToken).toBeTruthy();
|
||||
|
||||
openaiProvider = new smartai.OpenAiProvider({
|
||||
openaiToken,
|
||||
imageModel: 'gpt-image-1'
|
||||
});
|
||||
|
||||
await openaiProvider.start();
|
||||
expect(openaiProvider).toBeInstanceOf(smartai.OpenAiProvider);
|
||||
});
|
||||
|
||||
tap.test('OpenAI Image: Basic generation with gpt-image-1', async () => {
|
||||
const result = await openaiProvider.imageGenerate({
|
||||
prompt: 'A cute robot reading a book in a cozy library, digital art style',
|
||||
model: 'gpt-image-1',
|
||||
quality: 'medium',
|
||||
size: '1024x1024'
|
||||
});
|
||||
|
||||
console.log('Basic gpt-image-1 Generation:');
|
||||
console.log('- Images generated:', result.images.length);
|
||||
console.log('- Model used:', result.metadata?.model);
|
||||
console.log('- Quality:', result.metadata?.quality);
|
||||
console.log('- Size:', result.metadata?.size);
|
||||
console.log('- Tokens used:', result.metadata?.tokensUsed);
|
||||
|
||||
await saveImageResult('basic_generation_gptimage1', result);
|
||||
|
||||
expect(result.images).toBeTruthy();
|
||||
expect(result.images.length).toEqual(1);
|
||||
expect(result.images[0].b64_json).toBeTruthy();
|
||||
expect(result.metadata?.model).toEqual('gpt-image-1');
|
||||
});
|
||||
|
||||
tap.test('OpenAI Image: High quality with transparent background', async () => {
|
||||
const result = await openaiProvider.imageGenerate({
|
||||
prompt: 'A simple geometric logo of a mountain peak, minimal design, clean lines',
|
||||
model: 'gpt-image-1',
|
||||
quality: 'high',
|
||||
size: '1024x1024',
|
||||
background: 'transparent',
|
||||
outputFormat: 'png'
|
||||
});
|
||||
|
||||
console.log('High Quality Transparent:');
|
||||
console.log('- Quality:', result.metadata?.quality);
|
||||
console.log('- Background: transparent');
|
||||
console.log('- Format:', result.metadata?.outputFormat);
|
||||
console.log('- Tokens used:', result.metadata?.tokensUsed);
|
||||
|
||||
await saveImageResult('high_quality_transparent', result);
|
||||
|
||||
expect(result.images.length).toEqual(1);
|
||||
expect(result.images[0].b64_json).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('OpenAI Image: WebP format with compression', async () => {
|
||||
const result = await openaiProvider.imageGenerate({
|
||||
prompt: 'A futuristic cityscape at sunset with flying cars, photorealistic',
|
||||
model: 'gpt-image-1',
|
||||
quality: 'high',
|
||||
size: '1536x1024',
|
||||
outputFormat: 'webp',
|
||||
outputCompression: 85
|
||||
});
|
||||
|
||||
console.log('WebP with Compression:');
|
||||
console.log('- Format:', result.metadata?.outputFormat);
|
||||
console.log('- Compression: 85%');
|
||||
console.log('- Size:', result.metadata?.size);
|
||||
|
||||
await saveImageResult('webp_compression', result);
|
||||
|
||||
expect(result.images.length).toEqual(1);
|
||||
expect(result.images[0].b64_json).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('OpenAI Image: Text rendering with gpt-image-1', async () => {
|
||||
const result = await openaiProvider.imageGenerate({
|
||||
prompt: 'A vintage cafe sign that says "COFFEE & CODE" in elegant hand-lettered typography, warm colors',
|
||||
model: 'gpt-image-1',
|
||||
quality: 'high',
|
||||
size: '1024x1024'
|
||||
});
|
||||
|
||||
console.log('Text Rendering:');
|
||||
console.log('- Prompt includes text: "COFFEE & CODE"');
|
||||
console.log('- gpt-image-1 has superior text rendering');
|
||||
console.log('- Tokens used:', result.metadata?.tokensUsed);
|
||||
|
||||
await saveImageResult('text_rendering', result);
|
||||
|
||||
expect(result.images.length).toEqual(1);
|
||||
expect(result.images[0].b64_json).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('OpenAI Image: Multiple images generation', async () => {
|
||||
const result = await openaiProvider.imageGenerate({
|
||||
prompt: 'Abstract colorful geometric patterns, modern minimalist art',
|
||||
model: 'gpt-image-1',
|
||||
n: 2,
|
||||
quality: 'medium',
|
||||
size: '1024x1024'
|
||||
});
|
||||
|
||||
console.log('Multiple Images:');
|
||||
console.log('- Images requested: 2');
|
||||
console.log('- Images generated:', result.images.length);
|
||||
|
||||
await saveImageResult('multiple_images', result);
|
||||
|
||||
expect(result.images.length).toEqual(2);
|
||||
expect(result.images[0].b64_json).toBeTruthy();
|
||||
expect(result.images[1].b64_json).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('OpenAI Image: Low moderation setting', async () => {
|
||||
const result = await openaiProvider.imageGenerate({
|
||||
prompt: 'A fantasy battle scene with warriors and dragons',
|
||||
model: 'gpt-image-1',
|
||||
moderation: 'low',
|
||||
quality: 'medium'
|
||||
});
|
||||
|
||||
console.log('Low Moderation:');
|
||||
console.log('- Moderation: low (less restrictive filtering)');
|
||||
console.log('- Tokens used:', result.metadata?.tokensUsed);
|
||||
|
||||
await saveImageResult('low_moderation', result);
|
||||
|
||||
expect(result.images.length).toEqual(1);
|
||||
expect(result.images[0].b64_json).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('OpenAI Image Editing: edit with gpt-image-1', async () => {
|
||||
// First, generate a base image
|
||||
const baseResult = await openaiProvider.imageGenerate({
|
||||
prompt: 'A simple white cat sitting on a red cushion',
|
||||
model: 'gpt-image-1',
|
||||
quality: 'low',
|
||||
size: '1024x1024'
|
||||
});
|
||||
|
||||
const baseImageBuffer = Buffer.from(baseResult.images[0].b64_json!, 'base64');
|
||||
|
||||
// Now edit it
|
||||
const editResult = await openaiProvider.imageEdit({
|
||||
image: baseImageBuffer,
|
||||
prompt: 'Change the cat to orange and add stylish sunglasses',
|
||||
model: 'gpt-image-1',
|
||||
quality: 'medium'
|
||||
});
|
||||
|
||||
console.log('Image Editing:');
|
||||
console.log('- Base image created');
|
||||
console.log('- Edit: change color and add sunglasses');
|
||||
console.log('- Result images:', editResult.images.length);
|
||||
|
||||
await saveImageResult('image_edit', editResult);
|
||||
|
||||
expect(editResult.images.length).toEqual(1);
|
||||
expect(editResult.images[0].b64_json).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('OpenAI Image: should clean up provider', async () => {
|
||||
await openaiProvider.stop();
|
||||
console.log('OpenAI image provider stopped successfully');
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
35
test/test.image.ts
Normal file
35
test/test.image.ts
Normal file
@@ -0,0 +1,35 @@
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
import { generateImage } from '../ts_image/index.js';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
|
||||
tap.test('generateImage should return an image response', async () => {
|
||||
const apiKey = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
|
||||
if (!apiKey) {
|
||||
console.log('OPENAI_TOKEN not set, skipping test');
|
||||
return;
|
||||
}
|
||||
|
||||
const result = await generateImage({
|
||||
apiKey,
|
||||
prompt: 'A simple red circle on a white background',
|
||||
model: 'gpt-image-1',
|
||||
size: '1024x1024',
|
||||
quality: 'low',
|
||||
n: 1,
|
||||
});
|
||||
|
||||
console.log('Image generation result: images count =', result.images.length);
|
||||
expect(result.images).toBeArray();
|
||||
expect(result.images.length).toBeGreaterThan(0);
|
||||
|
||||
const firstImage = result.images[0];
|
||||
// gpt-image-1 returns b64_json by default
|
||||
expect(firstImage.b64_json || firstImage.url).toBeTruthy();
|
||||
|
||||
expect(result.metadata).toBeTruthy();
|
||||
expect(result.metadata!.model).toEqual('gpt-image-1');
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,140 +0,0 @@
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import * as smartai from '../ts/index.js';
|
||||
|
||||
// Test interface exports and type checking
|
||||
// These tests verify that all interfaces are properly exported and usable
|
||||
|
||||
tap.test('Interfaces: ResearchOptions should be properly typed', async () => {
|
||||
const testOptions: smartai.ResearchOptions = {
|
||||
query: 'test query',
|
||||
searchDepth: 'basic',
|
||||
maxSources: 10,
|
||||
includeWebSearch: true,
|
||||
background: false
|
||||
};
|
||||
|
||||
expect(testOptions).toBeInstanceOf(Object);
|
||||
expect(testOptions.query).toEqual('test query');
|
||||
expect(testOptions.searchDepth).toEqual('basic');
|
||||
});
|
||||
|
||||
tap.test('Interfaces: ResearchResponse should be properly typed', async () => {
|
||||
const testResponse: smartai.ResearchResponse = {
|
||||
answer: 'test answer',
|
||||
sources: [
|
||||
{
|
||||
url: 'https://example.com',
|
||||
title: 'Example Source',
|
||||
snippet: 'This is a snippet'
|
||||
}
|
||||
],
|
||||
searchQueries: ['query1', 'query2'],
|
||||
metadata: {
|
||||
model: 'test-model',
|
||||
tokensUsed: 100
|
||||
}
|
||||
};
|
||||
|
||||
expect(testResponse).toBeInstanceOf(Object);
|
||||
expect(testResponse.answer).toEqual('test answer');
|
||||
expect(testResponse.sources).toBeArray();
|
||||
expect(testResponse.sources[0].url).toEqual('https://example.com');
|
||||
});
|
||||
|
||||
tap.test('Interfaces: ChatOptions should be properly typed', async () => {
|
||||
const testChatOptions: smartai.ChatOptions = {
|
||||
systemMessage: 'You are a helpful assistant',
|
||||
userMessage: 'Hello',
|
||||
messageHistory: [
|
||||
{ role: 'user', content: 'Previous message' },
|
||||
{ role: 'assistant', content: 'Previous response' }
|
||||
]
|
||||
};
|
||||
|
||||
expect(testChatOptions).toBeInstanceOf(Object);
|
||||
expect(testChatOptions.systemMessage).toBeTruthy();
|
||||
expect(testChatOptions.messageHistory).toBeArray();
|
||||
});
|
||||
|
||||
tap.test('Interfaces: ChatResponse should be properly typed', async () => {
|
||||
const testChatResponse: smartai.ChatResponse = {
|
||||
role: 'assistant',
|
||||
message: 'This is a response'
|
||||
};
|
||||
|
||||
expect(testChatResponse).toBeInstanceOf(Object);
|
||||
expect(testChatResponse.role).toEqual('assistant');
|
||||
expect(testChatResponse.message).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('Interfaces: ChatMessage should be properly typed', async () => {
|
||||
const testMessage: smartai.ChatMessage = {
|
||||
role: 'user',
|
||||
content: 'Test message'
|
||||
};
|
||||
|
||||
expect(testMessage).toBeInstanceOf(Object);
|
||||
expect(testMessage.role).toBeOneOf(['user', 'assistant', 'system']);
|
||||
expect(testMessage.content).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('Interfaces: Provider options should be properly typed', async () => {
|
||||
// OpenAI options
|
||||
const openaiOptions: smartai.IOpenaiProviderOptions = {
|
||||
openaiToken: 'test-token',
|
||||
chatModel: 'gpt-5-mini',
|
||||
audioModel: 'tts-1-hd',
|
||||
visionModel: '04-mini',
|
||||
researchModel: 'o4-mini-deep-research-2025-06-26',
|
||||
enableWebSearch: true
|
||||
};
|
||||
|
||||
expect(openaiOptions).toBeInstanceOf(Object);
|
||||
expect(openaiOptions.openaiToken).toBeTruthy();
|
||||
|
||||
// Anthropic options
|
||||
const anthropicOptions: smartai.IAnthropicProviderOptions = {
|
||||
anthropicToken: 'test-token',
|
||||
enableWebSearch: true,
|
||||
searchDomainAllowList: ['example.com'],
|
||||
searchDomainBlockList: ['blocked.com']
|
||||
};
|
||||
|
||||
expect(anthropicOptions).toBeInstanceOf(Object);
|
||||
expect(anthropicOptions.anthropicToken).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('Interfaces: Search depth values should be valid', async () => {
|
||||
const validDepths: smartai.ResearchOptions['searchDepth'][] = ['basic', 'advanced', 'deep'];
|
||||
|
||||
for (const depth of validDepths) {
|
||||
const options: smartai.ResearchOptions = {
|
||||
query: 'test',
|
||||
searchDepth: depth
|
||||
};
|
||||
expect(options.searchDepth).toBeOneOf(['basic', 'advanced', 'deep', undefined]);
|
||||
}
|
||||
});
|
||||
|
||||
tap.test('Interfaces: Optional properties should work correctly', async () => {
|
||||
// Minimal ResearchOptions
|
||||
const minimalOptions: smartai.ResearchOptions = {
|
||||
query: 'test query'
|
||||
};
|
||||
|
||||
expect(minimalOptions.query).toBeTruthy();
|
||||
expect(minimalOptions.searchDepth).toBeUndefined();
|
||||
expect(minimalOptions.maxSources).toBeUndefined();
|
||||
|
||||
// Minimal ChatOptions
|
||||
const minimalChat: smartai.ChatOptions = {
|
||||
systemMessage: 'system',
|
||||
userMessage: 'user',
|
||||
messageHistory: []
|
||||
};
|
||||
|
||||
expect(minimalChat.messageHistory).toBeArray();
|
||||
expect(minimalChat.messageHistory.length).toEqual(0);
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
390
test/test.ollama.ts
Normal file
390
test/test.ollama.ts
Normal file
@@ -0,0 +1,390 @@
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import { createOllamaModel } from '../ts/smartai.provider.ollama.js';
|
||||
import type { ISmartAiOptions } from '../ts/smartai.interfaces.js';
|
||||
|
||||
tap.test('createOllamaModel returns valid LanguageModelV3', async () => {
|
||||
const model = createOllamaModel({
|
||||
provider: 'ollama',
|
||||
model: 'qwen3:8b',
|
||||
ollamaOptions: { think: true, num_ctx: 4096 },
|
||||
});
|
||||
|
||||
expect(model.specificationVersion).toEqual('v3');
|
||||
expect(model.provider).toEqual('ollama');
|
||||
expect(model.modelId).toEqual('qwen3:8b');
|
||||
expect(model).toHaveProperty('doGenerate');
|
||||
expect(model).toHaveProperty('doStream');
|
||||
});
|
||||
|
||||
tap.test('Qwen models get default temperature 0.55', async () => {
|
||||
// Mock fetch to capture the request body
|
||||
const originalFetch = globalThis.fetch;
|
||||
let capturedBody: Record<string, unknown> | undefined;
|
||||
|
||||
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
|
||||
capturedBody = JSON.parse(init?.body as string);
|
||||
return new Response(JSON.stringify({
|
||||
message: { content: 'test response', role: 'assistant' },
|
||||
done: true,
|
||||
prompt_eval_count: 10,
|
||||
eval_count: 5,
|
||||
}), { status: 200 });
|
||||
};
|
||||
|
||||
try {
|
||||
const model = createOllamaModel({
|
||||
provider: 'ollama',
|
||||
model: 'qwen3:8b',
|
||||
});
|
||||
|
||||
await model.doGenerate({
|
||||
prompt: [{ role: 'user', content: [{ type: 'text', text: 'hello' }] }],
|
||||
inputFormat: 'prompt',
|
||||
} as any);
|
||||
|
||||
expect(capturedBody).toBeTruthy();
|
||||
// Temperature 0.55 should be in the options
|
||||
expect((capturedBody!.options as Record<string, unknown>).temperature).toEqual(0.55);
|
||||
} finally {
|
||||
globalThis.fetch = originalFetch;
|
||||
}
|
||||
});
|
||||
|
||||
tap.test('think option is passed at top level of request body', async () => {
|
||||
const originalFetch = globalThis.fetch;
|
||||
let capturedBody: Record<string, unknown> | undefined;
|
||||
|
||||
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
|
||||
capturedBody = JSON.parse(init?.body as string);
|
||||
return new Response(JSON.stringify({
|
||||
message: { content: 'test', role: 'assistant', thinking: 'let me think...' },
|
||||
done: true,
|
||||
prompt_eval_count: 10,
|
||||
eval_count: 5,
|
||||
}), { status: 200 });
|
||||
};
|
||||
|
||||
try {
|
||||
const model = createOllamaModel({
|
||||
provider: 'ollama',
|
||||
model: 'qwen3:8b',
|
||||
ollamaOptions: { think: true, num_ctx: 4096 },
|
||||
});
|
||||
|
||||
await model.doGenerate({
|
||||
prompt: [{ role: 'user', content: [{ type: 'text', text: 'hello' }] }],
|
||||
inputFormat: 'prompt',
|
||||
} as any);
|
||||
|
||||
expect(capturedBody).toBeTruthy();
|
||||
// think should be at top level, not inside options
|
||||
expect(capturedBody!.think).toEqual(true);
|
||||
// num_ctx should be in options
|
||||
expect((capturedBody!.options as Record<string, unknown>).num_ctx).toEqual(4096);
|
||||
} finally {
|
||||
globalThis.fetch = originalFetch;
|
||||
}
|
||||
});
|
||||
|
||||
tap.test('Non-qwen models do not get default temperature', async () => {
|
||||
const originalFetch = globalThis.fetch;
|
||||
let capturedBody: Record<string, unknown> | undefined;
|
||||
|
||||
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
|
||||
capturedBody = JSON.parse(init?.body as string);
|
||||
return new Response(JSON.stringify({
|
||||
message: { content: 'test', role: 'assistant' },
|
||||
done: true,
|
||||
}), { status: 200 });
|
||||
};
|
||||
|
||||
try {
|
||||
const model = createOllamaModel({
|
||||
provider: 'ollama',
|
||||
model: 'llama3:8b',
|
||||
});
|
||||
|
||||
await model.doGenerate({
|
||||
prompt: [{ role: 'user', content: [{ type: 'text', text: 'hello' }] }],
|
||||
inputFormat: 'prompt',
|
||||
} as any);
|
||||
|
||||
expect(capturedBody).toBeTruthy();
|
||||
// No temperature should be set
|
||||
expect((capturedBody!.options as Record<string, unknown>).temperature).toBeUndefined();
|
||||
} finally {
|
||||
globalThis.fetch = originalFetch;
|
||||
}
|
||||
});
|
||||
|
||||
tap.test('doGenerate parses reasoning/thinking from response', async () => {
|
||||
const originalFetch = globalThis.fetch;
|
||||
|
||||
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
|
||||
return new Response(JSON.stringify({
|
||||
message: {
|
||||
content: 'The answer is 42.',
|
||||
role: 'assistant',
|
||||
thinking: 'Let me reason about this carefully...',
|
||||
},
|
||||
done: true,
|
||||
prompt_eval_count: 20,
|
||||
eval_count: 15,
|
||||
}), { status: 200 });
|
||||
};
|
||||
|
||||
try {
|
||||
const model = createOllamaModel({
|
||||
provider: 'ollama',
|
||||
model: 'qwen3:8b',
|
||||
ollamaOptions: { think: true },
|
||||
});
|
||||
|
||||
const result = await model.doGenerate({
|
||||
prompt: [{ role: 'user', content: [{ type: 'text', text: 'What is the meaning of life?' }] }],
|
||||
} as any);
|
||||
|
||||
// Should have both reasoning and text content
|
||||
const reasoningParts = result.content.filter(c => c.type === 'reasoning');
|
||||
const textParts = result.content.filter(c => c.type === 'text');
|
||||
|
||||
expect(reasoningParts.length).toEqual(1);
|
||||
expect((reasoningParts[0] as any).text).toEqual('Let me reason about this carefully...');
|
||||
expect(textParts.length).toEqual(1);
|
||||
expect((textParts[0] as any).text).toEqual('The answer is 42.');
|
||||
expect(result.finishReason.unified).toEqual('stop');
|
||||
} finally {
|
||||
globalThis.fetch = originalFetch;
|
||||
}
|
||||
});
|
||||
|
||||
tap.test('doGenerate parses tool calls from response', async () => {
|
||||
const originalFetch = globalThis.fetch;
|
||||
|
||||
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
|
||||
return new Response(JSON.stringify({
|
||||
message: {
|
||||
content: '',
|
||||
role: 'assistant',
|
||||
tool_calls: [
|
||||
{
|
||||
function: {
|
||||
name: 'get_weather',
|
||||
arguments: { location: 'London', unit: 'celsius' },
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
done: true,
|
||||
prompt_eval_count: 30,
|
||||
eval_count: 10,
|
||||
}), { status: 200 });
|
||||
};
|
||||
|
||||
try {
|
||||
const model = createOllamaModel({
|
||||
provider: 'ollama',
|
||||
model: 'qwen3:8b',
|
||||
});
|
||||
|
||||
const result = await model.doGenerate({
|
||||
prompt: [{ role: 'user', content: [{ type: 'text', text: 'What is the weather in London?' }] }],
|
||||
tools: [{
|
||||
type: 'function' as const,
|
||||
name: 'get_weather',
|
||||
description: 'Get weather for a location',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
location: { type: 'string' },
|
||||
unit: { type: 'string' },
|
||||
},
|
||||
},
|
||||
}],
|
||||
} as any);
|
||||
|
||||
const toolCalls = result.content.filter(c => c.type === 'tool-call');
|
||||
expect(toolCalls.length).toEqual(1);
|
||||
expect((toolCalls[0] as any).toolName).toEqual('get_weather');
|
||||
expect(JSON.parse((toolCalls[0] as any).input)).toEqual({ location: 'London', unit: 'celsius' });
|
||||
expect(result.finishReason.unified).toEqual('tool-calls');
|
||||
} finally {
|
||||
globalThis.fetch = originalFetch;
|
||||
}
|
||||
});
|
||||
|
||||
tap.test('doStream produces correct stream parts', async () => {
|
||||
const originalFetch = globalThis.fetch;
|
||||
|
||||
// Simulate Ollama's newline-delimited JSON streaming
|
||||
const chunks = [
|
||||
JSON.stringify({ message: { content: 'Hello', role: 'assistant' }, done: false }) + '\n',
|
||||
JSON.stringify({ message: { content: ' world', role: 'assistant' }, done: false }) + '\n',
|
||||
JSON.stringify({ message: { content: '!', role: 'assistant' }, done: true, prompt_eval_count: 5, eval_count: 3 }) + '\n',
|
||||
];
|
||||
|
||||
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
|
||||
const encoder = new TextEncoder();
|
||||
const stream = new ReadableStream({
|
||||
start(controller) {
|
||||
for (const chunk of chunks) {
|
||||
controller.enqueue(encoder.encode(chunk));
|
||||
}
|
||||
controller.close();
|
||||
},
|
||||
});
|
||||
return new Response(stream, { status: 200 });
|
||||
};
|
||||
|
||||
try {
|
||||
const model = createOllamaModel({
|
||||
provider: 'ollama',
|
||||
model: 'llama3:8b',
|
||||
});
|
||||
|
||||
const result = await model.doStream({
|
||||
prompt: [{ role: 'user', content: [{ type: 'text', text: 'hello' }] }],
|
||||
} as any);
|
||||
|
||||
const parts: any[] = [];
|
||||
const reader = result.stream.getReader();
|
||||
while (true) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
parts.push(value);
|
||||
}
|
||||
|
||||
// Should have: text-start, text-delta x3, text-end, finish
|
||||
const textDeltas = parts.filter(p => p.type === 'text-delta');
|
||||
const finishParts = parts.filter(p => p.type === 'finish');
|
||||
const textStarts = parts.filter(p => p.type === 'text-start');
|
||||
const textEnds = parts.filter(p => p.type === 'text-end');
|
||||
|
||||
expect(textStarts.length).toEqual(1);
|
||||
expect(textDeltas.length).toEqual(3);
|
||||
expect(textDeltas.map((d: any) => d.delta).join('')).toEqual('Hello world!');
|
||||
expect(textEnds.length).toEqual(1);
|
||||
expect(finishParts.length).toEqual(1);
|
||||
expect(finishParts[0].finishReason.unified).toEqual('stop');
|
||||
} finally {
|
||||
globalThis.fetch = originalFetch;
|
||||
}
|
||||
});
|
||||
|
||||
tap.test('doStream handles thinking/reasoning in stream', async () => {
|
||||
const originalFetch = globalThis.fetch;
|
||||
|
||||
const chunks = [
|
||||
JSON.stringify({ message: { thinking: 'Let me think...', content: '', role: 'assistant' }, done: false }) + '\n',
|
||||
JSON.stringify({ message: { thinking: ' about this.', content: '', role: 'assistant' }, done: false }) + '\n',
|
||||
JSON.stringify({ message: { content: 'The answer.', role: 'assistant' }, done: false }) + '\n',
|
||||
JSON.stringify({ message: { content: '', role: 'assistant' }, done: true, prompt_eval_count: 10, eval_count: 8 }) + '\n',
|
||||
];
|
||||
|
||||
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
|
||||
const encoder = new TextEncoder();
|
||||
const stream = new ReadableStream({
|
||||
start(controller) {
|
||||
for (const chunk of chunks) {
|
||||
controller.enqueue(encoder.encode(chunk));
|
||||
}
|
||||
controller.close();
|
||||
},
|
||||
});
|
||||
return new Response(stream, { status: 200 });
|
||||
};
|
||||
|
||||
try {
|
||||
const model = createOllamaModel({
|
||||
provider: 'ollama',
|
||||
model: 'qwen3:8b',
|
||||
ollamaOptions: { think: true },
|
||||
});
|
||||
|
||||
const result = await model.doStream({
|
||||
prompt: [{ role: 'user', content: [{ type: 'text', text: 'think about this' }] }],
|
||||
} as any);
|
||||
|
||||
const parts: any[] = [];
|
||||
const reader = result.stream.getReader();
|
||||
while (true) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
parts.push(value);
|
||||
}
|
||||
|
||||
const reasoningStarts = parts.filter(p => p.type === 'reasoning-start');
|
||||
const reasoningDeltas = parts.filter(p => p.type === 'reasoning-delta');
|
||||
const reasoningEnds = parts.filter(p => p.type === 'reasoning-end');
|
||||
const textDeltas = parts.filter(p => p.type === 'text-delta');
|
||||
|
||||
expect(reasoningStarts.length).toEqual(1);
|
||||
expect(reasoningDeltas.length).toEqual(2);
|
||||
expect(reasoningDeltas.map((d: any) => d.delta).join('')).toEqual('Let me think... about this.');
|
||||
expect(reasoningEnds.length).toEqual(1);
|
||||
expect(textDeltas.length).toEqual(1);
|
||||
expect(textDeltas[0].delta).toEqual('The answer.');
|
||||
} finally {
|
||||
globalThis.fetch = originalFetch;
|
||||
}
|
||||
});
|
||||
|
||||
tap.test('message conversion handles system, assistant, and tool messages', async () => {
|
||||
const originalFetch = globalThis.fetch;
|
||||
let capturedBody: Record<string, unknown> | undefined;
|
||||
|
||||
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
|
||||
capturedBody = JSON.parse(init?.body as string);
|
||||
return new Response(JSON.stringify({
|
||||
message: { content: 'response', role: 'assistant' },
|
||||
done: true,
|
||||
}), { status: 200 });
|
||||
};
|
||||
|
||||
try {
|
||||
const model = createOllamaModel({
|
||||
provider: 'ollama',
|
||||
model: 'llama3:8b',
|
||||
});
|
||||
|
||||
await model.doGenerate({
|
||||
prompt: [
|
||||
{ role: 'system', content: 'You are helpful.' },
|
||||
{ role: 'user', content: [{ type: 'text', text: 'Hi' }] },
|
||||
{
|
||||
role: 'assistant',
|
||||
content: [
|
||||
{ type: 'text', text: 'Let me check.' },
|
||||
{ type: 'tool-call', toolCallId: 'tc1', toolName: 'search', input: '{"q":"test"}' },
|
||||
],
|
||||
},
|
||||
{
|
||||
role: 'tool',
|
||||
content: [
|
||||
{ type: 'tool-result', toolCallId: 'tc1', output: { type: 'text', value: 'result data' } },
|
||||
],
|
||||
},
|
||||
{ role: 'user', content: [{ type: 'text', text: 'What did you find?' }] },
|
||||
],
|
||||
} as any);
|
||||
|
||||
const messages = capturedBody!.messages as Array<Record<string, unknown>>;
|
||||
expect(messages.length).toEqual(5);
|
||||
expect(messages[0].role).toEqual('system');
|
||||
expect(messages[0].content).toEqual('You are helpful.');
|
||||
expect(messages[1].role).toEqual('user');
|
||||
expect(messages[1].content).toEqual('Hi');
|
||||
expect(messages[2].role).toEqual('assistant');
|
||||
expect(messages[2].content).toEqual('Let me check.');
|
||||
expect((messages[2].tool_calls as any[]).length).toEqual(1);
|
||||
expect((messages[2].tool_calls as any[])[0].function.name).toEqual('search');
|
||||
expect(messages[3].role).toEqual('tool');
|
||||
expect(messages[3].content).toEqual('result data');
|
||||
expect(messages[4].role).toEqual('user');
|
||||
expect(messages[4].content).toEqual('What did you find?');
|
||||
} finally {
|
||||
globalThis.fetch = originalFetch;
|
||||
}
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,223 +0,0 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
import * as smartai from '../ts/index.js';
|
||||
import * as path from 'path';
|
||||
import { promises as fs } from 'fs';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
|
||||
// Helper function to save research results
|
||||
async function saveResearchResult(testName: string, result: any) {
|
||||
const sanitizedName = testName.replace(/[^a-z0-9]/gi, '_').toLowerCase();
|
||||
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
|
||||
const filename = `${sanitizedName}_${timestamp}.json`;
|
||||
const filepath = path.join('.nogit', 'testresults', 'research', filename);
|
||||
|
||||
await fs.mkdir(path.dirname(filepath), { recursive: true });
|
||||
await fs.writeFile(filepath, JSON.stringify(result, null, 2), 'utf-8');
|
||||
|
||||
console.log(` 💾 Saved to: ${filepath}`);
|
||||
}
|
||||
|
||||
let anthropicProvider: smartai.AnthropicProvider;
|
||||
|
||||
tap.test('Anthropic Research: should initialize provider with web search', async () => {
|
||||
anthropicProvider = new smartai.AnthropicProvider({
|
||||
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
|
||||
enableWebSearch: true
|
||||
});
|
||||
|
||||
await anthropicProvider.start();
|
||||
expect(anthropicProvider).toBeInstanceOf(smartai.AnthropicProvider);
|
||||
expect(typeof anthropicProvider.research).toEqual('function');
|
||||
});
|
||||
|
||||
tap.test('Anthropic Research: should perform basic research query', async () => {
|
||||
const result = await anthropicProvider.research({
|
||||
query: 'What is machine learning and its main applications?',
|
||||
searchDepth: 'basic'
|
||||
});
|
||||
|
||||
console.log('Anthropic Basic Research:');
|
||||
console.log('- Answer length:', result.answer.length);
|
||||
console.log('- Sources found:', result.sources.length);
|
||||
console.log('- First 200 chars:', result.answer.substring(0, 200));
|
||||
|
||||
await saveResearchResult('basic_research_machine_learning', result);
|
||||
|
||||
expect(result).toBeTruthy();
|
||||
expect(result.answer).toBeTruthy();
|
||||
expect(result.answer.toLowerCase()).toInclude('machine learning');
|
||||
expect(result.sources).toBeArray();
|
||||
expect(result.metadata).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('Anthropic Research: should perform research with web search', async () => {
|
||||
const result = await anthropicProvider.research({
|
||||
query: 'What are the latest developments in renewable energy technology?',
|
||||
searchDepth: 'advanced',
|
||||
includeWebSearch: true,
|
||||
maxSources: 5
|
||||
});
|
||||
|
||||
console.log('Anthropic Web Search Research:');
|
||||
console.log('- Answer length:', result.answer.length);
|
||||
console.log('- Sources:', result.sources.length);
|
||||
if (result.searchQueries) {
|
||||
console.log('- Search queries:', result.searchQueries);
|
||||
}
|
||||
|
||||
await saveResearchResult('web_search_renewable_energy', result);
|
||||
|
||||
expect(result.answer).toBeTruthy();
|
||||
expect(result.answer.toLowerCase()).toInclude('renewable');
|
||||
|
||||
// Check if sources were extracted
|
||||
if (result.sources.length > 0) {
|
||||
console.log('- Example source:', result.sources[0]);
|
||||
expect(result.sources[0]).toHaveProperty('url');
|
||||
}
|
||||
});
|
||||
|
||||
tap.test('Anthropic Research: should handle deep research queries', async () => {
|
||||
const result = await anthropicProvider.research({
|
||||
query: 'Explain the differences between REST and GraphQL APIs',
|
||||
searchDepth: 'deep'
|
||||
});
|
||||
|
||||
console.log('Anthropic Deep Research:');
|
||||
console.log('- Answer length:', result.answer.length);
|
||||
console.log('- Token usage:', result.metadata?.tokensUsed);
|
||||
|
||||
await saveResearchResult('deep_research_rest_vs_graphql', result);
|
||||
|
||||
expect(result.answer).toBeTruthy();
|
||||
expect(result.answer.length).toBeGreaterThan(300);
|
||||
expect(result.answer.toLowerCase()).toInclude('rest');
|
||||
expect(result.answer.toLowerCase()).toInclude('graphql');
|
||||
});
|
||||
|
||||
tap.test('Anthropic Research: should extract citations from response', async () => {
|
||||
const result = await anthropicProvider.research({
|
||||
query: 'What is Docker and how does containerization work?',
|
||||
searchDepth: 'basic',
|
||||
maxSources: 3
|
||||
});
|
||||
|
||||
console.log('Anthropic Citation Extraction:');
|
||||
console.log('- Sources found:', result.sources.length);
|
||||
console.log('- Answer includes Docker:', result.answer.toLowerCase().includes('docker'));
|
||||
|
||||
await saveResearchResult('citation_extraction_docker', result);
|
||||
|
||||
expect(result.answer).toInclude('Docker');
|
||||
|
||||
// Check for URL extraction (both markdown and plain URLs)
|
||||
const hasUrls = result.answer.includes('http') || result.sources.length > 0;
|
||||
console.log('- Contains URLs or sources:', hasUrls);
|
||||
});
|
||||
|
||||
tap.test('Anthropic Research: should use domain filtering when configured', async () => {
|
||||
// Create a new provider with domain restrictions
|
||||
const filteredProvider = new smartai.AnthropicProvider({
|
||||
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
|
||||
enableWebSearch: true,
|
||||
searchDomainAllowList: ['wikipedia.org', 'docs.microsoft.com'],
|
||||
searchDomainBlockList: ['reddit.com']
|
||||
});
|
||||
|
||||
await filteredProvider.start();
|
||||
|
||||
const result = await filteredProvider.research({
|
||||
query: 'What is JavaScript?',
|
||||
searchDepth: 'basic'
|
||||
});
|
||||
|
||||
console.log('Anthropic Domain Filtering Test:');
|
||||
console.log('- Answer length:', result.answer.length);
|
||||
console.log('- Applied domain filters (allow: wikipedia, docs.microsoft)');
|
||||
|
||||
await saveResearchResult('domain_filtering_javascript', result);
|
||||
|
||||
expect(result.answer).toBeTruthy();
|
||||
expect(result.answer.toLowerCase()).toInclude('javascript');
|
||||
|
||||
await filteredProvider.stop();
|
||||
});
|
||||
|
||||
tap.test('Anthropic Research: should handle errors gracefully', async () => {
|
||||
let errorCaught = false;
|
||||
|
||||
try {
|
||||
await anthropicProvider.research({
|
||||
query: '', // Empty query
|
||||
searchDepth: 'basic'
|
||||
});
|
||||
} catch (error) {
|
||||
errorCaught = true;
|
||||
console.log('Expected error for empty query:', error.message.substring(0, 100));
|
||||
}
|
||||
|
||||
// Anthropic might handle empty queries differently
|
||||
console.log(`Empty query error test - Error caught: ${errorCaught}`);
|
||||
});
|
||||
|
||||
tap.test('Anthropic Research: should handle different search depths', async () => {
|
||||
// Test basic search depth
|
||||
const basicResult = await anthropicProvider.research({
|
||||
query: 'What is Python?',
|
||||
searchDepth: 'basic'
|
||||
});
|
||||
|
||||
// Test advanced search depth
|
||||
const advancedResult = await anthropicProvider.research({
|
||||
query: 'What is Python?',
|
||||
searchDepth: 'advanced'
|
||||
});
|
||||
|
||||
console.log('Anthropic Search Depth Comparison:');
|
||||
console.log('- Basic answer length:', basicResult.answer.length);
|
||||
console.log('- Advanced answer length:', advancedResult.answer.length);
|
||||
console.log('- Basic tokens:', basicResult.metadata?.tokensUsed);
|
||||
console.log('- Advanced tokens:', advancedResult.metadata?.tokensUsed);
|
||||
|
||||
await saveResearchResult('search_depth_python_basic', basicResult);
|
||||
await saveResearchResult('search_depth_python_advanced', advancedResult);
|
||||
|
||||
expect(basicResult.answer).toBeTruthy();
|
||||
expect(advancedResult.answer).toBeTruthy();
|
||||
|
||||
// Advanced search typically produces longer answers
|
||||
// But this isn't guaranteed, so we just check they exist
|
||||
expect(basicResult.answer.toLowerCase()).toInclude('python');
|
||||
expect(advancedResult.answer.toLowerCase()).toInclude('python');
|
||||
});
|
||||
|
||||
tap.test('Anthropic Research: ARM vs. Qualcomm comparison', async () => {
|
||||
const result = await anthropicProvider.research({
|
||||
query: 'Compare ARM and Qualcomm: their technologies, market positions, and recent developments in the mobile and computing sectors',
|
||||
searchDepth: 'advanced',
|
||||
includeWebSearch: true,
|
||||
maxSources: 10
|
||||
});
|
||||
|
||||
console.log('ARM vs. Qualcomm Research:');
|
||||
console.log('- Answer length:', result.answer.length);
|
||||
console.log('- Sources found:', result.sources.length);
|
||||
console.log('- First 300 chars:', result.answer.substring(0, 300));
|
||||
|
||||
await saveResearchResult('arm_vs_qualcomm_comparison', result);
|
||||
|
||||
expect(result.answer).toBeTruthy();
|
||||
expect(result.answer.length).toBeGreaterThan(500);
|
||||
expect(result.answer.toLowerCase()).toInclude('arm');
|
||||
expect(result.answer.toLowerCase()).toInclude('qualcomm');
|
||||
expect(result.sources.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
tap.test('Anthropic Research: should clean up provider', async () => {
|
||||
await anthropicProvider.stop();
|
||||
console.log('Anthropic research provider stopped successfully');
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,172 +0,0 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
import * as smartai from '../ts/index.js';
|
||||
import * as path from 'path';
|
||||
import { promises as fs } from 'fs';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
|
||||
// Helper function to save research results
|
||||
async function saveResearchResult(testName: string, result: any) {
|
||||
const sanitizedName = testName.replace(/[^a-z0-9]/gi, '_').toLowerCase();
|
||||
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
|
||||
const filename = `openai_${sanitizedName}_${timestamp}.json`;
|
||||
const filepath = path.join('.nogit', 'testresults', 'research', filename);
|
||||
|
||||
await fs.mkdir(path.dirname(filepath), { recursive: true });
|
||||
await fs.writeFile(filepath, JSON.stringify(result, null, 2), 'utf-8');
|
||||
|
||||
console.log(` 💾 Saved to: ${filepath}`);
|
||||
}
|
||||
|
||||
let openaiProvider: smartai.OpenAiProvider;
|
||||
|
||||
tap.test('OpenAI Research: should initialize provider with research capabilities', async () => {
|
||||
openaiProvider = new smartai.OpenAiProvider({
|
||||
openaiToken: await testQenv.getEnvVarOnDemand('OPENAI_TOKEN'),
|
||||
researchModel: 'o4-mini-deep-research-2025-06-26',
|
||||
enableWebSearch: true
|
||||
});
|
||||
|
||||
await openaiProvider.start();
|
||||
expect(openaiProvider).toBeInstanceOf(smartai.OpenAiProvider);
|
||||
expect(typeof openaiProvider.research).toEqual('function');
|
||||
});
|
||||
|
||||
tap.test('OpenAI Research: should perform basic research query', async () => {
|
||||
const result = await openaiProvider.research({
|
||||
query: 'What is TypeScript and why is it useful for web development?',
|
||||
searchDepth: 'basic'
|
||||
});
|
||||
|
||||
console.log('OpenAI Basic Research:');
|
||||
console.log('- Answer length:', result.answer.length);
|
||||
console.log('- Sources found:', result.sources.length);
|
||||
console.log('- First 200 chars:', result.answer.substring(0, 200));
|
||||
|
||||
await saveResearchResult('basic_research_typescript', result);
|
||||
|
||||
expect(result).toBeTruthy();
|
||||
expect(result.answer).toBeTruthy();
|
||||
expect(result.answer.toLowerCase()).toInclude('typescript');
|
||||
expect(result.sources).toBeArray();
|
||||
expect(result.metadata).toBeTruthy();
|
||||
expect(result.metadata.model).toBeTruthy();
|
||||
});
|
||||
|
||||
tap.test('OpenAI Research: should perform research with web search enabled', async () => {
|
||||
const result = await openaiProvider.research({
|
||||
query: 'What are the latest features in ECMAScript 2024?',
|
||||
searchDepth: 'advanced',
|
||||
includeWebSearch: true,
|
||||
maxSources: 5
|
||||
});
|
||||
|
||||
console.log('OpenAI Web Search Research:');
|
||||
console.log('- Answer length:', result.answer.length);
|
||||
console.log('- Sources:', result.sources.length);
|
||||
if (result.searchQueries) {
|
||||
console.log('- Search queries used:', result.searchQueries);
|
||||
}
|
||||
|
||||
await saveResearchResult('web_search_ecmascript', result);
|
||||
|
||||
expect(result.answer).toBeTruthy();
|
||||
expect(result.answer.toLowerCase()).toInclude('ecmascript');
|
||||
|
||||
// The model might include sources or search queries
|
||||
if (result.sources.length > 0) {
|
||||
expect(result.sources[0]).toHaveProperty('url');
|
||||
expect(result.sources[0]).toHaveProperty('title');
|
||||
}
|
||||
});
|
||||
|
||||
tap.test('OpenAI Research: should handle deep research for complex topics', async () => {
|
||||
// Skip this test if it takes too long or costs too much
|
||||
// You can enable it for thorough testing
|
||||
const skipDeepResearch = true;
|
||||
|
||||
if (skipDeepResearch) {
|
||||
console.log('Skipping deep research test to save API costs');
|
||||
return;
|
||||
}
|
||||
|
||||
const result = await openaiProvider.research({
|
||||
query: 'Compare the pros and cons of microservices vs monolithic architecture',
|
||||
searchDepth: 'deep',
|
||||
includeWebSearch: true
|
||||
});
|
||||
|
||||
console.log('OpenAI Deep Research:');
|
||||
console.log('- Answer length:', result.answer.length);
|
||||
console.log('- Token usage:', result.metadata?.tokensUsed);
|
||||
|
||||
expect(result.answer).toBeTruthy();
|
||||
expect(result.answer.length).toBeGreaterThan(500);
|
||||
expect(result.answer.toLowerCase()).toInclude('microservices');
|
||||
expect(result.answer.toLowerCase()).toInclude('monolithic');
|
||||
});
|
||||
|
||||
tap.test('OpenAI Research: should extract sources from markdown links', async () => {
|
||||
const result = await openaiProvider.research({
|
||||
query: 'What is Node.js and provide some official documentation links?',
|
||||
searchDepth: 'basic',
|
||||
maxSources: 3
|
||||
});
|
||||
|
||||
console.log('OpenAI Source Extraction:');
|
||||
console.log('- Sources found:', result.sources.length);
|
||||
|
||||
await saveResearchResult('source_extraction_nodejs', result);
|
||||
|
||||
if (result.sources.length > 0) {
|
||||
console.log('- Example source:', result.sources[0]);
|
||||
expect(result.sources[0].url).toBeTruthy();
|
||||
expect(result.sources[0].title).toBeTruthy();
|
||||
}
|
||||
|
||||
expect(result.answer).toInclude('Node.js');
|
||||
});
|
||||
|
||||
tap.test('OpenAI Research: should handle research errors gracefully', async () => {
|
||||
// Test with an extremely long query that might cause issues
|
||||
const longQuery = 'a'.repeat(10000);
|
||||
|
||||
let errorCaught = false;
|
||||
try {
|
||||
await openaiProvider.research({
|
||||
query: longQuery,
|
||||
searchDepth: 'basic'
|
||||
});
|
||||
} catch (error) {
|
||||
errorCaught = true;
|
||||
console.log('Expected error for long query:', error.message.substring(0, 100));
|
||||
expect(error.message).toBeTruthy();
|
||||
}
|
||||
|
||||
// OpenAI might handle long queries, so we don't assert the error
|
||||
console.log(`Long query error test - Error caught: ${errorCaught}`);
|
||||
});
|
||||
|
||||
tap.test('OpenAI Research: should respect maxSources parameter', async () => {
|
||||
const maxSources = 3;
|
||||
const result = await openaiProvider.research({
|
||||
query: 'List popular JavaScript frameworks',
|
||||
searchDepth: 'basic',
|
||||
maxSources: maxSources
|
||||
});
|
||||
|
||||
console.log(`OpenAI Max Sources Test - Requested: ${maxSources}, Found: ${result.sources.length}`);
|
||||
|
||||
// The API might not always return exactly maxSources, but should respect it as a limit
|
||||
if (result.sources.length > 0) {
|
||||
expect(result.sources.length).toBeLessThanOrEqual(maxSources * 2); // Allow some flexibility
|
||||
}
|
||||
});
|
||||
|
||||
tap.test('OpenAI Research: should clean up provider', async () => {
|
||||
await openaiProvider.stop();
|
||||
console.log('OpenAI research provider stopped successfully');
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,80 +0,0 @@
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import * as smartai from '../ts/index.js';
|
||||
|
||||
// Test research method stubs for providers without full implementation
|
||||
// These providers have research methods that throw "not yet supported" errors
|
||||
|
||||
tap.test('Research Stubs: Perplexity provider should have research method', async () => {
|
||||
const perplexityProvider = new smartai.PerplexityProvider({
|
||||
perplexityToken: 'test-token'
|
||||
});
|
||||
|
||||
// Perplexity has a basic implementation with Sonar models
|
||||
expect(typeof perplexityProvider.research).toEqual('function');
|
||||
});
|
||||
|
||||
tap.test('Research Stubs: Groq provider should throw not supported error', async () => {
|
||||
const groqProvider = new smartai.GroqProvider({
|
||||
groqToken: 'test-token'
|
||||
});
|
||||
|
||||
expect(typeof groqProvider.research).toEqual('function');
|
||||
|
||||
let errorCaught = false;
|
||||
try {
|
||||
await groqProvider.research({ query: 'test' });
|
||||
} catch (error) {
|
||||
errorCaught = true;
|
||||
expect(error.message).toInclude('not yet supported');
|
||||
}
|
||||
expect(errorCaught).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('Research Stubs: Ollama provider should throw not supported error', async () => {
|
||||
const ollamaProvider = new smartai.OllamaProvider({});
|
||||
|
||||
expect(typeof ollamaProvider.research).toEqual('function');
|
||||
|
||||
let errorCaught = false;
|
||||
try {
|
||||
await ollamaProvider.research({ query: 'test' });
|
||||
} catch (error) {
|
||||
errorCaught = true;
|
||||
expect(error.message).toInclude('not yet supported');
|
||||
}
|
||||
expect(errorCaught).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('Research Stubs: xAI provider should throw not supported error', async () => {
|
||||
const xaiProvider = new smartai.XAIProvider({
|
||||
xaiToken: 'test-token'
|
||||
});
|
||||
|
||||
expect(typeof xaiProvider.research).toEqual('function');
|
||||
|
||||
let errorCaught = false;
|
||||
try {
|
||||
await xaiProvider.research({ query: 'test' });
|
||||
} catch (error) {
|
||||
errorCaught = true;
|
||||
expect(error.message).toInclude('not yet supported');
|
||||
}
|
||||
expect(errorCaught).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('Research Stubs: Exo provider should throw not supported error', async () => {
|
||||
const exoProvider = new smartai.ExoProvider({});
|
||||
|
||||
expect(typeof exoProvider.research).toEqual('function');
|
||||
|
||||
let errorCaught = false;
|
||||
try {
|
||||
await exoProvider.research({ query: 'test' });
|
||||
} catch (error) {
|
||||
errorCaught = true;
|
||||
expect(error.message).toInclude('not yet supported');
|
||||
}
|
||||
expect(errorCaught).toBeTrue();
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
31
test/test.research.ts
Normal file
31
test/test.research.ts
Normal file
@@ -0,0 +1,31 @@
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
import { research } from '../ts_research/index.js';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
|
||||
tap.test('research should return answer and sources', async () => {
|
||||
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
|
||||
if (!apiKey) {
|
||||
console.log('ANTHROPIC_TOKEN not set, skipping test');
|
||||
return;
|
||||
}
|
||||
|
||||
const result = await research({
|
||||
apiKey,
|
||||
query: 'What is the current version of Node.js?',
|
||||
searchDepth: 'basic',
|
||||
});
|
||||
|
||||
console.log('Research answer:', result.answer.substring(0, 200));
|
||||
console.log('Research sources:', result.sources.length);
|
||||
if (result.searchQueries) {
|
||||
console.log('Search queries:', result.searchQueries);
|
||||
}
|
||||
|
||||
expect(result.answer).toBeTruthy();
|
||||
expect(result.answer.length).toBeGreaterThan(10);
|
||||
expect(result.sources).toBeArray();
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
161
test/test.smartai.ts
Normal file
161
test/test.smartai.ts
Normal file
@@ -0,0 +1,161 @@
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
import * as smartai from '../ts/index.js';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
|
||||
tap.test('getModel should return a LanguageModelV3 for anthropic', async () => {
|
||||
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
|
||||
if (!apiKey) {
|
||||
console.log('ANTHROPIC_TOKEN not set, skipping test');
|
||||
return;
|
||||
}
|
||||
|
||||
const model = smartai.getModel({
|
||||
provider: 'anthropic',
|
||||
model: 'claude-sonnet-4-5-20250929',
|
||||
apiKey,
|
||||
});
|
||||
|
||||
expect(model).toHaveProperty('specificationVersion');
|
||||
expect(model).toHaveProperty('provider');
|
||||
expect(model).toHaveProperty('modelId');
|
||||
expect(model).toHaveProperty('doGenerate');
|
||||
expect(model).toHaveProperty('doStream');
|
||||
});
|
||||
|
||||
tap.test('getModel with anthropic prompt caching returns wrapped model', async () => {
|
||||
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
|
||||
if (!apiKey) {
|
||||
console.log('ANTHROPIC_TOKEN not set, skipping test');
|
||||
return;
|
||||
}
|
||||
|
||||
// Default: prompt caching enabled
|
||||
const model = smartai.getModel({
|
||||
provider: 'anthropic',
|
||||
model: 'claude-sonnet-4-5-20250929',
|
||||
apiKey,
|
||||
});
|
||||
|
||||
// With caching disabled
|
||||
const modelNoCaching = smartai.getModel({
|
||||
provider: 'anthropic',
|
||||
model: 'claude-sonnet-4-5-20250929',
|
||||
apiKey,
|
||||
promptCaching: false,
|
||||
});
|
||||
|
||||
// Both should be valid models
|
||||
expect(model).toHaveProperty('doGenerate');
|
||||
expect(modelNoCaching).toHaveProperty('doGenerate');
|
||||
});
|
||||
|
||||
tap.test('generateText with anthropic model', async () => {
|
||||
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
|
||||
if (!apiKey) {
|
||||
console.log('ANTHROPIC_TOKEN not set, skipping test');
|
||||
return;
|
||||
}
|
||||
|
||||
const model = smartai.getModel({
|
||||
provider: 'anthropic',
|
||||
model: 'claude-sonnet-4-5-20250929',
|
||||
apiKey,
|
||||
});
|
||||
|
||||
const result = await smartai.generateText({
|
||||
model,
|
||||
prompt: 'Say hello in exactly 3 words.',
|
||||
});
|
||||
|
||||
console.log('Anthropic response:', result.text);
|
||||
expect(result.text).toBeTruthy();
|
||||
expect(result.text.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
tap.test('getModel should return a LanguageModelV3 for openai', async () => {
|
||||
const apiKey = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
|
||||
if (!apiKey) {
|
||||
console.log('OPENAI_TOKEN not set, skipping test');
|
||||
return;
|
||||
}
|
||||
|
||||
const model = smartai.getModel({
|
||||
provider: 'openai',
|
||||
model: 'gpt-4o-mini',
|
||||
apiKey,
|
||||
});
|
||||
|
||||
expect(model).toHaveProperty('doGenerate');
|
||||
expect(model).toHaveProperty('doStream');
|
||||
});
|
||||
|
||||
tap.test('streamText with anthropic model', async () => {
|
||||
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
|
||||
if (!apiKey) {
|
||||
console.log('ANTHROPIC_TOKEN not set, skipping test');
|
||||
return;
|
||||
}
|
||||
|
||||
const model = smartai.getModel({
|
||||
provider: 'anthropic',
|
||||
model: 'claude-sonnet-4-5-20250929',
|
||||
apiKey,
|
||||
});
|
||||
|
||||
const result = await smartai.streamText({
|
||||
model,
|
||||
prompt: 'Count from 1 to 5.',
|
||||
});
|
||||
|
||||
const tokens: string[] = [];
|
||||
for await (const chunk of result.textStream) {
|
||||
tokens.push(chunk);
|
||||
}
|
||||
|
||||
const fullText = tokens.join('');
|
||||
console.log('Streamed text:', fullText);
|
||||
expect(fullText).toBeTruthy();
|
||||
expect(fullText.length).toBeGreaterThan(0);
|
||||
expect(tokens.length).toBeGreaterThan(1); // Should have multiple chunks
|
||||
});
|
||||
|
||||
tap.test('generateText with openai model', async () => {
|
||||
const apiKey = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
|
||||
if (!apiKey) {
|
||||
console.log('OPENAI_TOKEN not set, skipping test');
|
||||
return;
|
||||
}
|
||||
|
||||
const model = smartai.getModel({
|
||||
provider: 'openai',
|
||||
model: 'gpt-4o-mini',
|
||||
apiKey,
|
||||
});
|
||||
|
||||
const result = await smartai.generateText({
|
||||
model,
|
||||
prompt: 'What is 2+2? Reply with just the number.',
|
||||
});
|
||||
|
||||
console.log('OpenAI response:', result.text);
|
||||
expect(result.text).toBeTruthy();
|
||||
expect(result.text).toInclude('4');
|
||||
});
|
||||
|
||||
tap.test('getModel should throw for unknown provider', async () => {
|
||||
let threw = false;
|
||||
try {
|
||||
smartai.getModel({
|
||||
provider: 'nonexistent' as any,
|
||||
model: 'test',
|
||||
});
|
||||
} catch (e) {
|
||||
threw = true;
|
||||
expect(e.message).toInclude('Unknown provider');
|
||||
}
|
||||
expect(threw).toBeTrue();
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,151 +0,0 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
|
||||
import * as smartai from '../ts/index.js';
|
||||
|
||||
let anthropicProviderQuick: smartai.AnthropicProvider;
|
||||
let anthropicProviderNormal: smartai.AnthropicProvider;
|
||||
let anthropicProviderDeep: smartai.AnthropicProvider;
|
||||
let anthropicProviderOff: smartai.AnthropicProvider;
|
||||
|
||||
// Test 'quick' mode
|
||||
tap.test('Extended Thinking: should create Anthropic provider with quick mode', async () => {
|
||||
anthropicProviderQuick = new smartai.AnthropicProvider({
|
||||
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
|
||||
extendedThinking: 'quick',
|
||||
});
|
||||
await anthropicProviderQuick.start();
|
||||
expect(anthropicProviderQuick).toBeInstanceOf(smartai.AnthropicProvider);
|
||||
});
|
||||
|
||||
tap.test('Extended Thinking: should chat with quick mode (2048 tokens)', async () => {
|
||||
const userMessage = 'Explain quantum entanglement in simple terms.';
|
||||
const response = await anthropicProviderQuick.chat({
|
||||
systemMessage: 'You are a helpful physics teacher.',
|
||||
userMessage: userMessage,
|
||||
messageHistory: [],
|
||||
});
|
||||
console.log(`Quick Mode - User: ${userMessage}`);
|
||||
console.log(`Quick Mode - Response length: ${response.message.length} chars`);
|
||||
expect(response.role).toEqual('assistant');
|
||||
expect(response.message).toBeTruthy();
|
||||
expect(response.message.toLowerCase()).toInclude('quantum');
|
||||
});
|
||||
|
||||
tap.test('Extended Thinking: should stop quick mode provider', async () => {
|
||||
await anthropicProviderQuick.stop();
|
||||
});
|
||||
|
||||
// Test 'normal' mode (default)
|
||||
tap.test('Extended Thinking: should create Anthropic provider with normal mode (default)', async () => {
|
||||
anthropicProviderNormal = new smartai.AnthropicProvider({
|
||||
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
|
||||
// extendedThinking not specified, should default to 'normal'
|
||||
});
|
||||
await anthropicProviderNormal.start();
|
||||
expect(anthropicProviderNormal).toBeInstanceOf(smartai.AnthropicProvider);
|
||||
});
|
||||
|
||||
tap.test('Extended Thinking: should chat with normal mode (8000 tokens default)', async () => {
|
||||
const userMessage = 'What are the implications of the P vs NP problem?';
|
||||
const response = await anthropicProviderNormal.chat({
|
||||
systemMessage: 'You are a helpful computer science expert.',
|
||||
userMessage: userMessage,
|
||||
messageHistory: [],
|
||||
});
|
||||
console.log(`Normal Mode - User: ${userMessage}`);
|
||||
console.log(`Normal Mode - Response length: ${response.message.length} chars`);
|
||||
expect(response.role).toEqual('assistant');
|
||||
expect(response.message).toBeTruthy();
|
||||
expect(response.message.length).toBeGreaterThan(50);
|
||||
});
|
||||
|
||||
tap.test('Extended Thinking: should stop normal mode provider', async () => {
|
||||
await anthropicProviderNormal.stop();
|
||||
});
|
||||
|
||||
// Test 'deep' mode
|
||||
tap.test('Extended Thinking: should create Anthropic provider with deep mode', async () => {
|
||||
anthropicProviderDeep = new smartai.AnthropicProvider({
|
||||
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
|
||||
extendedThinking: 'deep',
|
||||
});
|
||||
await anthropicProviderDeep.start();
|
||||
expect(anthropicProviderDeep).toBeInstanceOf(smartai.AnthropicProvider);
|
||||
});
|
||||
|
||||
tap.test('Extended Thinking: should chat with deep mode (16000 tokens)', async () => {
|
||||
const userMessage = 'Analyze the philosophical implications of artificial consciousness.';
|
||||
const response = await anthropicProviderDeep.chat({
|
||||
systemMessage: 'You are a philosopher and cognitive scientist.',
|
||||
userMessage: userMessage,
|
||||
messageHistory: [],
|
||||
});
|
||||
console.log(`Deep Mode - User: ${userMessage}`);
|
||||
console.log(`Deep Mode - Response length: ${response.message.length} chars`);
|
||||
expect(response.role).toEqual('assistant');
|
||||
expect(response.message).toBeTruthy();
|
||||
expect(response.message.length).toBeGreaterThan(100);
|
||||
});
|
||||
|
||||
tap.test('Extended Thinking: should stop deep mode provider', async () => {
|
||||
await anthropicProviderDeep.stop();
|
||||
});
|
||||
|
||||
// Test 'off' mode
|
||||
tap.test('Extended Thinking: should create Anthropic provider with thinking disabled', async () => {
|
||||
anthropicProviderOff = new smartai.AnthropicProvider({
|
||||
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
|
||||
extendedThinking: 'off',
|
||||
});
|
||||
await anthropicProviderOff.start();
|
||||
expect(anthropicProviderOff).toBeInstanceOf(smartai.AnthropicProvider);
|
||||
});
|
||||
|
||||
tap.test('Extended Thinking: should chat with thinking disabled', async () => {
|
||||
const userMessage = 'What is 2 + 2?';
|
||||
const response = await anthropicProviderOff.chat({
|
||||
systemMessage: 'You are a helpful assistant.',
|
||||
userMessage: userMessage,
|
||||
messageHistory: [],
|
||||
});
|
||||
console.log(`Thinking Off - User: ${userMessage}`);
|
||||
console.log(`Thinking Off - Response: ${response.message}`);
|
||||
expect(response.role).toEqual('assistant');
|
||||
expect(response.message).toBeTruthy();
|
||||
expect(response.message).toInclude('4');
|
||||
});
|
||||
|
||||
tap.test('Extended Thinking: should stop off mode provider', async () => {
|
||||
await anthropicProviderOff.stop();
|
||||
});
|
||||
|
||||
// Test with vision method
|
||||
tap.test('Extended Thinking: should work with vision method', async () => {
|
||||
const provider = new smartai.AnthropicProvider({
|
||||
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
|
||||
extendedThinking: 'normal',
|
||||
});
|
||||
await provider.start();
|
||||
|
||||
// Create a simple test image (1x1 red pixel PNG)
|
||||
const redPixelPng = Buffer.from(
|
||||
'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8DwHwAFBQIAX8jx0gAAAABJRU5ErkJggg==',
|
||||
'base64'
|
||||
);
|
||||
|
||||
const response = await provider.vision({
|
||||
image: redPixelPng,
|
||||
prompt: 'What color is this image?',
|
||||
});
|
||||
|
||||
console.log(`Vision with Thinking - Response: ${response}`);
|
||||
expect(response).toBeTruthy();
|
||||
expect(response.toLowerCase()).toInclude('red');
|
||||
|
||||
await provider.stop();
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,96 +0,0 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
|
||||
|
||||
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
const smartfs = new SmartFs(new SmartFsProviderNode());
|
||||
|
||||
import * as smartai from '../ts/index.js';
|
||||
|
||||
let anthropicProvider: smartai.AnthropicProvider;
|
||||
|
||||
tap.test('Anthropic Vision: should create and start Anthropic provider', async () => {
|
||||
anthropicProvider = new smartai.AnthropicProvider({
|
||||
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
|
||||
});
|
||||
await anthropicProvider.start();
|
||||
expect(anthropicProvider).toBeInstanceOf(smartai.AnthropicProvider);
|
||||
});
|
||||
|
||||
tap.test('Anthropic Vision: should analyze coffee image with latte art', async () => {
|
||||
// Test 1: Coffee image from Unsplash by Dani
|
||||
const imagePath = './test/testimages/coffee-dani/coffee.jpg';
|
||||
console.log(`Loading coffee image from: ${imagePath}`);
|
||||
|
||||
const imageBuffer = await smartfs.file(imagePath).read();
|
||||
console.log(`Image loaded, size: ${imageBuffer.length} bytes`);
|
||||
|
||||
const result = await anthropicProvider.vision({
|
||||
image: imageBuffer,
|
||||
prompt: 'Describe this coffee image. What do you see in terms of the cup, foam pattern, and overall composition?'
|
||||
});
|
||||
|
||||
console.log(`Anthropic Vision (Coffee) - Result: ${result}`);
|
||||
expect(result).toBeTruthy();
|
||||
expect(typeof result).toEqual('string');
|
||||
expect(result.toLowerCase()).toInclude('coffee');
|
||||
// The image has a heart pattern in the latte art
|
||||
const mentionsLatte = result.toLowerCase().includes('heart') ||
|
||||
result.toLowerCase().includes('latte') ||
|
||||
result.toLowerCase().includes('foam');
|
||||
expect(mentionsLatte).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('Anthropic Vision: should analyze laptop/workspace image', async () => {
|
||||
// Test 2: Laptop image from Unsplash by Nicolas Bichon
|
||||
const imagePath = './test/testimages/laptop-nicolas/laptop.jpg';
|
||||
console.log(`Loading laptop image from: ${imagePath}`);
|
||||
|
||||
const imageBuffer = await smartfs.file(imagePath).read();
|
||||
console.log(`Image loaded, size: ${imageBuffer.length} bytes`);
|
||||
|
||||
const result = await anthropicProvider.vision({
|
||||
image: imageBuffer,
|
||||
prompt: 'Describe the technology and workspace setup in this image. What devices and equipment can you see?'
|
||||
});
|
||||
|
||||
console.log(`Anthropic Vision (Laptop) - Result: ${result}`);
|
||||
expect(result).toBeTruthy();
|
||||
expect(typeof result).toEqual('string');
|
||||
// Should mention laptop, computer, keyboard, or desk
|
||||
const mentionsTech = result.toLowerCase().includes('laptop') ||
|
||||
result.toLowerCase().includes('computer') ||
|
||||
result.toLowerCase().includes('keyboard') ||
|
||||
result.toLowerCase().includes('desk');
|
||||
expect(mentionsTech).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('Anthropic Vision: should analyze receipt/document image', async () => {
|
||||
// Test 3: Receipt image from Unsplash by Annie Spratt
|
||||
const imagePath = './test/testimages/receipt-annie/receipt.jpg';
|
||||
console.log(`Loading receipt image from: ${imagePath}`);
|
||||
|
||||
const imageBuffer = await smartfs.file(imagePath).read();
|
||||
console.log(`Image loaded, size: ${imageBuffer.length} bytes`);
|
||||
|
||||
const result = await anthropicProvider.vision({
|
||||
image: imageBuffer,
|
||||
prompt: 'What type of document is this? Can you identify any text or numbers visible in the image?'
|
||||
});
|
||||
|
||||
console.log(`Anthropic Vision (Receipt) - Result: ${result}`);
|
||||
expect(result).toBeTruthy();
|
||||
expect(typeof result).toEqual('string');
|
||||
// Should mention receipt, document, text, or paper
|
||||
const mentionsDocument = result.toLowerCase().includes('receipt') ||
|
||||
result.toLowerCase().includes('document') ||
|
||||
result.toLowerCase().includes('text') ||
|
||||
result.toLowerCase().includes('paper');
|
||||
expect(mentionsDocument).toBeTrue();
|
||||
});
|
||||
|
||||
// Teardown step: releases whatever resources the shared Anthropic provider
// holds so the test process can exit cleanly.
tap.test('Anthropic Vision: should stop the provider', async () => {
  await anthropicProvider.stop();
});
// Kick off the tap runner; the resulting promise is this module's default export.
export default tap.start();
66
test/test.vision.ts
Normal file
66
test/test.vision.ts
Normal file
@@ -0,0 +1,66 @@
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
import { getModel } from '../ts/index.js';
|
||||
import { analyzeImage } from '../ts_vision/index.js';
|
||||
|
||||
// Env-var resolver: reads from ./ first, falling back to the ./.nogit/ store.
const testQenv = new qenv.Qenv('./', './.nogit/');
tap.test('analyzeImage should describe a test image', async () => {
|
||||
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
|
||||
if (!apiKey) {
|
||||
console.log('ANTHROPIC_TOKEN not set, skipping test');
|
||||
return;
|
||||
}
|
||||
|
||||
// Find an image file recursively in testimages/
|
||||
const testImageDir = path.join(process.cwd(), 'test', 'testimages');
|
||||
if (!fs.existsSync(testImageDir)) {
|
||||
console.log('No test images directory found, skipping test');
|
||||
return;
|
||||
}
|
||||
|
||||
const findImage = (dir: string): string | null => {
|
||||
for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
|
||||
const fullPath = path.join(dir, entry.name);
|
||||
if (entry.isDirectory()) {
|
||||
const found = findImage(fullPath);
|
||||
if (found) return found;
|
||||
} else if (/\.(jpg|jpeg|png)$/i.test(entry.name)) {
|
||||
return fullPath;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
};
|
||||
|
||||
const imagePath = findImage(testImageDir);
|
||||
if (!imagePath) {
|
||||
console.log('No test images found, skipping test');
|
||||
return;
|
||||
}
|
||||
|
||||
const imageBuffer = fs.readFileSync(imagePath);
|
||||
const ext = path.extname(imagePath).toLowerCase();
|
||||
const mediaType = ext === '.png' ? 'image/png' : 'image/jpeg';
|
||||
|
||||
const model = getModel({
|
||||
provider: 'anthropic',
|
||||
model: 'claude-sonnet-4-5-20250929',
|
||||
apiKey,
|
||||
promptCaching: false,
|
||||
});
|
||||
|
||||
const result = await analyzeImage({
|
||||
model,
|
||||
image: imageBuffer,
|
||||
prompt: 'Describe this image briefly.',
|
||||
mediaType: mediaType as 'image/jpeg' | 'image/png',
|
||||
});
|
||||
|
||||
console.log('Vision result:', result);
|
||||
expect(result).toBeTruthy();
|
||||
expect(result.length).toBeGreaterThan(10);
|
||||
});
|
||||
|
||||
// Start the tap runner for this test file and export its promise as the default.
export default tap.start();
Reference in New Issue
Block a user