Compare commits

..

19 Commits

| SHA1 | Message | Checks (Default / tags) | Date |
|------|---------|-------------------------|------|
| 2791d738d6 | 0.7.3 | security ✗ 22s, test ✗ 14s, release/metadata skipped | 2025-10-03 14:21:25 +00:00 |
| 3fbd054985 | fix(tests): Add extensive provider/feature tests and local Claude CI permissions | | 2025-10-03 14:21:25 +00:00 |
| 8e8830ef92 | 0.7.2 | security ✗ 14s, test ✗ 14s, release/metadata skipped | 2025-10-03 13:51:49 +00:00 |
| 34931875ad | fix(anthropic): Update Anthropic provider branding to Claude Sonnet 4.5 and add local Claude permissions | | 2025-10-03 13:51:49 +00:00 |
| 2672509d3f | 0.7.1 | security ✗ 23s, test ✗ 13s, release/metadata skipped | 2025-10-03 13:49:46 +00:00 |
| ee3a635852 | fix(docs): Add README image generation docs and .claude local settings | | 2025-10-03 13:49:46 +00:00 |
| a222b1c2fa | 0.7.0 | security ✗ 24s, test ✗ 15s, release/metadata skipped | 2025-10-03 13:43:29 +00:00 |
| f0556e89f3 | feat(providers): Add research API and image generation/editing support; extend providers and tests | | 2025-10-03 13:43:29 +00:00 |
| fe8540c8ba | feat(research): Implement research APIs. | | 2025-10-03 12:50:42 +00:00 |
| e34bf19698 | 0.6.1 | security ✗ 21s, test ✗ 13s, release/metadata skipped | 2025-09-28 15:51:50 +00:00 |
| f70353e6ca | fix(provider.anthropic): Fix Anthropic research tool identifier and add tests + local Claude permissions | | 2025-09-28 15:51:50 +00:00 |
| 0403443634 | 0.6.0 | security ✗ 23s, test ✗ 13s, release/metadata skipped | 2025-09-28 15:06:07 +00:00 |
| e2ed429aac | feat(research): Introduce research API with provider implementations, docs and tests | | 2025-09-28 15:06:07 +00:00 |
| 5c856ec3ed | 0.5.11 | security ✗ 21s, test ✗ 12s, release/metadata skipped | 2025-08-12 13:15:37 +00:00 |
| 052f37294d | fix(openaiProvider): Update default chat model to gpt-5-mini and bump dependency versions | | 2025-08-12 13:15:36 +00:00 |
| 93bb375059 | fix(dependencies): Update SmartPdf to v4.1.1 for enhanced PDF processing capabilities | security ✗ 19s, test ✗ 18s, release/metadata skipped | 2025-08-03 08:17:24 +00:00 |
| 574f7a594c | fix(documentation): remove contribution section from readme | security ✗ 23s, test ✗ 12s, release/metadata skipped | 2025-08-01 18:37:26 +00:00 |
| 0b2a058550 | fix(core): improve SmartPdf lifecycle management and update dependencies | security ✗ 19s, test ✗ 16s, release/metadata skipped | 2025-08-01 18:25:46 +00:00 |
| 88d15c89e5 | 0.5.6 | security ✗ 24s, test ✗ 13s, release/metadata skipped | 2025-07-26 16:17:11 +00:00 |
34 changed files with 4007 additions and 487 deletions

changelog.md

@@ -1,5 +1,98 @@
# Changelog

## 2025-10-03 - 0.7.3 - fix(tests)
Add extensive provider/feature tests and local Claude CI permissions

- Add many focused test files covering providers and features: OpenAI, Anthropic, Perplexity, Groq, Ollama, Exo, XAI (chat, audio, vision, document, research, image generation, stubs, interfaces, basic)
- Introduce .claude/settings.local.json to declare allowed permissions for local Claude/CI actions
- Replace older aggregated test files with modular per-feature tests (removed legacy combined tests and split into smaller suites)
- No changes to library runtime code — this change adds tests and CI/local agent configuration only

## 2025-10-03 - 0.7.2 - fix(anthropic)
Update Anthropic provider branding to Claude Sonnet 4.5 and add local Claude permissions

- Docs: Replace 'Claude 3 Opus' with 'Claude Sonnet 4.5' in README provider capabilities matrix.
- Config: Add .claude/settings.local.json to define local Claude permissions for tests and development commands.

## 2025-10-03 - 0.7.1 - fix(docs)
Add README image generation docs and .claude local settings

- Add .claude/settings.local.json with permission allow-list for local assistant tooling and web search
- Update README provider capabilities table to include an Images column and reference gpt-image-1
- Add Image Generation & Editing section with examples, options, and gpt-image-1 advantages
- Mark image generation support as implemented in the roadmap and remove duplicate entry

## 2025-10-03 - 0.7.0 - feat(providers)
Add research API and image generation/editing support; extend providers and tests

- Introduce ResearchOptions and ResearchResponse to the MultiModalModel interface and implement research() where supported
- OpenAiProvider: implement research(), add imageGenerate() and imageEdit() methods (gpt-image-1 / DALL·E support), and expose imageModel option
- AnthropicProvider: implement research() and vision handling; explicitly throw for unsupported image generation/editing
- PerplexityProvider: implement research() (sonar / sonar-pro support) and expose citation parsing
- Add image/document-related interfaces (ImageGenerateOptions, ImageEditOptions, ImageResponse) to abstract API
- Add image generation/editing stubs for other providers (Exo, Groq, Ollama, XAI) that throw informative errors to preserve API compatibility
- Add comprehensive OpenAI image generation tests and helper to save test outputs (test/test.image.openai.ts)
- Update README with Research & Web Search documentation, capability matrix, and roadmap entry for Research & Web Search API
- Add local Claude agent permissions file (.claude/settings.local.json) and various provider type/import updates

## 2025-09-28 - 0.6.1 - fix(provider.anthropic)
Fix Anthropic research tool identifier and add tests + local Claude permissions

- Replace Anthropic research tool type from 'computer_20241022' to 'web_search_20250305' to match the expected web-search tool schema.
- Add comprehensive test suites and fixtures for providers and research features (new/updated tests under test/ including anthropic, openai, research.* and stubs).
- Fix test usage of XAI provider class name (use XAIProvider) and adjust basic provider test expectations (provider instantiation moved to start()).
- Add .claude/settings.local.json with local Claude permissions to allow common CI/dev commands and web search during testing.

## 2025-09-28 - 0.6.0 - feat(research)
Introduce research API with provider implementations, docs and tests

- Add ResearchOptions and ResearchResponse interfaces and a new abstract research() method to MultiModalModel
- Implement research() for OpenAiProvider (deep research model selection, optional web search/tools, background flag, source extraction)
- Implement research() for AnthropicProvider (web search tool support, domain filters, citation extraction)
- Implement research() for PerplexityProvider (sonar / sonar-pro model usage and citation parsing)
- Add research() stubs to Exo, Groq, Ollama and XAI providers that throw a clear 'not yet supported' error to preserve interface compatibility
- Add tests for research interfaces and provider research methods (test files updated/added)
- Add documentation: readme.research.md describing the research API, usage and configuration
- Export additional providers from ts/index.ts and update provider typings/imports across files
- Add a 'typecheck' script to package.json
- Add .claude/settings.local.json (local agent permissions for CI/dev tasks)

## 2025-08-12 - 0.5.11 - fix(openaiProvider)
Update default chat model to gpt-5-mini and bump dependency versions

- Changed default chat model in OpenAiProvider from 'o3-mini' and 'o4-mini' to 'gpt-5-mini'
- Upgraded @anthropic-ai/sdk from ^0.57.0 to ^0.59.0
- Upgraded openai from ^5.11.0 to ^5.12.2
- Added new local Claude settings configuration (.claude/settings.local.json)

## 2025-08-03 - 0.5.10 - fix(dependencies)
Update SmartPdf to v4.1.1 for enhanced PDF processing capabilities

- Updated @push.rocks/smartpdf from ^3.3.0 to ^4.1.1
- Enhanced PDF conversion with improved scale options and quality controls
- Dependency updates for better performance and compatibility

## 2025-08-01 - 0.5.9 - fix(documentation)
Remove contribution section from readme

- Removed the contribution section from readme.md as requested
- Kept the roadmap section for future development plans

## 2025-08-01 - 0.5.8 - fix(core)
Fix SmartPdf lifecycle management and update dependencies

- Moved SmartPdf instance management to the MultiModalModel base class for better resource sharing
- Fixed memory leaks by properly implementing cleanup in the base class stop() method
- Updated SmartAi class to properly stop all providers on shutdown
- Updated @push.rocks/smartrequest from v2.1.0 to v4.2.1 with migration to new API
- Enhanced readme with professional documentation and feature matrix

## 2025-07-26 - 0.5.7 - fix(provider.openai)
Fix stream type mismatch in audio method

- Fixed type error where OpenAI SDK returns a web ReadableStream but the audio method needs to return a Node.js ReadableStream
- Added conversion using Node.js's built-in Readable.fromWeb() method

## 2025-07-25 - 0.5.5 - feat(documentation)
Comprehensive documentation enhancement and test improvements

package.json

@@ -1,6 +1,6 @@
 {
   "name": "@push.rocks/smartai",
-  "version": "0.5.5",
+  "version": "0.7.3",
   "private": false,
   "description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
   "main": "dist_ts/index.js",
@@ -10,6 +10,7 @@
   "license": "MIT",
   "scripts": {
     "test": "(tstest test/ --web --verbose)",
+    "typecheck": "tsbuild check",
     "build": "(tsbuild --web --allowimplicitany)",
     "buildDocs": "(tsdoc)"
   },
@@ -23,15 +24,15 @@
     "@types/node": "^22.15.17"
   },
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.57.0",
+    "@anthropic-ai/sdk": "^0.59.0",
     "@push.rocks/smartarray": "^1.1.0",
     "@push.rocks/smartfile": "^11.2.5",
-    "@push.rocks/smartpath": "^5.0.18",
+    "@push.rocks/smartpath": "^6.0.0",
-    "@push.rocks/smartpdf": "^3.2.2",
+    "@push.rocks/smartpdf": "^4.1.1",
     "@push.rocks/smartpromise": "^4.2.3",
-    "@push.rocks/smartrequest": "^2.1.0",
+    "@push.rocks/smartrequest": "^4.2.1",
     "@push.rocks/webstream": "^1.0.10",
-    "openai": "^5.10.2"
+    "openai": "^5.12.2"
   },
   "repository": {
     "type": "git",

pnpm-lock.yaml (generated, 1354 changed lines; diff suppressed because it is too large)

readme.md (804 changed lines)

@@ -1,393 +1,595 @@
# @push.rocks/smartai

**One API to rule them all** 🚀

[![npm version](https://img.shields.io/npm/v/@push.rocks/smartai.svg)](https://www.npmjs.com/package/@push.rocks/smartai)
[![TypeScript](https://img.shields.io/badge/TypeScript-5.x-blue.svg)](https://www.typescriptlang.org/)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)

SmartAI unifies the world's leading AI providers - OpenAI, Anthropic, Perplexity, Ollama, Groq, XAI, and Exo - under a single, elegant TypeScript interface. Build AI applications at lightning speed without vendor lock-in.

## 🎯 Why SmartAI?

- **🔌 Universal Interface**: Write once, run with any AI provider. Switch between GPT-4, Claude, Llama, or Grok with a single line change.
- **🛡️ Type-Safe**: Full TypeScript support with comprehensive type definitions for all operations
- **🌊 Streaming First**: Built for real-time applications with native streaming support
- **🎨 Multi-Modal**: Seamlessly work with text, images, audio, and documents
- **🏠 Local & Cloud**: Support for both cloud providers and local models via Ollama
- **⚡ Zero Lock-In**: Your code remains portable across all AI providers

## 🚀 Quick Start

```bash
npm install @push.rocks/smartai
```

```typescript
import { SmartAi } from '@push.rocks/smartai';

// Initialize with your favorite providers
const ai = new SmartAi({
  openaiToken: 'sk-...',
  anthropicToken: 'sk-ant-...'
});

await ai.start();

// Same API, multiple providers
const response = await ai.openaiProvider.chat({
  systemMessage: 'You are a helpful assistant.',
  userMessage: 'Explain quantum computing in simple terms',
  messageHistory: []
});
```
## 📊 Provider Capabilities Matrix

Choose the right provider for your use case:

| Provider | Chat | Streaming | TTS | Vision | Documents | Research | Images | Highlights |
|----------|:----:|:---------:|:---:|:------:|:---------:|:--------:|:------:|------------|
| **OpenAI** | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | • gpt-image-1<br>• DALL-E 3<br>• Deep research API |
| **Anthropic** | ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ❌ | • Claude Sonnet 4.5<br>• Superior reasoning<br>• Web search API |
| **Ollama** | ✅ | ✅ | ❌ | ✅ | ✅ | ❌ | ❌ | • 100% local<br>• Privacy-first<br>• No API costs |
| **XAI** | ✅ | ✅ | ❌ | ❌ | ✅ | ❌ | ❌ | • Grok models<br>• Real-time data<br>• Uncensored |
| **Perplexity** | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | • Web-aware<br>• Research-focused<br>• Sonar Pro models |
| **Groq** | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | • 10x faster<br>• LPU inference<br>• Low latency |
| **Exo** | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | • Distributed<br>• P2P compute<br>• Decentralized |

## 🎮 Core Features

### 💬 Universal Chat Interface

Works identically across all providers:

```typescript
// Use GPT-4 for complex reasoning
const gptResponse = await ai.openaiProvider.chat({
  systemMessage: 'You are an expert physicist.',
  userMessage: 'Explain the implications of quantum entanglement',
  messageHistory: []
});

// Use Claude for safety-critical applications
const claudeResponse = await ai.anthropicProvider.chat({
  systemMessage: 'You are a medical advisor.',
  userMessage: 'Review this patient data for concerns',
  messageHistory: []
});

// Use Groq for lightning-fast responses
const groqResponse = await ai.groqProvider.chat({
  systemMessage: 'You are a code reviewer.',
  userMessage: 'Quick! Find the bug in this code: ...',
  messageHistory: []
});
```
### 🌊 Real-Time Streaming

Build responsive chat interfaces with token-by-token streaming:

```typescript
// Create a chat stream
const stream = await ai.openaiProvider.chatStream(inputStream);
const reader = stream.getReader();

// Display responses as they arrive
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  // Update UI in real-time
  process.stdout.write(value);
}
```
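The `inputStream` argument above is any readable stream carrying the user's message. A minimal sketch of building one, following the JSON message shape used for `chatStream` in earlier revisions of this readme (the helper name `createInputStream` is ours, not part of the library):

```typescript
// Sketch: wrap a single user message in the ReadableStream that chatStream consumes.
function createInputStream(userMessage: string): ReadableStream<Uint8Array> {
  return new ReadableStream({
    start(controller) {
      controller.enqueue(new TextEncoder().encode(JSON.stringify({
        role: 'user',
        content: userMessage
      })));
      controller.close();
    }
  });
}

const inputStream = createInputStream('Write a haiku about programming');
```

The later `createInputStream(userQuery)` call under Performance Tips assumes a helper of this shape.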
### 🎙️ Text-to-Speech

Generate natural voices with OpenAI:

```typescript
const audioStream = await ai.openaiProvider.audio({
  message: 'Welcome to the future of AI development!'
});

// Stream directly to speakers
audioStream.pipe(speakerOutput);

// Or save to file
audioStream.pipe(fs.createWriteStream('welcome.mp3'));
```
### 👁️ Vision Analysis

Understand images with multiple providers:

```typescript
const image = fs.readFileSync('product-photo.jpg');

// OpenAI: General purpose vision
const gptVision = await ai.openaiProvider.vision({
  image,
  prompt: 'Describe this product and suggest marketing angles'
});

// Anthropic: Detailed analysis
const claudeVision = await ai.anthropicProvider.vision({
  image,
  prompt: 'Identify any safety concerns or defects'
});

// Ollama: Private, local analysis
const ollamaVision = await ai.ollamaProvider.vision({
  image,
  prompt: 'Extract all text and categorize the content'
});
```
### 📄 Document Intelligence

Extract insights from PDFs with AI:

```typescript
const contract = fs.readFileSync('contract.pdf');
const invoice = fs.readFileSync('invoice.pdf');

// Analyze documents
const analysis = await ai.openaiProvider.document({
  systemMessage: 'You are a legal expert.',
  userMessage: 'Compare these documents and highlight key differences',
  messageHistory: [],
  pdfDocuments: [contract, invoice]
});

// Multi-document analysis
const taxDocs = [form1099, w2, receipts];
const taxAnalysis = await ai.anthropicProvider.document({
  systemMessage: 'You are a tax advisor.',
  userMessage: 'Prepare a tax summary from these documents',
  messageHistory: [],
  pdfDocuments: taxDocs
});
```
### 🔬 Research & Web Search

Perform deep research with web search capabilities across multiple providers:

```typescript
// OpenAI Deep Research - Comprehensive analysis
const deepResearch = await ai.openaiProvider.research({
  query: 'What are the latest developments in quantum computing?',
  searchDepth: 'deep',
  includeWebSearch: true
});

console.log(deepResearch.answer);
console.log('Sources:', deepResearch.sources);

// Anthropic Web Search - Domain-filtered research
const anthropic = new AnthropicProvider({
  anthropicToken: 'sk-ant-...',
  enableWebSearch: true,
  searchDomainAllowList: ['nature.com', 'science.org']
});

const scientificResearch = await anthropic.research({
  query: 'Latest breakthroughs in CRISPR gene editing',
  searchDepth: 'advanced'
});

// Perplexity - Research-focused with citations
const perplexityResearch = await ai.perplexityProvider.research({
  query: 'Current state of autonomous vehicle technology',
  searchDepth: 'deep' // Uses Sonar Pro model
});
```

**Research Options** (combined in a sketch after the provider list below):

- `searchDepth`: 'basic' | 'advanced' | 'deep'
- `maxSources`: Number of sources to include
- `includeWebSearch`: Enable web search (OpenAI)
- `background`: Run as background task (OpenAI)

**Supported Providers:**

- **OpenAI**: Deep Research API with specialized models (`o3-deep-research-2025-06-26`, `o4-mini-deep-research-2025-06-26`)
- **Anthropic**: Web Search API with domain filtering
- **Perplexity**: Sonar and Sonar Pro models with built-in citations
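A minimal sketch combining the options above into one call (the query and option values are illustrative, and exact behavior depends on the provider implementation):

```typescript
// Sketch: a deep OpenAI research request using the documented options.
const report = await ai.openaiProvider.research({
  query: 'Compare current approaches to retrieval-augmented generation',
  searchDepth: 'deep',
  includeWebSearch: true,
  maxSources: 10,   // cap how many sources are returned
  background: true  // OpenAI only: run as a background task
});

console.log(report.answer);
for (const source of report.sources ?? []) {
  console.log('-', source);
}
```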
### 🎨 Image Generation & Editing

Generate and edit images with OpenAI's cutting-edge models:

```typescript
// Basic image generation with gpt-image-1
const image = await ai.openaiProvider.imageGenerate({
  prompt: 'A futuristic robot assistant in a modern office, digital art',
  model: 'gpt-image-1',
  quality: 'high',
  size: '1024x1024'
});

// Save the generated image
const imageBuffer = Buffer.from(image.images[0].b64_json!, 'base64');
fs.writeFileSync('robot.png', imageBuffer);

// Advanced: Transparent background with custom format
const logo = await ai.openaiProvider.imageGenerate({
  prompt: 'Minimalist mountain peak logo, geometric design',
  model: 'gpt-image-1',
  quality: 'high',
  size: '1024x1024',
  background: 'transparent',
  outputFormat: 'png'
});

// WebP with compression for web use
const webImage = await ai.openaiProvider.imageGenerate({
  prompt: 'Product showcase: sleek smartphone on marble surface',
  model: 'gpt-image-1',
  quality: 'high',
  size: '1536x1024',
  outputFormat: 'webp',
  outputCompression: 85
});

// Superior text rendering (gpt-image-1's strength)
const signage = await ai.openaiProvider.imageGenerate({
  prompt: 'Vintage cafe sign saying "COFFEE & CODE" in hand-lettered typography',
  model: 'gpt-image-1',
  quality: 'high',
  size: '1024x1024'
});

// Generate multiple variations at once
const variations = await ai.openaiProvider.imageGenerate({
  prompt: 'Abstract geometric pattern, colorful minimalist art',
  model: 'gpt-image-1',
  n: 3,
  quality: 'medium',
  size: '1024x1024'
});

// Edit an existing image
const editedImage = await ai.openaiProvider.imageEdit({
  image: originalImageBuffer,
  prompt: 'Add sunglasses and change the background to a beach sunset',
  model: 'gpt-image-1',
  quality: 'high'
});
```

**Image Generation Options:**

- `model`: 'gpt-image-1' | 'dall-e-3' | 'dall-e-2'
- `quality`: 'low' | 'medium' | 'high' | 'auto'
- `size`: Multiple aspect ratios up to 4096×4096
- `background`: 'transparent' | 'opaque' | 'auto'
- `outputFormat`: 'png' | 'jpeg' | 'webp'
- `outputCompression`: 0-100 for webp/jpeg
- `moderation`: 'low' | 'auto'
- `n`: Number of images (1-10)

**gpt-image-1 Advantages:**

- Superior text rendering in images
- Up to 4096×4096 resolution
- Transparent background support
- Advanced output formats (WebP with compression)
- Better prompt understanding
- Streaming support for progressive rendering
### 🔄 Persistent Conversations

Maintain context across interactions:

```typescript
// Create a coding assistant conversation
const assistant = ai.createConversation('openai');
await assistant.setSystemMessage('You are an expert TypeScript developer.');

// First question
const inputWriter = assistant.getInputStreamWriter();
await inputWriter.write('How do I implement a singleton pattern?');

// Continue the conversation
await inputWriter.write('Now show me how to make it thread-safe');

// The assistant remembers the entire context
```
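Reading the assistant's replies goes through the conversation's output stream. A sketch based on the `getOutputStream()` pattern shown in earlier revisions of this readme:

```typescript
// Sketch: consume the assistant's streamed replies for the conversation above.
const outputStream = assistant.getOutputStream();
const reader = outputStream.getReader();
const decoder = new TextDecoder();

while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  console.log('Assistant:', decoder.decode(value));
}
```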
## 🚀 Real-World Examples

### Build a Customer Support Bot

```typescript
const supportBot = new SmartAi({
  anthropicToken: process.env.ANTHROPIC_KEY // Claude for empathetic responses
});

async function handleCustomerQuery(query: string, history: ChatMessage[]) {
  try {
    const response = await supportBot.anthropicProvider.chat({
      systemMessage: `You are a helpful customer support agent.
        Be empathetic, professional, and solution-oriented.`,
      userMessage: query,
      messageHistory: history
    });
    return response.message;
  } catch (error) {
    // Fallback to another provider if needed
    return await supportBot.openaiProvider.chat({...});
  }
}
```
### Create a Code Review Assistant

```typescript
const codeReviewer = new SmartAi({
  groqToken: process.env.GROQ_KEY // Groq for speed
});

async function reviewCode(code: string, language: string) {
  const startTime = Date.now();

  const review = await codeReviewer.groqProvider.chat({
    systemMessage: `You are a ${language} expert. Review code for:
      - Security vulnerabilities
      - Performance issues
      - Best practices
      - Potential bugs`,
    userMessage: `Review this code:\n\n${code}`,
    messageHistory: []
  });

  console.log(`Review completed in ${Date.now() - startTime}ms`);
  return review.message;
}
```

### Build a Research Assistant

```typescript
const researcher = new SmartAi({
  perplexityToken: process.env.PERPLEXITY_KEY
});

async function research(topic: string) {
  // Perplexity excels at web-aware research
  const findings = await researcher.perplexityProvider.chat({
    systemMessage: 'You are a research assistant. Provide factual, cited information.',
    userMessage: `Research the latest developments in ${topic}`,
    messageHistory: []
  });

  return findings.message;
}
```
### Local AI for Sensitive Data

```typescript
const localAI = new SmartAi({
  ollama: {
    baseUrl: 'http://localhost:11434',
    model: 'llama2',
    visionModel: 'llava'
  }
});

// Process sensitive documents without leaving your infrastructure
async function analyzeSensitiveDoc(pdfBuffer: Buffer) {
  const analysis = await localAI.ollamaProvider.document({
    systemMessage: 'Extract and summarize key information.',
    userMessage: 'Analyze this confidential document',
    messageHistory: [],
    pdfDocuments: [pdfBuffer]
  });

  // Data never leaves your servers
  return analysis.message;
}
```
## ⚡ Performance Tips

### 1. Provider Selection Strategy

```typescript
class SmartAIRouter {
  constructor(private ai: SmartAi) {}

  async query(message: string, requirements: {
    speed?: boolean;
    accuracy?: boolean;
    cost?: boolean;
    privacy?: boolean;
  }) {
    if (requirements.privacy) {
      return this.ai.ollamaProvider.chat({...}); // Local only
    }
    if (requirements.speed) {
      return this.ai.groqProvider.chat({...}); // 10x faster
    }
    if (requirements.accuracy) {
      return this.ai.anthropicProvider.chat({...}); // Best reasoning
    }
    // Default fallback
    return this.ai.openaiProvider.chat({...});
  }
}
```
### 2. Streaming for Large Responses

```typescript
// Don't wait for the entire response
async function streamResponse(userQuery: string) {
  const stream = await ai.openaiProvider.chatStream(createInputStream(userQuery));

  // Process tokens as they arrive
  for await (const chunk of stream) {
    updateUI(chunk);           // Immediate feedback
    await processChunk(chunk); // Parallel processing
  }
}
```
### 3. Parallel Multi-Provider Queries

```typescript
// Get the best answer from multiple AIs
async function consensusQuery(question: string) {
  const providers = [
    ai.openaiProvider.chat({...}),
    ai.anthropicProvider.chat({...}),
    ai.perplexityProvider.chat({...})
  ];

  const responses = await Promise.all(providers);
  return synthesizeResponses(responses);
}
```
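`synthesizeResponses` is not defined above; a deliberately naive placeholder (our assumption, not a library function) could be:

```typescript
// Hypothetical helper: label each provider's answer for side-by-side comparison.
// Real synthesis (voting, ranking, merging) is application-specific.
function synthesizeResponses(responses: { message: string }[]): string {
  return responses
    .map((response, index) => `Answer ${index + 1}:\n${response.message}`)
    .join('\n\n');
}
```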
## 🛠️ Advanced Features

### Custom Streaming Transformations

```typescript
// Add real-time translation
const translationStream = new TransformStream({
  async transform(chunk, controller) {
    const translated = await translateChunk(chunk);
    controller.enqueue(translated);
  }
});

const responseStream = await ai.openaiProvider.chatStream(input);
const translatedStream = responseStream.pipeThrough(translationStream);
```
### Error Handling & Fallbacks

```typescript
class ResilientAI {
  private providers = ['openai', 'anthropic', 'groq'];

  async query(opts: ChatOptions): Promise<ChatResponse> {
    for (const provider of this.providers) {
      try {
        return await this.ai[`${provider}Provider`].chat(opts);
      } catch (error) {
        console.warn(`${provider} failed, trying next...`);
        continue;
      }
    }
    throw new Error('All providers failed');
  }
}
```

### Token Counting & Cost Management

```typescript
// Track usage across providers
class UsageTracker {
  async trackedChat(provider: string, options: ChatOptions) {
    const start = Date.now();
    const response = await ai[`${provider}Provider`].chat(options);

    const usage = {
      provider,
      duration: Date.now() - start,
      inputTokens: estimateTokens(options),
      outputTokens: estimateTokens(response.message)
    };

    await this.logUsage(usage);
    return response;
  }
}
```
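`estimateTokens` above is likewise not provided by the library. A crude stand-in, assuming the common rough heuristic of ~4 characters per token for English text:

```typescript
// Hypothetical helper: rough token estimate from character count.
// For real cost accounting, prefer the provider's reported usage or a proper tokenizer.
function estimateTokens(input: string | { userMessage: string }): number {
  const text = typeof input === 'string' ? input : input.userMessage;
  return Math.ceil(text.length / 4);
}
```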
## 📦 Installation & Setup

### Prerequisites

- Node.js 16+
- TypeScript 4.5+
- API keys for your chosen providers

### Environment Setup

```bash
# Install
npm install @push.rocks/smartai

# Set up environment variables
export OPENAI_API_KEY=sk-...
export ANTHROPIC_API_KEY=sk-ant-...
export PERPLEXITY_API_KEY=pplx-...
# ... etc
```

### TypeScript Configuration

```json
{
  "compilerOptions": {
    "target": "ES2022",
    "module": "NodeNext",
    "lib": ["ES2022"],
    "strict": true,
    "esModuleInterop": true,
    "skipLibCheck": true
  }
}
```
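With the variables from the environment setup in place, wiring them into the constructor is straightforward (a sketch; use whichever variable names you actually exported):

```typescript
// Sketch: read exported environment variables into the SmartAi constructor.
const ai = new SmartAi({
  openaiToken: process.env.OPENAI_API_KEY,
  anthropicToken: process.env.ANTHROPIC_API_KEY,
  perplexityToken: process.env.PERPLEXITY_API_KEY
});

await ai.start();
```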
## 🎯 Choosing the Right Provider

| Use Case | Recommended Provider | Why |
|----------|---------------------|-----|
| **General Purpose** | OpenAI | Most features, stable, well-documented |
| **Complex Reasoning** | Anthropic | Superior logical thinking, safer outputs |
| **Research & Facts** | Perplexity | Web-aware, provides citations |
| **Deep Research** | OpenAI | Deep Research API with comprehensive analysis |
| **Speed Critical** | Groq | 10x faster inference, sub-second responses |
| **Privacy Critical** | Ollama | 100% local, no data leaves your servers |
| **Real-time Data** | XAI | Access to current information |
| **Cost Sensitive** | Ollama/Exo | Free (local) or distributed compute |

## 📈 Roadmap

- [x] Research & Web Search API
- [x] Image generation support (gpt-image-1, DALL-E 3, DALL-E 2)
- [ ] Streaming function calls
- [ ] Voice input processing
- [ ] Fine-tuning integration
- [ ] Embedding support
- [ ] Agent framework
- [ ] More providers (Cohere, AI21, etc.)
## License and Legal Information

This repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository.

@@ -405,4 +607,4 @@ Registered at District court Bremen HRB 35230 HB, Germany

For any legal inquiries or if you require further information, please contact us via email at hello@task.vc.

By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.

test/test.audio.openai.ts (new file)

@@ -0,0 +1,39 @@
import { expect, tap } from '@push.rocks/tapbundle';
import * as qenv from '@push.rocks/qenv';
import * as smartfile from '@push.rocks/smartfile';
const testQenv = new qenv.Qenv('./', './.nogit/');
import * as smartai from '../ts/index.js';
let testSmartai: smartai.SmartAi;
tap.test('OpenAI Audio: should create a smartai instance with OpenAI provider', async () => {
testSmartai = new smartai.SmartAi({
openaiToken: await testQenv.getEnvVarOnDemand('OPENAI_TOKEN'),
});
await testSmartai.start();
});
tap.test('OpenAI Audio: should create audio response', async () => {
// Call the audio method with a sample message.
const audioStream = await testSmartai.openaiProvider.audio({
message: 'This is a test of audio generation.',
});
// Read all chunks from the stream.
const chunks: Uint8Array[] = [];
for await (const chunk of audioStream) {
chunks.push(chunk as Uint8Array);
}
const audioBuffer = Buffer.concat(chunks);
await smartfile.fs.toFs(audioBuffer, './.nogit/testoutput.mp3');
console.log(`Audio Buffer length: ${audioBuffer.length}`);
// Assert that the resulting buffer is not empty.
expect(audioBuffer.length).toBeGreaterThan(0);
});
tap.test('OpenAI Audio: should stop the smartai instance', async () => {
await testSmartai.stop();
});
export default tap.start();

test/test.audio.stubs.ts (new file)

@@ -0,0 +1,36 @@
import { expect, tap } from '@push.rocks/tapbundle';
import * as qenv from '@push.rocks/qenv';
const testQenv = new qenv.Qenv('./', './.nogit/');
import * as smartai from '../ts/index.js';
let anthropicProvider: smartai.AnthropicProvider;
tap.test('Audio Stubs: should create Anthropic provider', async () => {
anthropicProvider = new smartai.AnthropicProvider({
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
});
await anthropicProvider.start();
});
tap.test('Audio Stubs: Anthropic audio should throw not supported error', async () => {
let errorCaught = false;
try {
await anthropicProvider.audio({
message: 'This should fail'
});
} catch (error) {
errorCaught = true;
expect(error.message).toInclude('not yet supported');
}
expect(errorCaught).toBeTrue();
});
tap.test('Audio Stubs: should stop Anthropic provider', async () => {
await anthropicProvider.stop();
});
export default tap.start();

test/test.basic.ts (new file)

@@ -0,0 +1,93 @@
import { tap, expect } from '@push.rocks/tapbundle';
import * as smartai from '../ts/index.js';
// Basic instantiation tests that don't require API tokens
// These tests can run in CI/CD environments without credentials
tap.test('Basic: should create SmartAi instance', async () => {
const testSmartai = new smartai.SmartAi({
openaiToken: 'dummy-token-for-testing'
});
expect(testSmartai).toBeInstanceOf(smartai.SmartAi);
// Provider is only created after calling start()
expect(testSmartai.options.openaiToken).toEqual('dummy-token-for-testing');
});
tap.test('Basic: should instantiate OpenAI provider', async () => {
const openaiProvider = new smartai.OpenAiProvider({
openaiToken: 'dummy-token'
});
expect(openaiProvider).toBeInstanceOf(smartai.OpenAiProvider);
expect(typeof openaiProvider.chat).toEqual('function');
expect(typeof openaiProvider.audio).toEqual('function');
expect(typeof openaiProvider.vision).toEqual('function');
expect(typeof openaiProvider.document).toEqual('function');
expect(typeof openaiProvider.research).toEqual('function');
});
tap.test('Basic: should instantiate Anthropic provider', async () => {
const anthropicProvider = new smartai.AnthropicProvider({
anthropicToken: 'dummy-token'
});
expect(anthropicProvider).toBeInstanceOf(smartai.AnthropicProvider);
expect(typeof anthropicProvider.chat).toEqual('function');
expect(typeof anthropicProvider.audio).toEqual('function');
expect(typeof anthropicProvider.vision).toEqual('function');
expect(typeof anthropicProvider.document).toEqual('function');
expect(typeof anthropicProvider.research).toEqual('function');
});
tap.test('Basic: should instantiate Perplexity provider', async () => {
const perplexityProvider = new smartai.PerplexityProvider({
perplexityToken: 'dummy-token'
});
expect(perplexityProvider).toBeInstanceOf(smartai.PerplexityProvider);
expect(typeof perplexityProvider.chat).toEqual('function');
expect(typeof perplexityProvider.research).toEqual('function');
});
tap.test('Basic: should instantiate Groq provider', async () => {
const groqProvider = new smartai.GroqProvider({
groqToken: 'dummy-token'
});
expect(groqProvider).toBeInstanceOf(smartai.GroqProvider);
expect(typeof groqProvider.chat).toEqual('function');
expect(typeof groqProvider.research).toEqual('function');
});
tap.test('Basic: should instantiate Ollama provider', async () => {
const ollamaProvider = new smartai.OllamaProvider({
baseUrl: 'http://localhost:11434'
});
expect(ollamaProvider).toBeInstanceOf(smartai.OllamaProvider);
expect(typeof ollamaProvider.chat).toEqual('function');
expect(typeof ollamaProvider.research).toEqual('function');
});
tap.test('Basic: should instantiate xAI provider', async () => {
const xaiProvider = new smartai.XAIProvider({
xaiToken: 'dummy-token'
});
expect(xaiProvider).toBeInstanceOf(smartai.XAIProvider);
expect(typeof xaiProvider.chat).toEqual('function');
expect(typeof xaiProvider.research).toEqual('function');
});
tap.test('Basic: should instantiate Exo provider', async () => {
const exoProvider = new smartai.ExoProvider({
exoBaseUrl: 'http://localhost:8000'
});
expect(exoProvider).toBeInstanceOf(smartai.ExoProvider);
expect(typeof exoProvider.chat).toEqual('function');
expect(typeof exoProvider.research).toEqual('function');
});
tap.test('Basic: all providers should extend MultiModalModel', async () => {
const openai = new smartai.OpenAiProvider({ openaiToken: 'test' });
const anthropic = new smartai.AnthropicProvider({ anthropicToken: 'test' });
expect(openai).toBeInstanceOf(smartai.MultiModalModel);
expect(anthropic).toBeInstanceOf(smartai.MultiModalModel);
});
export default tap.start();

test/test.chat.anthropic.ts (new file)

@@ -0,0 +1,72 @@
import { expect, tap } from '@push.rocks/tapbundle';
import * as qenv from '@push.rocks/qenv';
const testQenv = new qenv.Qenv('./', './.nogit/');
import * as smartai from '../ts/index.js';
let anthropicProvider: smartai.AnthropicProvider;
tap.test('Anthropic Chat: should create and start Anthropic provider', async () => {
anthropicProvider = new smartai.AnthropicProvider({
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
});
await anthropicProvider.start();
expect(anthropicProvider).toBeInstanceOf(smartai.AnthropicProvider);
});
tap.test('Anthropic Chat: should create chat response', async () => {
const userMessage = 'What is the capital of France? Answer in one word.';
const response = await anthropicProvider.chat({
systemMessage: 'You are a helpful assistant. Be concise.',
userMessage: userMessage,
messageHistory: [],
});
console.log(`Anthropic Chat - User: ${userMessage}`);
console.log(`Anthropic Chat - Response: ${response.message}`);
expect(response.role).toEqual('assistant');
expect(response.message).toBeTruthy();
expect(response.message.toLowerCase()).toInclude('paris');
});
tap.test('Anthropic Chat: should handle message history', async () => {
const messageHistory: smartai.ChatMessage[] = [
{ role: 'user', content: 'My name is Claude Test' },
{ role: 'assistant', content: 'Nice to meet you, Claude Test!' }
];
const response = await anthropicProvider.chat({
systemMessage: 'You are a helpful assistant with good memory.',
userMessage: 'What is my name?',
messageHistory: messageHistory,
});
console.log(`Anthropic Memory Test - Response: ${response.message}`);
expect(response.message.toLowerCase()).toInclude('claude test');
});
tap.test('Anthropic Chat: should handle errors gracefully', async () => {
// Test with invalid message (empty)
let errorCaught = false;
try {
await anthropicProvider.chat({
systemMessage: '',
userMessage: '',
messageHistory: [],
});
} catch (error) {
errorCaught = true;
console.log('Expected error caught:', error.message);
}
// Anthropic might handle empty messages, so we don't assert error
console.log(`Error handling test - Error caught: ${errorCaught}`);
});
tap.test('Anthropic Chat: should stop the provider', async () => {
await anthropicProvider.stop();
});
export default tap.start();

test/test.chat.openai.ts (new file)

@@ -0,0 +1,34 @@
import { expect, tap } from '@push.rocks/tapbundle';
import * as qenv from '@push.rocks/qenv';
const testQenv = new qenv.Qenv('./', './.nogit/');
import * as smartai from '../ts/index.js';
let testSmartai: smartai.SmartAi;
tap.test('OpenAI Chat: should create a smartai instance with OpenAI provider', async () => {
testSmartai = new smartai.SmartAi({
openaiToken: await testQenv.getEnvVarOnDemand('OPENAI_TOKEN'),
});
await testSmartai.start();
});
tap.test('OpenAI Chat: should create chat response', async () => {
const userMessage = 'How are you?';
const response = await testSmartai.openaiProvider.chat({
systemMessage: 'Hello',
userMessage: userMessage,
messageHistory: [],
});
console.log(`userMessage: ${userMessage}`);
console.log(response.message);
expect(response.role).toEqual('assistant');
expect(response.message).toBeTruthy();
});
tap.test('OpenAI Chat: should stop the smartai instance', async () => {
await testSmartai.stop();
});
export default tap.start();

test/test.document.anthropic.ts (new file)

@@ -0,0 +1,78 @@
import { expect, tap } from '@push.rocks/tapbundle';
import * as qenv from '@push.rocks/qenv';
import * as smartrequest from '@push.rocks/smartrequest';
import * as smartfile from '@push.rocks/smartfile';
const testQenv = new qenv.Qenv('./', './.nogit/');
import * as smartai from '../ts/index.js';
let anthropicProvider: smartai.AnthropicProvider;
tap.test('Anthropic Document: should create and start Anthropic provider', async () => {
anthropicProvider = new smartai.AnthropicProvider({
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
});
await anthropicProvider.start();
expect(anthropicProvider).toBeInstanceOf(smartai.AnthropicProvider);
});
tap.test('Anthropic Document: should document a PDF', async () => {
const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
const pdfResponse = await smartrequest.SmartRequest.create()
.url(pdfUrl)
.get();
const result = await anthropicProvider.document({
systemMessage: 'Classify the document. Only the following answers are allowed: "invoice", "bank account statement", "contract", "test document", "other". The answer should only contain the keyword for machine use.',
userMessage: 'Classify this document.',
messageHistory: [],
pdfDocuments: [Buffer.from(await pdfResponse.arrayBuffer())],
});
console.log(`Anthropic Document - Result:`, result);
expect(result).toBeTruthy();
expect(result.message).toBeTruthy();
});
tap.test('Anthropic Document: should handle complex document analysis', async () => {
// Test with the demo PDF if it exists
const pdfPath = './.nogit/demo_without_textlayer.pdf';
let pdfBuffer: Uint8Array;
try {
pdfBuffer = await smartfile.fs.toBuffer(pdfPath);
} catch (error) {
// If the file doesn't exist, use the dummy PDF
console.log('Demo PDF not found, using dummy PDF instead');
const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
const pdfResponse = await smartrequest.SmartRequest.create()
.url(pdfUrl)
.get();
pdfBuffer = Buffer.from(await pdfResponse.arrayBuffer());
}
const result = await anthropicProvider.document({
systemMessage: `
Analyze this document and provide a JSON response with the following structure:
{
"documentType": "string",
"hasText": boolean,
"summary": "string"
}
`,
userMessage: 'Analyze this document.',
messageHistory: [],
pdfDocuments: [pdfBuffer],
});
console.log(`Anthropic Complex Document Analysis:`, result);
expect(result).toBeTruthy();
expect(result.message).toBeTruthy();
});
tap.test('Anthropic Document: should stop the provider', async () => {
await anthropicProvider.stop();
});
export default tap.start();

@@ -9,37 +9,29 @@ import * as smartai from '../ts/index.js';
 let testSmartai: smartai.SmartAi;
-tap.test('should create a smartai instance', async () => {
+tap.test('OpenAI Document: should create a smartai instance with OpenAI provider', async () => {
   testSmartai = new smartai.SmartAi({
     openaiToken: await testQenv.getEnvVarOnDemand('OPENAI_TOKEN'),
   });
   await testSmartai.start();
 });
-tap.test('should create chat response with openai', async () => {
-  const userMessage = 'How are you?';
-  const response = await testSmartai.openaiProvider.chat({
-    systemMessage: 'Hello',
-    userMessage: userMessage,
-    messageHistory: [],
-  });
-  console.log(`userMessage: ${userMessage}`);
-  console.log(response.message);
-});
-tap.test('should document a pdf', async () => {
+tap.test('OpenAI Document: should document a pdf', async () => {
   const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
-  const pdfResponse = await smartrequest.getBinary(pdfUrl);
+  const pdfResponse = await smartrequest.SmartRequest.create()
+    .url(pdfUrl)
+    .get();
   const result = await testSmartai.openaiProvider.document({
     systemMessage: 'Classify the document. Only the following answers are allowed: "invoice", "bank account statement", "contract", "other". The answer should only contain the keyword for machine use.',
     userMessage: "Classify the document.",
     messageHistory: [],
-    pdfDocuments: [pdfResponse.body],
+    pdfDocuments: [Buffer.from(await pdfResponse.arrayBuffer())],
   });
   console.log(result);
+  expect(result.message).toBeTruthy();
 });
-tap.test('should recognize companies in a pdf', async () => {
+tap.test('OpenAI Document: should recognize companies in a pdf', async () => {
   const pdfBuffer = await smartfile.fs.toBuffer('./.nogit/demo_without_textlayer.pdf');
   const result = await testSmartai.openaiProvider.document({
     systemMessage: `
@@ -74,27 +66,11 @@ tap.test('should recognize companies in a pdf', async () => {
     pdfDocuments: [pdfBuffer],
   });
   console.log(result);
+  expect(result.message).toBeTruthy();
 });
-tap.test('should create audio response with openai', async () => {
-  // Call the audio method with a sample message.
-  const audioStream = await testSmartai.openaiProvider.audio({
-    message: 'This is a test of audio generation.',
-  });
-  // Read all chunks from the stream.
-  const chunks: Uint8Array[] = [];
-  for await (const chunk of audioStream) {
-    chunks.push(chunk as Uint8Array);
-  }
-  const audioBuffer = Buffer.concat(chunks);
-  await smartfile.fs.toFs(audioBuffer, './.nogit/testoutput.mp3');
-  console.log(`Audio Buffer length: ${audioBuffer.length}`);
-  // Assert that the resulting buffer is not empty.
-  expect(audioBuffer.length).toBeGreaterThan(0);
-});
-tap.test('should stop the smartai instance', async () => {
+tap.test('OpenAI Document: should stop the smartai instance', async () => {
   await testSmartai.stop();
 });
 export default tap.start();

test/test.image.openai.ts (new file)

@@ -0,0 +1,203 @@
import { expect, tap } from '@push.rocks/tapbundle';
import * as qenv from '@push.rocks/qenv';
import * as smartai from '../ts/index.js';
import * as path from 'path';
import { promises as fs } from 'fs';
const testQenv = new qenv.Qenv('./', './.nogit/');
let openaiProvider: smartai.OpenAiProvider;
// Helper function to save image results
async function saveImageResult(testName: string, result: any) {
const sanitizedName = testName.replace(/[^a-z0-9]/gi, '_').toLowerCase();
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
const filename = `openai_${sanitizedName}_${timestamp}.json`;
const filepath = path.join('.nogit', 'testresults', 'images', filename);
await fs.mkdir(path.dirname(filepath), { recursive: true });
await fs.writeFile(filepath, JSON.stringify(result, null, 2), 'utf-8');
console.log(` 💾 Saved to: ${filepath}`);
// Also save the actual image if b64_json is present
if (result.images && result.images[0]?.b64_json) {
const imageFilename = `openai_${sanitizedName}_${timestamp}.png`;
const imageFilepath = path.join('.nogit', 'testresults', 'images', imageFilename);
await fs.writeFile(imageFilepath, Buffer.from(result.images[0].b64_json, 'base64'));
console.log(` 🖼️ Image saved to: ${imageFilepath}`);
}
}
tap.test('OpenAI Image Generation: should initialize provider', async () => {
const openaiToken = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
expect(openaiToken).toBeTruthy();
openaiProvider = new smartai.OpenAiProvider({
openaiToken,
imageModel: 'gpt-image-1'
});
await openaiProvider.start();
expect(openaiProvider).toBeInstanceOf(smartai.OpenAiProvider);
});
tap.test('OpenAI Image: Basic generation with gpt-image-1', async () => {
const result = await openaiProvider.imageGenerate({
prompt: 'A cute robot reading a book in a cozy library, digital art style',
model: 'gpt-image-1',
quality: 'medium',
size: '1024x1024'
});
console.log('Basic gpt-image-1 Generation:');
console.log('- Images generated:', result.images.length);
console.log('- Model used:', result.metadata?.model);
console.log('- Quality:', result.metadata?.quality);
console.log('- Size:', result.metadata?.size);
console.log('- Tokens used:', result.metadata?.tokensUsed);
await saveImageResult('basic_generation_gptimage1', result);
expect(result.images).toBeTruthy();
expect(result.images.length).toEqual(1);
expect(result.images[0].b64_json).toBeTruthy();
expect(result.metadata?.model).toEqual('gpt-image-1');
});
tap.test('OpenAI Image: High quality with transparent background', async () => {
const result = await openaiProvider.imageGenerate({
prompt: 'A simple geometric logo of a mountain peak, minimal design, clean lines',
model: 'gpt-image-1',
quality: 'high',
size: '1024x1024',
background: 'transparent',
outputFormat: 'png'
});
console.log('High Quality Transparent:');
console.log('- Quality:', result.metadata?.quality);
console.log('- Background: transparent');
console.log('- Format:', result.metadata?.outputFormat);
console.log('- Tokens used:', result.metadata?.tokensUsed);
await saveImageResult('high_quality_transparent', result);
expect(result.images.length).toEqual(1);
expect(result.images[0].b64_json).toBeTruthy();
});
tap.test('OpenAI Image: WebP format with compression', async () => {
const result = await openaiProvider.imageGenerate({
prompt: 'A futuristic cityscape at sunset with flying cars, photorealistic',
model: 'gpt-image-1',
quality: 'high',
size: '1536x1024',
outputFormat: 'webp',
outputCompression: 85
});
console.log('WebP with Compression:');
console.log('- Format:', result.metadata?.outputFormat);
console.log('- Compression: 85%');
console.log('- Size:', result.metadata?.size);
await saveImageResult('webp_compression', result);
expect(result.images.length).toEqual(1);
expect(result.images[0].b64_json).toBeTruthy();
});
tap.test('OpenAI Image: Text rendering with gpt-image-1', async () => {
const result = await openaiProvider.imageGenerate({
prompt: 'A vintage cafe sign that says "COFFEE & CODE" in elegant hand-lettered typography, warm colors',
model: 'gpt-image-1',
quality: 'high',
size: '1024x1024'
});
console.log('Text Rendering:');
console.log('- Prompt includes text: "COFFEE & CODE"');
console.log('- gpt-image-1 has superior text rendering');
console.log('- Tokens used:', result.metadata?.tokensUsed);
await saveImageResult('text_rendering', result);
expect(result.images.length).toEqual(1);
expect(result.images[0].b64_json).toBeTruthy();
});
tap.test('OpenAI Image: Multiple images generation', async () => {
const result = await openaiProvider.imageGenerate({
prompt: 'Abstract colorful geometric patterns, modern minimalist art',
model: 'gpt-image-1',
n: 2,
quality: 'medium',
size: '1024x1024'
});
console.log('Multiple Images:');
console.log('- Images requested: 2');
console.log('- Images generated:', result.images.length);
await saveImageResult('multiple_images', result);
expect(result.images.length).toEqual(2);
expect(result.images[0].b64_json).toBeTruthy();
expect(result.images[1].b64_json).toBeTruthy();
});
tap.test('OpenAI Image: Low moderation setting', async () => {
const result = await openaiProvider.imageGenerate({
prompt: 'A fantasy battle scene with warriors and dragons',
model: 'gpt-image-1',
moderation: 'low',
quality: 'medium'
});
console.log('Low Moderation:');
console.log('- Moderation: low (less restrictive filtering)');
console.log('- Tokens used:', result.metadata?.tokensUsed);
await saveImageResult('low_moderation', result);
expect(result.images.length).toEqual(1);
expect(result.images[0].b64_json).toBeTruthy();
});
tap.test('OpenAI Image Editing: edit with gpt-image-1', async () => {
// First, generate a base image
const baseResult = await openaiProvider.imageGenerate({
prompt: 'A simple white cat sitting on a red cushion',
model: 'gpt-image-1',
quality: 'low',
size: '1024x1024'
});
const baseImageBuffer = Buffer.from(baseResult.images[0].b64_json!, 'base64');
// Now edit it
const editResult = await openaiProvider.imageEdit({
image: baseImageBuffer,
prompt: 'Change the cat to orange and add stylish sunglasses',
model: 'gpt-image-1',
quality: 'medium'
});
console.log('Image Editing:');
console.log('- Base image created');
console.log('- Edit: change color and add sunglasses');
console.log('- Result images:', editResult.images.length);
await saveImageResult('image_edit', editResult);
expect(editResult.images.length).toEqual(1);
expect(editResult.images[0].b64_json).toBeTruthy();
});
tap.test('OpenAI Image: should clean up provider', async () => {
await openaiProvider.stop();
console.log('OpenAI image provider stopped successfully');
});
export default tap.start();
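All of the image tests read results back from result.images[n].b64_json, since gpt-image-1 returns base64-encoded payloads rather than URLs. A condensed sketch of generating one image and writing it straight to disk (output path illustrative; the provider is assumed to be started):

import { promises as fs } from 'fs';
import * as smartai from '../ts/index.js';

async function generatePng(provider: smartai.OpenAiProvider, prompt: string, outPath: string) {
  const result = await provider.imageGenerate({
    prompt,
    model: 'gpt-image-1',
    quality: 'medium',
    size: '1024x1024',
  });
  // gpt-image-1 delivers base64 JSON, not hosted URLs
  const b64 = result.images[0]?.b64_json;
  if (!b64) throw new Error('No image payload returned');
  await fs.writeFile(outPath, Buffer.from(b64, 'base64'));
}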

test/test.interfaces.ts Normal file

@@ -0,0 +1,140 @@
import { tap, expect } from '@push.rocks/tapbundle';
import * as smartai from '../ts/index.js';
// Test interface exports and type checking
// These tests verify that all interfaces are properly exported and usable
tap.test('Interfaces: ResearchOptions should be properly typed', async () => {
const testOptions: smartai.ResearchOptions = {
query: 'test query',
searchDepth: 'basic',
maxSources: 10,
includeWebSearch: true,
background: false
};
expect(testOptions).toBeInstanceOf(Object);
expect(testOptions.query).toEqual('test query');
expect(testOptions.searchDepth).toEqual('basic');
});
tap.test('Interfaces: ResearchResponse should be properly typed', async () => {
const testResponse: smartai.ResearchResponse = {
answer: 'test answer',
sources: [
{
url: 'https://example.com',
title: 'Example Source',
snippet: 'This is a snippet'
}
],
searchQueries: ['query1', 'query2'],
metadata: {
model: 'test-model',
tokensUsed: 100
}
};
expect(testResponse).toBeInstanceOf(Object);
expect(testResponse.answer).toEqual('test answer');
expect(testResponse.sources).toBeArray();
expect(testResponse.sources[0].url).toEqual('https://example.com');
});
tap.test('Interfaces: ChatOptions should be properly typed', async () => {
const testChatOptions: smartai.ChatOptions = {
systemMessage: 'You are a helpful assistant',
userMessage: 'Hello',
messageHistory: [
{ role: 'user', content: 'Previous message' },
{ role: 'assistant', content: 'Previous response' }
]
};
expect(testChatOptions).toBeInstanceOf(Object);
expect(testChatOptions.systemMessage).toBeTruthy();
expect(testChatOptions.messageHistory).toBeArray();
});
tap.test('Interfaces: ChatResponse should be properly typed', async () => {
const testChatResponse: smartai.ChatResponse = {
role: 'assistant',
message: 'This is a response'
};
expect(testChatResponse).toBeInstanceOf(Object);
expect(testChatResponse.role).toEqual('assistant');
expect(testChatResponse.message).toBeTruthy();
});
tap.test('Interfaces: ChatMessage should be properly typed', async () => {
const testMessage: smartai.ChatMessage = {
role: 'user',
content: 'Test message'
};
expect(testMessage).toBeInstanceOf(Object);
expect(testMessage.role).toBeOneOf(['user', 'assistant', 'system']);
expect(testMessage.content).toBeTruthy();
});
tap.test('Interfaces: Provider options should be properly typed', async () => {
// OpenAI options
const openaiOptions: smartai.IOpenaiProviderOptions = {
openaiToken: 'test-token',
chatModel: 'gpt-5-mini',
audioModel: 'tts-1-hd',
visionModel: 'o4-mini',
researchModel: 'o4-mini-deep-research-2025-06-26',
enableWebSearch: true
};
expect(openaiOptions).toBeInstanceOf(Object);
expect(openaiOptions.openaiToken).toBeTruthy();
// Anthropic options
const anthropicOptions: smartai.IAnthropicProviderOptions = {
anthropicToken: 'test-token',
enableWebSearch: true,
searchDomainAllowList: ['example.com'],
searchDomainBlockList: ['blocked.com']
};
expect(anthropicOptions).toBeInstanceOf(Object);
expect(anthropicOptions.anthropicToken).toBeTruthy();
});
tap.test('Interfaces: Search depth values should be valid', async () => {
const validDepths: smartai.ResearchOptions['searchDepth'][] = ['basic', 'advanced', 'deep'];
for (const depth of validDepths) {
const options: smartai.ResearchOptions = {
query: 'test',
searchDepth: depth
};
expect(options.searchDepth).toBeOneOf(['basic', 'advanced', 'deep', undefined]);
}
});
tap.test('Interfaces: Optional properties should work correctly', async () => {
// Minimal ResearchOptions
const minimalOptions: smartai.ResearchOptions = {
query: 'test query'
};
expect(minimalOptions.query).toBeTruthy();
expect(minimalOptions.searchDepth).toBeUndefined();
expect(minimalOptions.maxSources).toBeUndefined();
// Minimal ChatOptions
const minimalChat: smartai.ChatOptions = {
systemMessage: 'system',
userMessage: 'user',
messageHistory: []
};
expect(minimalChat.messageHistory).toBeArray();
expect(minimalChat.messageHistory.length).toEqual(0);
});
export default tap.start();
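Because these option and response shapes are plain structural types, one options object can be shared across providers. A provider-agnostic helper, sketched on the assumption that every instance passed in actually implements research() rather than throwing the "not yet supported" stub:

import * as smartai from '../ts/index.js';

// Run the same query against several providers and collect the typed responses.
async function compareResearch(
  providers: smartai.MultiModalModel[],
  options: smartai.ResearchOptions,
): Promise<smartai.ResearchResponse[]> {
  const results: smartai.ResearchResponse[] = [];
  for (const provider of providers) {
    results.push(await provider.research(options));
  }
  return results;
}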


@@ -0,0 +1,223 @@
import { expect, tap } from '@push.rocks/tapbundle';
import * as qenv from '@push.rocks/qenv';
import * as smartai from '../ts/index.js';
import * as path from 'path';
import { promises as fs } from 'fs';
const testQenv = new qenv.Qenv('./', './.nogit/');
// Helper function to save research results
async function saveResearchResult(testName: string, result: any) {
const sanitizedName = testName.replace(/[^a-z0-9]/gi, '_').toLowerCase();
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
const filename = `${sanitizedName}_${timestamp}.json`;
const filepath = path.join('.nogit', 'testresults', 'research', filename);
await fs.mkdir(path.dirname(filepath), { recursive: true });
await fs.writeFile(filepath, JSON.stringify(result, null, 2), 'utf-8');
console.log(` 💾 Saved to: ${filepath}`);
}
let anthropicProvider: smartai.AnthropicProvider;
tap.test('Anthropic Research: should initialize provider with web search', async () => {
anthropicProvider = new smartai.AnthropicProvider({
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
enableWebSearch: true
});
await anthropicProvider.start();
expect(anthropicProvider).toBeInstanceOf(smartai.AnthropicProvider);
expect(typeof anthropicProvider.research).toEqual('function');
});
tap.test('Anthropic Research: should perform basic research query', async () => {
const result = await anthropicProvider.research({
query: 'What is machine learning and its main applications?',
searchDepth: 'basic'
});
console.log('Anthropic Basic Research:');
console.log('- Answer length:', result.answer.length);
console.log('- Sources found:', result.sources.length);
console.log('- First 200 chars:', result.answer.substring(0, 200));
await saveResearchResult('basic_research_machine_learning', result);
expect(result).toBeTruthy();
expect(result.answer).toBeTruthy();
expect(result.answer.toLowerCase()).toInclude('machine learning');
expect(result.sources).toBeArray();
expect(result.metadata).toBeTruthy();
});
tap.test('Anthropic Research: should perform research with web search', async () => {
const result = await anthropicProvider.research({
query: 'What are the latest developments in renewable energy technology?',
searchDepth: 'advanced',
includeWebSearch: true,
maxSources: 5
});
console.log('Anthropic Web Search Research:');
console.log('- Answer length:', result.answer.length);
console.log('- Sources:', result.sources.length);
if (result.searchQueries) {
console.log('- Search queries:', result.searchQueries);
}
await saveResearchResult('web_search_renewable_energy', result);
expect(result.answer).toBeTruthy();
expect(result.answer.toLowerCase()).toInclude('renewable');
// Check if sources were extracted
if (result.sources.length > 0) {
console.log('- Example source:', result.sources[0]);
expect(result.sources[0]).toHaveProperty('url');
}
});
tap.test('Anthropic Research: should handle deep research queries', async () => {
const result = await anthropicProvider.research({
query: 'Explain the differences between REST and GraphQL APIs',
searchDepth: 'deep'
});
console.log('Anthropic Deep Research:');
console.log('- Answer length:', result.answer.length);
console.log('- Token usage:', result.metadata?.tokensUsed);
await saveResearchResult('deep_research_rest_vs_graphql', result);
expect(result.answer).toBeTruthy();
expect(result.answer.length).toBeGreaterThan(300);
expect(result.answer.toLowerCase()).toInclude('rest');
expect(result.answer.toLowerCase()).toInclude('graphql');
});
tap.test('Anthropic Research: should extract citations from response', async () => {
const result = await anthropicProvider.research({
query: 'What is Docker and how does containerization work?',
searchDepth: 'basic',
maxSources: 3
});
console.log('Anthropic Citation Extraction:');
console.log('- Sources found:', result.sources.length);
console.log('- Answer includes Docker:', result.answer.toLowerCase().includes('docker'));
await saveResearchResult('citation_extraction_docker', result);
expect(result.answer).toInclude('Docker');
// Check for URL extraction (both markdown and plain URLs)
const hasUrls = result.answer.includes('http') || result.sources.length > 0;
console.log('- Contains URLs or sources:', hasUrls);
});
tap.test('Anthropic Research: should use domain filtering when configured', async () => {
// Create a new provider with domain restrictions
const filteredProvider = new smartai.AnthropicProvider({
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
enableWebSearch: true,
searchDomainAllowList: ['wikipedia.org', 'docs.microsoft.com'],
searchDomainBlockList: ['reddit.com']
});
await filteredProvider.start();
const result = await filteredProvider.research({
query: 'What is JavaScript?',
searchDepth: 'basic'
});
console.log('Anthropic Domain Filtering Test:');
console.log('- Answer length:', result.answer.length);
console.log('- Applied domain filters (allow: wikipedia, docs.microsoft)');
await saveResearchResult('domain_filtering_javascript', result);
expect(result.answer).toBeTruthy();
expect(result.answer.toLowerCase()).toInclude('javascript');
await filteredProvider.stop();
});
tap.test('Anthropic Research: should handle errors gracefully', async () => {
let errorCaught = false;
try {
await anthropicProvider.research({
query: '', // Empty query
searchDepth: 'basic'
});
} catch (error) {
errorCaught = true;
console.log('Expected error for empty query:', error.message.substring(0, 100));
}
// Anthropic might handle empty queries differently
console.log(`Empty query error test - Error caught: ${errorCaught}`);
});
tap.test('Anthropic Research: should handle different search depths', async () => {
// Test basic search depth
const basicResult = await anthropicProvider.research({
query: 'What is Python?',
searchDepth: 'basic'
});
// Test advanced search depth
const advancedResult = await anthropicProvider.research({
query: 'What is Python?',
searchDepth: 'advanced'
});
console.log('Anthropic Search Depth Comparison:');
console.log('- Basic answer length:', basicResult.answer.length);
console.log('- Advanced answer length:', advancedResult.answer.length);
console.log('- Basic tokens:', basicResult.metadata?.tokensUsed);
console.log('- Advanced tokens:', advancedResult.metadata?.tokensUsed);
await saveResearchResult('search_depth_python_basic', basicResult);
await saveResearchResult('search_depth_python_advanced', advancedResult);
expect(basicResult.answer).toBeTruthy();
expect(advancedResult.answer).toBeTruthy();
// Advanced search typically produces longer answers
// But this isn't guaranteed, so we just check they exist
expect(basicResult.answer.toLowerCase()).toInclude('python');
expect(advancedResult.answer.toLowerCase()).toInclude('python');
});
tap.test('Anthropic Research: ARM vs. Qualcomm comparison', async () => {
const result = await anthropicProvider.research({
query: 'Compare ARM and Qualcomm: their technologies, market positions, and recent developments in the mobile and computing sectors',
searchDepth: 'advanced',
includeWebSearch: true,
maxSources: 10
});
console.log('ARM vs. Qualcomm Research:');
console.log('- Answer length:', result.answer.length);
console.log('- Sources found:', result.sources.length);
console.log('- First 300 chars:', result.answer.substring(0, 300));
await saveResearchResult('arm_vs_qualcomm_comparison', result);
expect(result.answer).toBeTruthy();
expect(result.answer.length).toBeGreaterThan(500);
expect(result.answer.toLowerCase()).toInclude('arm');
expect(result.answer.toLowerCase()).toInclude('qualcomm');
expect(result.sources.length).toBeGreaterThan(0);
});
tap.test('Anthropic Research: should clean up provider', async () => {
await anthropicProvider.stop();
console.log('Anthropic research provider stopped successfully');
});
export default tap.start();
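Note that the allow and block lists exercised above are provider-level configuration, not per-query options, and the implementation gives the allow list precedence: when both lists are set, the block list is ignored. A condensed sketch of that setup (token sourced from the environment here instead of qenv):

import * as smartai from '../ts/index.js';

const filteredProvider = new smartai.AnthropicProvider({
  anthropicToken: process.env.ANTHROPIC_TOKEN!, // assumes the token is exported in the environment
  enableWebSearch: true,
  searchDomainAllowList: ['wikipedia.org', 'docs.microsoft.com'],
  searchDomainBlockList: ['reddit.com'], // ignored here because the allow list is non-empty
});
await filteredProvider.start();
const result = await filteredProvider.research({ query: 'What is JavaScript?', searchDepth: 'basic' });
console.log(result.answer.substring(0, 200));
await filteredProvider.stop();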


@@ -0,0 +1,172 @@
import { expect, tap } from '@push.rocks/tapbundle';
import * as qenv from '@push.rocks/qenv';
import * as smartai from '../ts/index.js';
import * as path from 'path';
import { promises as fs } from 'fs';
const testQenv = new qenv.Qenv('./', './.nogit/');
// Helper function to save research results
async function saveResearchResult(testName: string, result: any) {
const sanitizedName = testName.replace(/[^a-z0-9]/gi, '_').toLowerCase();
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
const filename = `openai_${sanitizedName}_${timestamp}.json`;
const filepath = path.join('.nogit', 'testresults', 'research', filename);
await fs.mkdir(path.dirname(filepath), { recursive: true });
await fs.writeFile(filepath, JSON.stringify(result, null, 2), 'utf-8');
console.log(` 💾 Saved to: ${filepath}`);
}
let openaiProvider: smartai.OpenAiProvider;
tap.test('OpenAI Research: should initialize provider with research capabilities', async () => {
openaiProvider = new smartai.OpenAiProvider({
openaiToken: await testQenv.getEnvVarOnDemand('OPENAI_TOKEN'),
researchModel: 'o4-mini-deep-research-2025-06-26',
enableWebSearch: true
});
await openaiProvider.start();
expect(openaiProvider).toBeInstanceOf(smartai.OpenAiProvider);
expect(typeof openaiProvider.research).toEqual('function');
});
tap.test('OpenAI Research: should perform basic research query', async () => {
const result = await openaiProvider.research({
query: 'What is TypeScript and why is it useful for web development?',
searchDepth: 'basic'
});
console.log('OpenAI Basic Research:');
console.log('- Answer length:', result.answer.length);
console.log('- Sources found:', result.sources.length);
console.log('- First 200 chars:', result.answer.substring(0, 200));
await saveResearchResult('basic_research_typescript', result);
expect(result).toBeTruthy();
expect(result.answer).toBeTruthy();
expect(result.answer.toLowerCase()).toInclude('typescript');
expect(result.sources).toBeArray();
expect(result.metadata).toBeTruthy();
expect(result.metadata.model).toBeTruthy();
});
tap.test('OpenAI Research: should perform research with web search enabled', async () => {
const result = await openaiProvider.research({
query: 'What are the latest features in ECMAScript 2024?',
searchDepth: 'advanced',
includeWebSearch: true,
maxSources: 5
});
console.log('OpenAI Web Search Research:');
console.log('- Answer length:', result.answer.length);
console.log('- Sources:', result.sources.length);
if (result.searchQueries) {
console.log('- Search queries used:', result.searchQueries);
}
await saveResearchResult('web_search_ecmascript', result);
expect(result.answer).toBeTruthy();
expect(result.answer.toLowerCase()).toInclude('ecmascript');
// The model might include sources or search queries
if (result.sources.length > 0) {
expect(result.sources[0]).toHaveProperty('url');
expect(result.sources[0]).toHaveProperty('title');
}
});
tap.test('OpenAI Research: should handle deep research for complex topics', async () => {
// Skip this test if it takes too long or costs too much
// You can enable it for thorough testing
const skipDeepResearch = true;
if (skipDeepResearch) {
console.log('Skipping deep research test to save API costs');
return;
}
const result = await openaiProvider.research({
query: 'Compare the pros and cons of microservices vs monolithic architecture',
searchDepth: 'deep',
includeWebSearch: true
});
console.log('OpenAI Deep Research:');
console.log('- Answer length:', result.answer.length);
console.log('- Token usage:', result.metadata?.tokensUsed);
expect(result.answer).toBeTruthy();
expect(result.answer.length).toBeGreaterThan(500);
expect(result.answer.toLowerCase()).toInclude('microservices');
expect(result.answer.toLowerCase()).toInclude('monolithic');
});
tap.test('OpenAI Research: should extract sources from markdown links', async () => {
const result = await openaiProvider.research({
query: 'What is Node.js and provide some official documentation links?',
searchDepth: 'basic',
maxSources: 3
});
console.log('OpenAI Source Extraction:');
console.log('- Sources found:', result.sources.length);
await saveResearchResult('source_extraction_nodejs', result);
if (result.sources.length > 0) {
console.log('- Example source:', result.sources[0]);
expect(result.sources[0].url).toBeTruthy();
expect(result.sources[0].title).toBeTruthy();
}
expect(result.answer).toInclude('Node.js');
});
tap.test('OpenAI Research: should handle research errors gracefully', async () => {
// Test with an extremely long query that might cause issues
const longQuery = 'a'.repeat(10000);
let errorCaught = false;
try {
await openaiProvider.research({
query: longQuery,
searchDepth: 'basic'
});
} catch (error) {
errorCaught = true;
console.log('Expected error for long query:', error.message.substring(0, 100));
expect(error.message).toBeTruthy();
}
// OpenAI might handle long queries, so we don't assert the error
console.log(`Long query error test - Error caught: ${errorCaught}`);
});
tap.test('OpenAI Research: should respect maxSources parameter', async () => {
const maxSources = 3;
const result = await openaiProvider.research({
query: 'List popular JavaScript frameworks',
searchDepth: 'basic',
maxSources: maxSources
});
console.log(`OpenAI Max Sources Test - Requested: ${maxSources}, Found: ${result.sources.length}`);
// The API might not always return exactly maxSources, but should respect it as a limit
if (result.sources.length > 0) {
expect(result.sources.length).toBeLessThanOrEqual(maxSources * 2); // Allow some flexibility
}
});
tap.test('OpenAI Research: should clean up provider', async () => {
await openaiProvider.stop();
console.log('OpenAI research provider stopped successfully');
});
export default tap.start();
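As the maxSources test notes, the parameter is a search budget rather than a hard cap on returned citations (on the Anthropic side it maps onto the web-search tool's max_uses), so callers that need an exact count should trim client-side. A small defensive sketch:

import * as smartai from '../ts/index.js';

async function topSources(provider: smartai.OpenAiProvider, query: string, n: number) {
  const result = await provider.research({ query, searchDepth: 'basic', maxSources: n });
  // Trim defensively; the provider may return more or fewer sources than requested.
  return result.sources.slice(0, n);
}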


@@ -0,0 +1,80 @@
import { tap, expect } from '@push.rocks/tapbundle';
import * as smartai from '../ts/index.js';
// Test research method stubs for providers without full implementation
// These providers have research methods that throw "not yet supported" errors
tap.test('Research Stubs: Perplexity provider should have research method', async () => {
const perplexityProvider = new smartai.PerplexityProvider({
perplexityToken: 'test-token'
});
// Perplexity has a basic implementation with Sonar models
expect(typeof perplexityProvider.research).toEqual('function');
});
tap.test('Research Stubs: Groq provider should throw not supported error', async () => {
const groqProvider = new smartai.GroqProvider({
groqToken: 'test-token'
});
expect(typeof groqProvider.research).toEqual('function');
let errorCaught = false;
try {
await groqProvider.research({ query: 'test' });
} catch (error) {
errorCaught = true;
expect(error.message).toInclude('not yet supported');
}
expect(errorCaught).toBeTrue();
});
tap.test('Research Stubs: Ollama provider should throw not supported error', async () => {
const ollamaProvider = new smartai.OllamaProvider({});
expect(typeof ollamaProvider.research).toEqual('function');
let errorCaught = false;
try {
await ollamaProvider.research({ query: 'test' });
} catch (error) {
errorCaught = true;
expect(error.message).toInclude('not yet supported');
}
expect(errorCaught).toBeTrue();
});
tap.test('Research Stubs: xAI provider should throw not supported error', async () => {
const xaiProvider = new smartai.XAIProvider({
xaiToken: 'test-token'
});
expect(typeof xaiProvider.research).toEqual('function');
let errorCaught = false;
try {
await xaiProvider.research({ query: 'test' });
} catch (error) {
errorCaught = true;
expect(error.message).toInclude('not yet supported');
}
expect(errorCaught).toBeTrue();
});
tap.test('Research Stubs: Exo provider should throw not supported error', async () => {
const exoProvider = new smartai.ExoProvider({});
expect(typeof exoProvider.research).toEqual('function');
let errorCaught = false;
try {
await exoProvider.research({ query: 'test' });
} catch (error) {
errorCaught = true;
expect(error.message).toInclude('not yet supported');
}
expect(errorCaught).toBeTrue();
});
export default tap.start();
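Since every stub throws with the same "not yet supported" wording, callers can feature-detect research support by catching that message. A sketch of such a probe (helper name illustrative; note it spends a real API call on providers that do support research):

import * as smartai from '../ts/index.js';

async function supportsResearch(provider: smartai.MultiModalModel): Promise<boolean> {
  try {
    await provider.research({ query: 'capability probe' });
    return true;
  } catch (error: any) {
    if (typeof error?.message === 'string' && error.message.includes('not yet supported')) {
      return false;
    }
    return true; // auth or network failures still mean the method is implemented
  }
}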


@@ -0,0 +1,95 @@
import { expect, tap } from '@push.rocks/tapbundle';
import * as qenv from '@push.rocks/qenv';
import * as smartfile from '@push.rocks/smartfile';
const testQenv = new qenv.Qenv('./', './.nogit/');
import * as smartai from '../ts/index.js';
let anthropicProvider: smartai.AnthropicProvider;
tap.test('Anthropic Vision: should create and start Anthropic provider', async () => {
anthropicProvider = new smartai.AnthropicProvider({
anthropicToken: await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'),
});
await anthropicProvider.start();
expect(anthropicProvider).toBeInstanceOf(smartai.AnthropicProvider);
});
tap.test('Anthropic Vision: should analyze coffee image with latte art', async () => {
// Test 1: Coffee image from Unsplash by Dani
const imagePath = './test/testimages/coffee-dani/coffee.jpg';
console.log(`Loading coffee image from: ${imagePath}`);
const imageBuffer = await smartfile.fs.toBuffer(imagePath);
console.log(`Image loaded, size: ${imageBuffer.length} bytes`);
const result = await anthropicProvider.vision({
image: imageBuffer,
prompt: 'Describe this coffee image. What do you see in terms of the cup, foam pattern, and overall composition?'
});
console.log(`Anthropic Vision (Coffee) - Result: ${result}`);
expect(result).toBeTruthy();
expect(typeof result).toEqual('string');
expect(result.toLowerCase()).toInclude('coffee');
// The image has a heart pattern in the latte art
const mentionsLatte = result.toLowerCase().includes('heart') ||
result.toLowerCase().includes('latte') ||
result.toLowerCase().includes('foam');
expect(mentionsLatte).toBeTrue();
});
tap.test('Anthropic Vision: should analyze laptop/workspace image', async () => {
// Test 2: Laptop image from Unsplash by Nicolas Bichon
const imagePath = './test/testimages/laptop-nicolas/laptop.jpg';
console.log(`Loading laptop image from: ${imagePath}`);
const imageBuffer = await smartfile.fs.toBuffer(imagePath);
console.log(`Image loaded, size: ${imageBuffer.length} bytes`);
const result = await anthropicProvider.vision({
image: imageBuffer,
prompt: 'Describe the technology and workspace setup in this image. What devices and equipment can you see?'
});
console.log(`Anthropic Vision (Laptop) - Result: ${result}`);
expect(result).toBeTruthy();
expect(typeof result).toEqual('string');
// Should mention laptop, computer, keyboard, or desk
const mentionsTech = result.toLowerCase().includes('laptop') ||
result.toLowerCase().includes('computer') ||
result.toLowerCase().includes('keyboard') ||
result.toLowerCase().includes('desk');
expect(mentionsTech).toBeTrue();
});
tap.test('Anthropic Vision: should analyze receipt/document image', async () => {
// Test 3: Receipt image from Unsplash by Annie Spratt
const imagePath = './test/testimages/receipt-annie/receipt.jpg';
console.log(`Loading receipt image from: ${imagePath}`);
const imageBuffer = await smartfile.fs.toBuffer(imagePath);
console.log(`Image loaded, size: ${imageBuffer.length} bytes`);
const result = await anthropicProvider.vision({
image: imageBuffer,
prompt: 'What type of document is this? Can you identify any text or numbers visible in the image?'
});
console.log(`Anthropic Vision (Receipt) - Result: ${result}`);
expect(result).toBeTruthy();
expect(typeof result).toEqual('string');
// Should mention receipt, document, text, or paper
const mentionsDocument = result.toLowerCase().includes('receipt') ||
result.toLowerCase().includes('document') ||
result.toLowerCase().includes('text') ||
result.toLowerCase().includes('paper');
expect(mentionsDocument).toBeTrue();
});
tap.test('Anthropic Vision: should stop the provider', async () => {
await anthropicProvider.stop();
});
export default tap.start();


@@ -0,0 +1,36 @@
# Coffee Image Attribution
## coffee.jpg
**Photographer:** Dani (@frokz)
**Source URL:** https://unsplash.com/photos/cup-of-coffee-on-saucer-ZLqxSzvVr7I
**Direct Link:** https://images.unsplash.com/photo-1506372023823-741c83b836fe
### Metadata
- **Title:** Cup of coffee on saucer
- **Description:** One of many coffee-moments in my life ;)
- **Date Published:** September 25, 2017
- **Location:** Stockholm, Sweden
- **Tags:** coffee, cafe, heart, coffee cup, cup, barista, latte, mug, saucer, food, sweden, stockholm
### License
**Unsplash License** - Free to use
- ✅ Commercial and non-commercial use
- ✅ No permission needed
- ❌ Cannot be sold without significant modification
- ❌ Cannot be used to replicate Unsplash or similar service
Full license: https://unsplash.com/license
### Usage in This Project
This image is used for testing vision/image processing capabilities in the SmartAI library test suite, specifically for:
- Testing coffee/beverage recognition
- Latte art pattern detection (heart shape)
- Scene/environment analysis
- Multi-element image understanding (cup, saucer, table)
### Download Information
- **Downloaded:** September 28, 2025
- **Original Filename:** dani-ZLqxSzvVr7I-unsplash.jpg
- **File size:** 3.7 MB (high resolution)
- **Format:** JPEG

Binary file not shown. Size: 3.7 MiB


@@ -0,0 +1,40 @@
# Laptop Image Attribution
## laptop.jpg
**Photographer:** Nicolas Bichon (@nicol3a)
**Source URL:** https://unsplash.com/photos/a-laptop-computer-sitting-on-top-of-a-wooden-desk-ZhV4iqAXxyA
**Direct Link:** https://images.unsplash.com/photo-1704230972797-e0e3aba0fce7
### Metadata
- **Title:** A laptop computer sitting on top of a wooden desk
- **Description:** Lifestyle photo I took for my indie app Type, a macOS app to take notes without interrupting your flow. https://usetype.app.
- **Date Published:** January 2, 2024
- **Camera:** FUJIFILM, X-T20
- **Tags:** computer, laptop, mac, keyboard, computer keyboard, computer hardware, furniture, table, electronics, screen, monitor, hardware, display, tabletop, lcd screen, digital display
### Statistics
- **Views:** 183,020
- **Downloads:** 757
### License
**Unsplash License** - Free to use
- ✅ Commercial and non-commercial use
- ✅ No permission needed
- ❌ Cannot be sold without significant modification
- ❌ Cannot be used to replicate Unsplash or similar service
Full license: https://unsplash.com/license
### Usage in This Project
This image is used for testing vision/image processing capabilities in the SmartAI library test suite, specifically for:
- Testing technology/computer equipment recognition
- Workspace/office environment analysis
- Object detection (laptop, keyboard, monitor, table)
- Scene understanding and context analysis
### Download Information
- **Downloaded:** September 28, 2025
- **Original Filename:** nicolas-bichon-ZhV4iqAXxyA-unsplash.jpg
- **File size:** 1.8 MB (high resolution)
- **Format:** JPEG

Binary file not shown. Size: 1.8 MiB


@@ -0,0 +1,40 @@
# Receipt Image Attribution
## receipt.jpg
**Photographer:** Annie Spratt (@anniespratt)
**Source URL:** https://unsplash.com/photos/a-receipt-sitting-on-top-of-a-wooden-table-recgFWxDO1Y
**Direct Link:** https://images.unsplash.com/photo-1731686602391-7484df33a03c
### Metadata
- **Title:** A receipt sitting on top of a wooden table
- **Description:** Download this free HD photo of text, document, invoice, and receipt by Annie Spratt
- **Date Published:** November 15, 2024
- **Tags:** text, document, invoice, receipt, diaper
### Statistics
- **Views:** 54,593
- **Downloads:** 764
### License
**Unsplash License** - Free to use
- ✅ Commercial and non-commercial use
- ✅ No permission needed
- ❌ Cannot be sold without significant modification
- ❌ Cannot be used to replicate Unsplash or similar service
Full license: https://unsplash.com/license
### Usage in This Project
This image is used for testing vision/image processing capabilities in the SmartAI library test suite, specifically for:
- Testing text extraction and OCR capabilities
- Document recognition and classification
- Receipt/invoice analysis
- Text-heavy image understanding
- Structured data extraction from documents
### Download Information
- **Downloaded:** September 28, 2025
- **Original Filename:** annie-spratt-recgFWxDO1Y-unsplash.jpg
- **File size:** 3.3 MB (high resolution)
- **Format:** JPEG

Binary file not shown. Size: 3.3 MiB


@@ -3,6 +3,6 @@
 */
export const commitinfo = {
  name: '@push.rocks/smartai',
-  version: '0.5.4',
+  version: '0.7.3',
  description: 'SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.'
}


@@ -1,3 +1,5 @@
import * as plugins from './plugins.js';
/**
 * Message format for chat interactions
 */
@@ -23,22 +25,114 @@ export interface ChatResponse {
  message: string;
}
/**
* Options for research interactions
*/
export interface ResearchOptions {
query: string;
searchDepth?: 'basic' | 'advanced' | 'deep';
maxSources?: number;
includeWebSearch?: boolean;
background?: boolean;
}
/**
* Response format for research interactions
*/
export interface ResearchResponse {
answer: string;
sources: Array<{
url: string;
title: string;
snippet: string;
}>;
searchQueries?: string[];
metadata?: any;
}
/**
* Options for image generation
*/
export interface ImageGenerateOptions {
prompt: string;
model?: 'gpt-image-1' | 'dall-e-3' | 'dall-e-2';
quality?: 'low' | 'medium' | 'high' | 'standard' | 'hd' | 'auto';
size?: '256x256' | '512x512' | '1024x1024' | '1536x1024' | '1024x1536' | '1792x1024' | '1024x1792' | 'auto';
style?: 'vivid' | 'natural';
background?: 'transparent' | 'opaque' | 'auto';
outputFormat?: 'png' | 'jpeg' | 'webp';
outputCompression?: number; // 0-100 for webp/jpeg
moderation?: 'low' | 'auto';
n?: number; // Number of images to generate
stream?: boolean;
partialImages?: number; // 0-3 for streaming
}
/**
* Options for image editing
*/
export interface ImageEditOptions {
image: Buffer;
prompt: string;
mask?: Buffer;
model?: 'gpt-image-1' | 'dall-e-2';
quality?: 'low' | 'medium' | 'high' | 'standard' | 'auto';
size?: '256x256' | '512x512' | '1024x1024' | '1536x1024' | '1024x1536' | 'auto';
background?: 'transparent' | 'opaque' | 'auto';
outputFormat?: 'png' | 'jpeg' | 'webp';
outputCompression?: number;
n?: number;
stream?: boolean;
partialImages?: number;
}
/**
* Response format for image operations
*/
export interface ImageResponse {
images: Array<{
b64_json?: string;
url?: string;
revisedPrompt?: string;
}>;
metadata?: {
model: string;
quality?: string;
size?: string;
outputFormat?: string;
tokensUsed?: number;
};
}
/**
 * Abstract base class for multi-modal AI models.
 * Provides a common interface for different AI providers (OpenAI, Anthropic, Perplexity, Ollama)
 */
export abstract class MultiModalModel {
/**
* SmartPdf instance for document processing
* Shared across all methods that need PDF functionality
*/
protected smartpdfInstance: plugins.smartpdf.SmartPdf;
  /**
   * Initializes the model and any necessary resources
   * Should be called before using any other methods
   */
-  abstract start(): Promise<void>;
+  public async start(): Promise<void> {
+    this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
+    await this.smartpdfInstance.start();
+  }
  /**
   * Cleans up any resources used by the model
   * Should be called when the model is no longer needed
   */
-  abstract stop(): Promise<void>;
+  public async stop(): Promise<void> {
+    if (this.smartpdfInstance) {
+      await this.smartpdfInstance.stop();
+    }
+  }
  /**
   * Synchronous chat interaction with the model
@@ -83,4 +177,28 @@ export abstract class MultiModalModel {
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }>;
/**
* Research and web search capabilities
* @param optionsArg Options containing the research query and configuration
* @returns Promise resolving to the research results with sources
* @throws Error if the provider doesn't support research capabilities
*/
public abstract research(optionsArg: ResearchOptions): Promise<ResearchResponse>;
/**
* Image generation from text prompts
* @param optionsArg Options containing the prompt and generation parameters
* @returns Promise resolving to the generated image(s)
* @throws Error if the provider doesn't support image generation
*/
public abstract imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse>;
/**
* Image editing and inpainting
* @param optionsArg Options containing the image, prompt, and editing parameters
* @returns Promise resolving to the edited image(s)
* @throws Error if the provider doesn't support image editing
*/
public abstract imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse>;
}
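With start() and stop() now concrete, a subclass only calls super for lifecycle and fills in the abstract surface. A skeletal sketch of the pattern (class name hypothetical; the remaining abstract members chat, chatStream, audio, vision and document are omitted for brevity and must also be implemented before this compiles):

import { MultiModalModel } from './abstract.classes.multimodal.js';
import type {
  ResearchOptions,
  ResearchResponse,
  ImageGenerateOptions,
  ImageEditOptions,
  ImageResponse,
} from './abstract.classes.multimodal.js';

class ExampleProvider extends MultiModalModel {
  public async start() {
    await super.start(); // boots the shared SmartPdf instance
  }
  public async stop() {
    await super.stop(); // tears it down again
  }
  // Unsupported capabilities follow the same convention as the shipped providers:
  public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
    throw new Error('Research capabilities are not yet supported by Example provider.');
  }
  public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
    throw new Error('Image generation is not supported by Example provider.');
  }
  public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
    throw new Error('Image editing is not supported by Example provider.');
  }
  // ...chat, chatStream, audio, vision and document omitted for brevity
}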


@@ -91,7 +91,29 @@ export class SmartAi {
    }
  }
-  public async stop() {}
+  public async stop() {
if (this.openaiProvider) {
await this.openaiProvider.stop();
}
if (this.anthropicProvider) {
await this.anthropicProvider.stop();
}
if (this.perplexityProvider) {
await this.perplexityProvider.stop();
}
if (this.groqProvider) {
await this.groqProvider.stop();
}
if (this.xaiProvider) {
await this.xaiProvider.stop();
}
if (this.ollamaProvider) {
await this.ollamaProvider.stop();
}
if (this.exoProvider) {
await this.exoProvider.stop();
}
}
  /**
   * create a new conversation

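From the consumer side the lifecycle is now symmetric: stop() shuts down every provider that was configured. A typical usage sketch (the constructor option name is assumed from the provider interfaces; SmartAi.start() itself is outside this diff):

import * as smartai from '../ts/index.js';

const ai = new smartai.SmartAi({ openaiToken: process.env.OPENAI_TOKEN! });
await ai.start();
try {
  const response = await ai.openaiProvider.chat({
    systemMessage: 'You are a helpful assistant.',
    userMessage: 'Hello!',
    messageHistory: [],
  });
  console.log(response.message);
} finally {
  await ai.stop(); // now actually releases provider resources
}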

@@ -1,3 +1,9 @@
export * from './classes.smartai.js';
export * from './abstract.classes.multimodal.js';
export * from './provider.openai.js';
export * from './provider.anthropic.js';
export * from './provider.perplexity.js';
export * from './provider.groq.js';
export * from './provider.ollama.js';
export * from './provider.xai.js';
export * from './provider.exo.js';


@@ -1,13 +1,25 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
-import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
+import type {
ChatOptions,
ChatResponse,
ChatMessage,
ResearchOptions,
ResearchResponse,
ImageGenerateOptions,
ImageEditOptions,
ImageResponse
} from './abstract.classes.multimodal.js';
import type { ImageBlockParam, TextBlockParam } from '@anthropic-ai/sdk/resources/messages';
type ContentBlock = ImageBlockParam | TextBlockParam;
export interface IAnthropicProviderOptions {
  anthropicToken: string;
enableWebSearch?: boolean;
searchDomainAllowList?: string[];
searchDomainBlockList?: string[];
}
export class AnthropicProvider extends MultiModalModel {
@@ -20,12 +32,15 @@ export class AnthropicProvider extends MultiModalModel {
  }
  async start() {
+    await super.start();
    this.anthropicApiClient = new plugins.anthropic.default({
      apiKey: this.options.anthropicToken,
    });
  }
-  async stop() {}
+  async stop() {
+    await super.stop();
+  }
  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
@@ -62,7 +77,7 @@ export class AnthropicProvider extends MultiModalModel {
      // If we have a complete message, send it to Anthropic
      if (currentMessage) {
        const stream = await this.anthropicApiClient.messages.create({
-          model: 'claude-3-opus-20240229',
+          model: 'claude-sonnet-4-5-20250929',
          messages: [{ role: currentMessage.role, content: currentMessage.content }],
          system: '',
          stream: true,
@@ -106,7 +121,7 @@ export class AnthropicProvider extends MultiModalModel {
    }));
    const result = await this.anthropicApiClient.messages.create({
-      model: 'claude-3-opus-20240229',
+      model: 'claude-sonnet-4-5-20250929',
      system: optionsArg.systemMessage,
      messages: [
        ...messages,
@@ -153,7 +168,7 @@ export class AnthropicProvider extends MultiModalModel {
    ];
    const result = await this.anthropicApiClient.messages.create({
-      model: 'claude-3-opus-20240229',
+      model: 'claude-sonnet-4-5-20250929',
      messages: [{
        role: 'user',
        content
@@ -178,11 +193,10 @@ export class AnthropicProvider extends MultiModalModel {
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    // Convert PDF documents to images using SmartPDF
-    const smartpdfInstance = new plugins.smartpdf.SmartPdf();
    let documentImageBytesArray: Uint8Array[] = [];
    for (const pdfDocument of optionsArg.pdfDocuments) {
-      const documentImageArray = await smartpdfInstance.convertPDFToPngBytes(pdfDocument);
+      const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
      documentImageBytesArray = documentImageBytesArray.concat(documentImageArray);
    }
@@ -213,7 +227,7 @@ export class AnthropicProvider extends MultiModalModel {
    }
    const result = await this.anthropicApiClient.messages.create({
-      model: 'claude-3-opus-20240229',
+      model: 'claude-sonnet-4-5-20250929',
      system: optionsArg.systemMessage,
      messages: [
        ...messages,
@@ -237,4 +251,155 @@ export class AnthropicProvider extends MultiModalModel {
      }
    };
  }
public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
// Prepare the messages for the research request
const systemMessage = `You are a research assistant with web search capabilities.
Provide comprehensive, well-researched answers with citations and sources.
When searching the web, be thorough and cite your sources accurately.`;
try {
// Build the tool configuration for web search
const tools: any[] = [];
if (this.options.enableWebSearch) {
const webSearchTool: any = {
type: 'web_search_20250305',
name: 'web_search'
};
// Add optional parameters
if (optionsArg.maxSources) {
webSearchTool.max_uses = optionsArg.maxSources;
}
if (this.options.searchDomainAllowList?.length) {
webSearchTool.allowed_domains = this.options.searchDomainAllowList;
} else if (this.options.searchDomainBlockList?.length) {
webSearchTool.blocked_domains = this.options.searchDomainBlockList;
}
tools.push(webSearchTool);
}
// Configure the request based on search depth
const maxTokens = optionsArg.searchDepth === 'deep' ? 8192 :
optionsArg.searchDepth === 'advanced' ? 6144 : 4096;
// Create the research request
const requestParams: any = {
model: 'claude-sonnet-4-5-20250929',
system: systemMessage,
messages: [
{
role: 'user' as const,
content: optionsArg.query
}
],
max_tokens: maxTokens,
temperature: 0.7
};
// Add tools if web search is enabled
if (tools.length > 0) {
requestParams.tools = tools;
}
// Execute the research request
const result = await this.anthropicApiClient.messages.create(requestParams);
// Extract the answer from content blocks
let answer = '';
const sources: Array<{ url: string; title: string; snippet: string }> = [];
const searchQueries: string[] = [];
// Process content blocks
for (const block of result.content) {
if ('text' in block) {
// Accumulate text content
answer += block.text;
// Extract citations if present
if ('citations' in block && Array.isArray(block.citations)) {
for (const citation of block.citations) {
if (citation.type === 'web_search_result_location') {
sources.push({
title: citation.title || '',
url: citation.url || '',
snippet: citation.cited_text || ''
});
}
}
}
} else if ('type' in block && block.type === 'server_tool_use') {
// Extract search queries from server tool use
if (block.name === 'web_search' && block.input && typeof block.input === 'object' && 'query' in block.input) {
searchQueries.push((block.input as any).query);
}
} else if ('type' in block && block.type === 'web_search_tool_result') {
// Extract sources from web search results
if (Array.isArray(block.content)) {
for (const result of block.content) {
if (result.type === 'web_search_result') {
// Only add if not already in sources (avoid duplicates from citations)
if (!sources.some(s => s.url === result.url)) {
sources.push({
title: result.title || '',
url: result.url || '',
snippet: '' // Search results don't include snippets, only citations do
});
}
}
}
}
}
}
// Fallback: Parse markdown-style links if no citations found
if (sources.length === 0) {
const urlRegex = /\[([^\]]+)\]\(([^)]+)\)/g;
let match: RegExpExecArray | null;
while ((match = urlRegex.exec(answer)) !== null) {
sources.push({
title: match[1],
url: match[2],
snippet: ''
});
}
}
// Check if web search was used based on usage info
const webSearchCount = result.usage?.server_tool_use?.web_search_requests || 0;
return {
answer,
sources,
searchQueries: searchQueries.length > 0 ? searchQueries : undefined,
metadata: {
model: 'claude-sonnet-4-5-20250929',
searchDepth: optionsArg.searchDepth || 'basic',
tokensUsed: result.usage?.output_tokens,
webSearchesPerformed: webSearchCount
}
};
} catch (error) {
console.error('Anthropic research error:', error);
throw new Error(`Failed to perform research: ${error.message}`);
}
}
/**
* Image generation is not supported by Anthropic
*/
public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
throw new Error('Image generation is not supported by Anthropic. Claude can only analyze images, not generate them. Please use OpenAI provider for image generation.');
}
/**
* Image editing is not supported by Anthropic
*/
public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
throw new Error('Image editing is not supported by Anthropic. Claude can only analyze images, not edit them. Please use OpenAI provider for image editing.');
}
}
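The markdown-link fallback at the end of research() is worth illustrating: when the model cites inline instead of via citation blocks, the regex recovers title/url pairs directly from the answer text. A self-contained check using the same pattern as the implementation:

// Same regex as the fallback in AnthropicProvider.research()
const answer = 'See [Docker docs](https://docs.docker.com) and [OCI spec](https://opencontainers.org).';
const urlRegex = /\[([^\]]+)\]\(([^)]+)\)/g;
const sources: Array<{ title: string; url: string; snippet: string }> = [];
let match: RegExpExecArray | null;
while ((match = urlRegex.exec(answer)) !== null) {
  sources.push({ title: match[1], url: match[2], snippet: '' });
}
// sources[0] -> { title: 'Docker docs', url: 'https://docs.docker.com', snippet: '' }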


@@ -1,7 +1,16 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
-import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
+import type {
ChatOptions,
ChatResponse,
ChatMessage,
ResearchOptions,
ResearchResponse,
ImageGenerateOptions,
ImageEditOptions,
ImageResponse
} from './abstract.classes.multimodal.js';
import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';
export interface IExoProviderOptions {
@@ -125,4 +134,22 @@ export class ExoProvider extends MultiModalModel {
  }): Promise<{ message: any }> {
    throw new Error('Document processing is not supported by Exo provider');
  }
public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
throw new Error('Research capabilities are not yet supported by Exo provider.');
}
/**
* Image generation is not supported by Exo
*/
public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
throw new Error('Image generation is not supported by Exo. Please use OpenAI provider for image generation.');
}
/**
* Image editing is not supported by Exo
*/
public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
throw new Error('Image editing is not supported by Exo. Please use OpenAI provider for image editing.');
}
}


@@ -1,7 +1,16 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
-import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
+import type {
ChatOptions,
ChatResponse,
ChatMessage,
ResearchOptions,
ResearchResponse,
ImageGenerateOptions,
ImageEditOptions,
ImageResponse
} from './abstract.classes.multimodal.js';
export interface IGroqProviderOptions {
  groqToken: string;
@@ -189,4 +198,22 @@ export class GroqProvider extends MultiModalModel {
  }): Promise<{ message: any }> {
    throw new Error('Document processing is not yet supported by Groq.');
  }
public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
throw new Error('Research capabilities are not yet supported by Groq provider.');
}
/**
* Image generation is not supported by Groq
*/
public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
throw new Error('Image generation is not supported by Groq. Please use OpenAI provider for image generation.');
}
/**
* Image editing is not supported by Groq
*/
public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
throw new Error('Image editing is not supported by Groq. Please use OpenAI provider for image editing.');
}
}


@@ -1,7 +1,16 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
-import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
+import type {
ChatOptions,
ChatResponse,
ChatMessage,
ResearchOptions,
ResearchResponse,
ImageGenerateOptions,
ImageEditOptions,
ImageResponse
} from './abstract.classes.multimodal.js';
export interface IOllamaProviderOptions {
  baseUrl?: string;
@@ -24,6 +33,7 @@ export class OllamaProvider extends MultiModalModel {
  }
  async start() {
+    await super.start();
    // Verify Ollama is running
    try {
      const response = await fetch(`${this.baseUrl}/api/tags`);
@@ -35,7 +45,9 @@ export class OllamaProvider extends MultiModalModel {
    }
  }
-  async stop() {}
+  async stop() {
+    await super.stop();
+  }
  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
@@ -205,11 +217,10 @@ export class OllamaProvider extends MultiModalModel {
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    // Convert PDF documents to images using SmartPDF
-    const smartpdfInstance = new plugins.smartpdf.SmartPdf();
    let documentImageBytesArray: Uint8Array[] = [];
    for (const pdfDocument of optionsArg.pdfDocuments) {
-      const documentImageArray = await smartpdfInstance.convertPDFToPngBytes(pdfDocument);
+      const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
      documentImageBytesArray = documentImageBytesArray.concat(documentImageArray);
    }
@@ -249,4 +260,22 @@ export class OllamaProvider extends MultiModalModel {
      }
    };
  }
public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
throw new Error('Research capabilities are not yet supported by Ollama provider.');
}
/**
* Image generation is not supported by Ollama
*/
public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
throw new Error('Image generation is not supported by Ollama. Please use OpenAI provider for image generation.');
}
/**
* Image editing is not supported by Ollama
*/
public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
throw new Error('Image editing is not supported by Ollama. Please use OpenAI provider for image editing.');
}
}


@@ -1,5 +1,6 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { Readable } from 'stream';
// Custom type definition for chat completion messages
export type TChatCompletionRequestMessage = {
@@ -8,19 +9,27 @@ export type TChatCompletionRequestMessage = {
};
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type {
ResearchOptions,
ResearchResponse,
ImageGenerateOptions,
ImageEditOptions,
ImageResponse
} from './abstract.classes.multimodal.js';
export interface IOpenaiProviderOptions { export interface IOpenaiProviderOptions {
openaiToken: string; openaiToken: string;
chatModel?: string; chatModel?: string;
audioModel?: string; audioModel?: string;
visionModel?: string; visionModel?: string;
// Optionally add more model options (e.g., documentModel) if needed. researchModel?: string;
imageModel?: string;
enableWebSearch?: boolean;
} }
export class OpenAiProvider extends MultiModalModel { export class OpenAiProvider extends MultiModalModel {
private options: IOpenaiProviderOptions; private options: IOpenaiProviderOptions;
public openAiApiClient: plugins.openai.default; public openAiApiClient: plugins.openai.default;
public smartpdfInstance: plugins.smartpdf.SmartPdf;
constructor(optionsArg: IOpenaiProviderOptions) { constructor(optionsArg: IOpenaiProviderOptions) {
super(); super();
@@ -28,14 +37,16 @@ export class OpenAiProvider extends MultiModalModel {
  }

  public async start() {
+    await super.start();
    this.openAiApiClient = new plugins.openai.default({
      apiKey: this.options.openaiToken,
      dangerouslyAllowBrowser: true,
    });
+    this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
  }

-  public async stop() {}
+  public async stop() {
+    await super.stop();
+  }

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
@@ -75,7 +86,7 @@ export class OpenAiProvider extends MultiModalModel {
        // If we have a complete message, send it to OpenAI
        if (currentMessage) {
          const messageToSend = { role: "user" as const, content: currentMessage.content };
-          const chatModel = this.options.chatModel ?? 'o3-mini';
+          const chatModel = this.options.chatModel ?? 'gpt-5-mini';
          const requestParams: any = {
            model: chatModel,
            messages: [messageToSend],
@@ -121,7 +132,7 @@ export class OpenAiProvider extends MultiModalModel {
      content: string;
    }[];
  }) {
-    const chatModel = this.options.chatModel ?? 'o3-mini';
+    const chatModel = this.options.chatModel ?? 'gpt-5-mini';
    const requestParams: any = {
      model: chatModel,
      messages: [
@@ -148,7 +159,8 @@ export class OpenAiProvider extends MultiModalModel {
      speed: 1,
    });
    const stream = result.body;
-    done.resolve(stream);
+    const nodeStream = Readable.fromWeb(stream as any);
+    done.resolve(nodeStream);
    return done.promise;
  }
@@ -164,13 +176,10 @@ export class OpenAiProvider extends MultiModalModel {
    let pdfDocumentImageBytesArray: Uint8Array[] = [];

    // Convert each PDF into one or more image byte arrays.
-    const smartpdfInstance = new plugins.smartpdf.SmartPdf();
-    await smartpdfInstance.start();
    for (const pdfDocument of optionsArg.pdfDocuments) {
-      const documentImageArray = await smartpdfInstance.convertPDFToPngBytes(pdfDocument);
+      const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
      pdfDocumentImageBytesArray = pdfDocumentImageBytesArray.concat(documentImageArray);
    }
-    await smartpdfInstance.stop();

    console.log(`image smartfile array`);
    console.log(pdfDocumentImageBytesArray.map((smartfile) => smartfile.length));
@@ -184,7 +193,7 @@ export class OpenAiProvider extends MultiModalModel {
      },
    }));

-    const chatModel = this.options.chatModel ?? 'o4-mini';
+    const chatModel = this.options.chatModel ?? 'gpt-5-mini';
    const requestParams: any = {
      model: chatModel,
      messages: [
@@ -229,4 +238,218 @@ export class OpenAiProvider extends MultiModalModel {
    const result = await this.openAiApiClient.chat.completions.create(requestParams);
    return result.choices[0].message.content || '';
  }
+
+  public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
+    // Determine which model to use - Deep Research API requires specific models
+    let model: string;
+    if (optionsArg.searchDepth === 'deep') {
+      model = this.options.researchModel || 'o4-mini-deep-research-2025-06-26';
+    } else {
+      // For basic/advanced, still use deep research models if web search is needed
+      if (optionsArg.includeWebSearch) {
+        model = this.options.researchModel || 'o4-mini-deep-research-2025-06-26';
+      } else {
+        model = this.options.chatModel || 'gpt-5-mini';
+      }
+    }
+
+    const systemMessage = 'You are a research assistant. Provide comprehensive answers with citations and sources when available.';
+
+    // Prepare request parameters using Deep Research API format
+    const requestParams: any = {
+      model,
+      instructions: systemMessage,
+      input: optionsArg.query
+    };
+
+    // Add web search tool if requested
+    if (optionsArg.includeWebSearch || optionsArg.searchDepth === 'deep') {
+      requestParams.tools = [
+        {
+          type: 'web_search_preview',
+          search_context_size: optionsArg.searchDepth === 'deep' ? 'high' :
+                               optionsArg.searchDepth === 'advanced' ? 'medium' : 'low'
+        }
+      ];
+    }
+
+    // Add background flag for deep research
+    if (optionsArg.background && optionsArg.searchDepth === 'deep') {
+      requestParams.background = true;
+    }
+
+    try {
+      // Execute the research request using Deep Research API
+      const result = await this.openAiApiClient.responses.create(requestParams);
+
+      // Extract the answer from output items
+      let answer = '';
+      const sources: Array<{ url: string; title: string; snippet: string }> = [];
+      const searchQueries: string[] = [];
+
+      // Process output items
+      for (const item of result.output || []) {
+        // Extract message content
+        if (item.type === 'message' && 'content' in item) {
+          const messageItem = item as any;
+          for (const contentItem of messageItem.content || []) {
+            if (contentItem.type === 'output_text' && 'text' in contentItem) {
+              answer += contentItem.text;
+            }
+          }
+        }
+
+        // Extract web search queries
+        if (item.type === 'web_search_call' && 'action' in item) {
+          const searchItem = item as any;
+          if (searchItem.action && searchItem.action.type === 'search' && 'query' in searchItem.action) {
+            searchQueries.push(searchItem.action.query);
+          }
+        }
+      }
+
+      // Parse sources from markdown links in the answer
+      const urlRegex = /\[([^\]]+)\]\(([^)]+)\)/g;
+      let match: RegExpExecArray | null;
+      while ((match = urlRegex.exec(answer)) !== null) {
+        sources.push({
+          title: match[1],
+          url: match[2],
+          snippet: ''
+        });
+      }
+
+      return {
+        answer,
+        sources,
+        searchQueries: searchQueries.length > 0 ? searchQueries : undefined,
+        metadata: {
+          model,
+          searchDepth: optionsArg.searchDepth || 'basic',
+          tokensUsed: result.usage?.total_tokens
+        }
+      };
+    } catch (error) {
+      console.error('Research API error:', error);
+      throw new Error(`Failed to perform research: ${error.message}`);
+    }
+  }
+
+  /**
+   * Image generation using OpenAI's gpt-image-1 or DALL-E models
+   */
+  public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
+    const model = optionsArg.model || this.options.imageModel || 'gpt-image-1';
+
+    try {
+      const requestParams: any = {
+        model,
+        prompt: optionsArg.prompt,
+        n: optionsArg.n || 1,
+      };
+
+      // Add gpt-image-1 specific parameters
+      if (model === 'gpt-image-1') {
+        if (optionsArg.quality) requestParams.quality = optionsArg.quality;
+        if (optionsArg.size) requestParams.size = optionsArg.size;
+        if (optionsArg.background) requestParams.background = optionsArg.background;
+        if (optionsArg.outputFormat) requestParams.output_format = optionsArg.outputFormat;
+        if (optionsArg.outputCompression !== undefined) requestParams.output_compression = optionsArg.outputCompression;
+        if (optionsArg.moderation) requestParams.moderation = optionsArg.moderation;
+        if (optionsArg.stream !== undefined) requestParams.stream = optionsArg.stream;
+        if (optionsArg.partialImages !== undefined) requestParams.partial_images = optionsArg.partialImages;
+      } else if (model === 'dall-e-3') {
+        // DALL-E 3 specific parameters
+        if (optionsArg.quality) requestParams.quality = optionsArg.quality;
+        if (optionsArg.size) requestParams.size = optionsArg.size;
+        if (optionsArg.style) requestParams.style = optionsArg.style;
+        requestParams.response_format = 'b64_json'; // Always use base64 for consistency
+      } else if (model === 'dall-e-2') {
+        // DALL-E 2 specific parameters
+        if (optionsArg.size) requestParams.size = optionsArg.size;
+        requestParams.response_format = 'b64_json';
+      }
+
+      const result = await this.openAiApiClient.images.generate(requestParams);
+
+      const images = (result.data || []).map(img => ({
+        b64_json: img.b64_json,
+        url: img.url,
+        revisedPrompt: img.revised_prompt
+      }));
+
+      return {
+        images,
+        metadata: {
+          model,
+          quality: result.quality,
+          size: result.size,
+          outputFormat: result.output_format,
+          tokensUsed: result.usage?.total_tokens
+        }
+      };
+    } catch (error) {
+      console.error('Image generation error:', error);
+      throw new Error(`Failed to generate image: ${error.message}`);
+    }
+  }
+
+  /**
+   * Image editing using OpenAI's gpt-image-1 or DALL-E 2 models
+   */
+  public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
+    const model = optionsArg.model || this.options.imageModel || 'gpt-image-1';
+
+    try {
+      const requestParams: any = {
+        model,
+        image: optionsArg.image,
+        prompt: optionsArg.prompt,
+        n: optionsArg.n || 1,
+      };
+
+      // Add mask if provided
+      if (optionsArg.mask) {
+        requestParams.mask = optionsArg.mask;
+      }
+
+      // Add gpt-image-1 specific parameters
+      if (model === 'gpt-image-1') {
+        if (optionsArg.quality) requestParams.quality = optionsArg.quality;
+        if (optionsArg.size) requestParams.size = optionsArg.size;
+        if (optionsArg.background) requestParams.background = optionsArg.background;
+        if (optionsArg.outputFormat) requestParams.output_format = optionsArg.outputFormat;
+        if (optionsArg.outputCompression !== undefined) requestParams.output_compression = optionsArg.outputCompression;
+        if (optionsArg.stream !== undefined) requestParams.stream = optionsArg.stream;
+        if (optionsArg.partialImages !== undefined) requestParams.partial_images = optionsArg.partialImages;
+      } else if (model === 'dall-e-2') {
+        // DALL-E 2 specific parameters
+        if (optionsArg.size) requestParams.size = optionsArg.size;
+        requestParams.response_format = 'b64_json';
+      }
+
+      const result = await this.openAiApiClient.images.edit(requestParams);
+
+      const images = (result.data || []).map(img => ({
+        b64_json: img.b64_json,
+        url: img.url,
+        revisedPrompt: img.revised_prompt
+      }));
+
+      return {
+        images,
+        metadata: {
+          model,
+          quality: result.quality,
+          size: result.size,
+          outputFormat: result.output_format,
+          tokensUsed: result.usage?.total_tokens
+        }
+      };
+    } catch (error) {
+      console.error('Image edit error:', error);
+      throw new Error(`Failed to edit image: ${error.message}`);
+    }
+  }
}
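A compact usage sketch of the research and image APIs introduced above. The method names, option fields, and defaults mirror this diff; the credential handling, prompts, and console output are illustrative assumptions:

import { OpenAiProvider } from './provider.openai.js';

// OPENAI_TOKEN is a placeholder; supply your own credential handling.
const openai = new OpenAiProvider({ openaiToken: process.env.OPENAI_TOKEN! });
await openai.start();

// searchDepth 'deep' routes to the o4-mini deep-research model and enables web search.
const research = await openai.research({
  query: 'Summarize recent work on speculative decoding',
  searchDepth: 'deep',
});
console.log(research.answer);
console.log(`${research.sources.length} sources found`);

// Image generation defaults to gpt-image-1 unless model/imageModel overrides it.
const image = await openai.imageGenerate({
  prompt: 'A watercolor fox in a pine forest',
  size: '1024x1024',
  quality: 'high',
});
console.log(image.images[0]?.b64_json?.slice(0, 32));

await openai.stop();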

View File

@@ -1,7 +1,16 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
-import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
+import type {
+  ChatOptions,
+  ChatResponse,
+  ChatMessage,
+  ResearchOptions,
+  ResearchResponse,
+  ImageGenerateOptions,
+  ImageEditOptions,
+  ImageResponse
+} from './abstract.classes.multimodal.js';

export interface IPerplexityProviderOptions {
  perplexityToken: string;
@@ -168,4 +177,83 @@ export class PerplexityProvider extends MultiModalModel {
  }): Promise<{ message: any }> {
    throw new Error('Document processing is not supported by Perplexity.');
  }
+
+  public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
+    // Perplexity has Sonar models that are optimized for search
+    // sonar models: sonar, sonar-pro
+    const model = optionsArg.searchDepth === 'deep' ? 'sonar-pro' : 'sonar';
+
+    try {
+      const response = await fetch('https://api.perplexity.ai/chat/completions', {
+        method: 'POST',
+        headers: {
+          'Authorization': `Bearer ${this.options.perplexityToken}`,
+          'Content-Type': 'application/json',
+        },
+        body: JSON.stringify({
+          model,
+          messages: [
+            {
+              role: 'system',
+              content: 'You are a helpful research assistant. Provide accurate information with sources.'
+            },
+            {
+              role: 'user',
+              content: optionsArg.query
+            }
+          ],
+          temperature: 0.7,
+          max_tokens: 4000
+        }),
+      });
+
+      if (!response.ok) {
+        throw new Error(`Perplexity API error: ${response.statusText}`);
+      }
+
+      const result = await response.json();
+      const answer = result.choices[0].message.content;
+
+      // Parse citations from the response
+      const sources: Array<{ url: string; title: string; snippet: string }> = [];
+
+      // Perplexity includes citations in the format [1], [2], etc. with sources listed
+      // This is a simplified parser - could be enhanced based on actual Perplexity response format
+      if (result.citations) {
+        for (const citation of result.citations) {
+          sources.push({
+            url: citation.url || '',
+            title: citation.title || '',
+            snippet: citation.snippet || ''
+          });
+        }
+      }
+
+      return {
+        answer,
+        sources,
+        metadata: {
+          model,
+          searchDepth: optionsArg.searchDepth || 'basic'
+        }
+      };
+    } catch (error) {
+      console.error('Perplexity research error:', error);
+      throw new Error(`Failed to perform research: ${error.message}`);
+    }
+  }
+
+  /**
+   * Image generation is not supported by Perplexity
+   */
+  public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
+    throw new Error('Image generation is not supported by Perplexity. Please use OpenAI provider for image generation.');
+  }
+
+  /**
+   * Image editing is not supported by Perplexity
+   */
+  public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
+    throw new Error('Image editing is not supported by Perplexity. Please use OpenAI provider for image editing.');
+  }
}
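A short usage sketch of the Sonar-backed research path above. The model selection ('deep' maps to sonar-pro, everything else to sonar) and response shape come from this diff; the import path, token handling, and query are illustrative assumptions:

import { PerplexityProvider } from './provider.perplexity.js';

// PERPLEXITY_TOKEN is a placeholder credential.
const perplexity = new PerplexityProvider({ perplexityToken: process.env.PERPLEXITY_TOKEN! });
await perplexity.start();

// 'deep' selects sonar-pro; any other depth falls back to sonar.
const result = await perplexity.research({
  query: 'What changed in the EU AI Act trilogue?',
  searchDepth: 'deep',
});

for (const source of result.sources) {
  console.log(source.title, source.url);
}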

View File

@@ -1,7 +1,16 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
-import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
+import type {
+  ChatOptions,
+  ChatResponse,
+  ChatMessage,
+  ResearchOptions,
+  ResearchResponse,
+  ImageGenerateOptions,
+  ImageEditOptions,
+  ImageResponse
+} from './abstract.classes.multimodal.js';
import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';

export interface IXAIProviderOptions {
@@ -11,7 +20,6 @@ export interface IXAIProviderOptions {
export class XAIProvider extends MultiModalModel {
  private options: IXAIProviderOptions;
  public openAiApiClient: plugins.openai.default;
-  public smartpdfInstance: plugins.smartpdf.SmartPdf;

  constructor(optionsArg: IXAIProviderOptions) {
    super();
@@ -19,14 +27,16 @@ export class XAIProvider extends MultiModalModel {
  }

  public async start() {
+    await super.start();
    this.openAiApiClient = new plugins.openai.default({
      apiKey: this.options.xaiToken,
      baseURL: 'https://api.x.ai/v1',
    });
-    this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
  }

-  public async stop() {}
+  public async stop() {
+    await super.stop();
+  }

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
@@ -180,4 +190,22 @@ export class XAIProvider extends MultiModalModel {
      message: completion.choices[0]?.message?.content || ''
    };
  }
+
+  public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
+    throw new Error('Research capabilities are not yet supported by xAI provider.');
+  }
+
+  /**
+   * Image generation is not supported by xAI
+   */
+  public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
+    throw new Error('Image generation is not supported by xAI. Please use OpenAI provider for image generation.');
+  }
+
+  /**
+   * Image editing is not supported by xAI
+   */
+  public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
+    throw new Error('Image editing is not supported by xAI. Please use OpenAI provider for image editing.');
+  }
}
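Because the xAI stubs above throw, a caller that wants research everywhere can route around them. A hypothetical fallback helper, not part of this repository; only the provider classes and the research signature are taken from this diff:

import { OpenAiProvider } from './provider.openai.js';
import { XAIProvider } from './provider.xai.js';
import type { ResearchOptions, ResearchResponse } from './abstract.classes.multimodal.js';

async function researchWithFallback(
  xai: XAIProvider,
  openai: OpenAiProvider,
  options: ResearchOptions
): Promise<ResearchResponse> {
  try {
    // Currently always throws for xAI; kept for when support lands.
    return await xai.research(options);
  } catch {
    // OpenAI implements research via the Responses API above.
    return openai.research(options);
  }
}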