feat(documentation): comprehensive documentation enhancement and test improvements
changelog.md

@@ -1,5 +1,13 @@
 # Changelog
 
+## 2025-07-25 - 0.5.5 - feat(documentation)
+Comprehensive documentation enhancement and test improvements
+
+- Completely rewrote readme.md with detailed provider comparisons, advanced usage examples, and performance tips
+- Added comprehensive examples for all supported providers (OpenAI, Anthropic, Perplexity, Groq, XAI, Ollama, Exo)
+- Included detailed sections on chat interactions, streaming, TTS, vision processing, and document analysis
+- Added verbose flag to test script for better debugging
+
 ## 2025-05-13 - 0.5.4 - fix(provider.openai)
 Update dependency versions, clean test imports, and adjust default OpenAI model configurations
package.json (17 lines changed)

@@ -1,6 +1,6 @@
 {
   "name": "@push.rocks/smartai",
-  "version": "0.5.4",
+  "version": "0.5.5",
   "private": false,
   "description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
   "main": "dist_ts/index.js",
@@ -9,29 +9,29 @@
   "author": "Task Venture Capital GmbH",
   "license": "MIT",
   "scripts": {
-    "test": "(tstest test/ --web)",
+    "test": "(tstest test/ --web --verbose)",
     "build": "(tsbuild --web --allowimplicitany)",
     "buildDocs": "(tsdoc)"
   },
   "devDependencies": {
-    "@git.zone/tsbuild": "^2.3.2",
+    "@git.zone/tsbuild": "^2.6.4",
-    "@git.zone/tsbundle": "^2.2.5",
+    "@git.zone/tsbundle": "^2.5.1",
     "@git.zone/tsrun": "^1.3.3",
-    "@git.zone/tstest": "^1.0.96",
+    "@git.zone/tstest": "^2.3.2",
     "@push.rocks/qenv": "^6.1.0",
     "@push.rocks/tapbundle": "^6.0.3",
     "@types/node": "^22.15.17"
   },
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.50.4",
+    "@anthropic-ai/sdk": "^0.57.0",
     "@push.rocks/smartarray": "^1.1.0",
-    "@push.rocks/smartfile": "^11.2.0",
+    "@push.rocks/smartfile": "^11.2.5",
     "@push.rocks/smartpath": "^5.0.18",
     "@push.rocks/smartpdf": "^3.2.2",
     "@push.rocks/smartpromise": "^4.2.3",
     "@push.rocks/smartrequest": "^2.1.0",
     "@push.rocks/webstream": "^1.0.10",
-    "openai": "^4.98.0"
+    "openai": "^5.10.2"
   },
   "repository": {
     "type": "git",
@@ -82,6 +82,7 @@
   ],
   "pnpm": {
     "onlyBuiltDependencies": [
+      "esbuild",
       "puppeteer"
     ]
   },
pnpm-lock.yaml (1569 lines changed, generated)

File diff suppressed because it is too large.
readme.md (392 lines changed)

@@ -1,222 +1,392 @@
 # @push.rocks/smartai
 
-SmartAi is a TypeScript library providing a unified interface for integrating and interacting with multiple AI models, supporting chat interactions, audio and document processing, and vision tasks.
+SmartAi is a powerful TypeScript library that provides a unified interface for integrating with multiple AI providers including OpenAI, Anthropic, Perplexity, Ollama, Groq, XAI, and Exo. It offers comprehensive support for chat interactions, streaming conversations, text-to-speech, document analysis, and vision processing.
 
 ## Install
 
-To install SmartAi into your project, you need to run the following command in your terminal:
+To install SmartAi into your project, use pnpm:
 
 ```bash
-npm install @push.rocks/smartai
+pnpm install @push.rocks/smartai
 ```
 
-This command will add the SmartAi library to your project's dependencies, making it available for use in your TypeScript application.
-
 ## Usage
 
-SmartAi is designed to provide a comprehensive and unified API for working seamlessly with multiple AI providers like OpenAI, Anthropic, Perplexity, and others. Below we will delve into how to make the most out of this library, illustrating the setup and functionality with in-depth examples. Our scenarios will explore synchronous and streaming interactions, audio generation, document handling, and vision tasks with different AI providers.
+SmartAi provides a clean, consistent API across all supported AI providers. This documentation covers all features with practical examples for each provider and capability.
 
 ### Initialization
 
-Initialization is the first step before using any AI functionalities. You should provide API tokens for each provider you plan to utilize.
+First, initialize SmartAi with the API tokens and configuration for the providers you want to use:
 
 ```typescript
 import { SmartAi } from '@push.rocks/smartai';
 
 const smartAi = new SmartAi({
-  openaiToken: 'your-openai-token',
-  anthropicToken: 'your-anthropic-token',
-  perplexityToken: 'your-perplexity-token',
-  xaiToken: 'your-xai-token',
-  groqToken: 'your-groq-token',
+  // OpenAI - for GPT models, DALL-E, and TTS
+  openaiToken: 'your-openai-api-key',
+
+  // Anthropic - for Claude models
+  anthropicToken: 'your-anthropic-api-key',
+
+  // Perplexity - for research-focused AI
+  perplexityToken: 'your-perplexity-api-key',
+
+  // Groq - for fast inference
+  groqToken: 'your-groq-api-key',
+
+  // XAI - for Grok models
+  xaiToken: 'your-xai-api-key',
+
+  // Ollama - for local models
   ollama: {
     baseUrl: 'http://localhost:11434',
-    model: 'llama2',
-    visionModel: 'llava'
+    model: 'llama2', // default model for chat
+    visionModel: 'llava' // default model for vision
   },
+
+  // Exo - for distributed inference
   exo: {
     baseUrl: 'http://localhost:8080/v1',
-    apiKey: 'your-api-key'
+    apiKey: 'your-exo-api-key'
   }
 });
 
+// Start the SmartAi instance
 await smartAi.start();
 ```
 
-### Chat Interactions
+## Supported Providers
 
-Interaction through chat is a key feature. SmartAi caters to both synchronous and asynchronous (streaming) chats across several AI models.
+SmartAi supports the following AI providers:
 
-#### Regular Synchronous Chat
+| Provider | Use Case | Key Features |
+|----------|----------|--------------|
+| **OpenAI** | General purpose, GPT models | Chat, streaming, TTS, vision, documents |
+| **Anthropic** | Claude models, safety-focused | Chat, streaming, vision, documents |
+| **Perplexity** | Research and factual queries | Chat, streaming, documents |
+| **Groq** | Fast inference | Chat, streaming |
+| **XAI** | Grok models | Chat, streaming |
+| **Ollama** | Local models | Chat, streaming, vision |
+| **Exo** | Distributed inference | Chat, streaming |
 
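Because every provider in the table exposes the same `chat()` call shape (shown in the examples below), the provider can also be chosen at runtime. A minimal sketch: the `providers` map is a hypothetical helper, and `smartAi` is the started instance from the initialization example above.

```typescript
// Hypothetical lookup map over provider properties used in this readme;
// `smartAi` is the started instance from the initialization example.
const providers = {
  openai: smartAi.openaiProvider,
  anthropic: smartAi.anthropicProvider,
  groq: smartAi.groqProvider
};

const providerName = 'anthropic' as keyof typeof providers;
const reply = await providers[providerName].chat({
  systemMessage: 'You are a helpful assistant.',
  userMessage: 'Which provider answered this?',
  messageHistory: []
});
console.log(reply.message);
```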
-Connect with AI models via straightforward request-response interactions.
+## Core Features
+
+### 1. Chat Interactions
+
+SmartAi provides both synchronous and streaming chat capabilities across all supported providers.
+
+#### Synchronous Chat
+
+Simple request-response interactions with any provider:
 
 ```typescript
-const syncResponse = await smartAi.openaiProvider.chat({
+// OpenAI Example
+const openAiResponse = await smartAi.openaiProvider.chat({
   systemMessage: 'You are a helpful assistant.',
   userMessage: 'What is the capital of France?',
-  messageHistory: [] // Could include context or preceding messages
+  messageHistory: []
 });
+console.log(openAiResponse.message); // "The capital of France is Paris."
 
-console.log(syncResponse.message); // Outputs: "The capital of France is Paris."
+// Anthropic Example
+const anthropicResponse = await smartAi.anthropicProvider.chat({
+  systemMessage: 'You are a knowledgeable historian.',
+  userMessage: 'Tell me about the French Revolution',
+  messageHistory: []
+});
+console.log(anthropicResponse.message);
+
+// Using message history for context
+const contextualResponse = await smartAi.openaiProvider.chat({
+  systemMessage: 'You are a math tutor.',
+  userMessage: 'What about multiplication?',
+  messageHistory: [
+    { role: 'user', content: 'Can you teach me math?' },
+    { role: 'assistant', content: 'Of course! What would you like to learn?' }
+  ]
+});
 ```
 
-#### Real-Time Streaming Chat
+#### Streaming Chat
 
-For continuous interaction and lower latency, engage in streaming chat.
+For real-time, token-by-token responses:
 
 ```typescript
-const textEncoder = new TextEncoder();
-const textDecoder = new TextDecoder();
-
-// Establish a transform stream
-const { writable, readable } = new TransformStream();
+// Create a readable stream for input
+const { readable, writable } = new TransformStream();
 const writer = writable.getWriter();
 
-const message = {
+// Send a message
+const encoder = new TextEncoder();
+await writer.write(encoder.encode(JSON.stringify({
   role: 'user',
-  content: 'Tell me a story about a brave knight'
-};
-
-writer.write(textEncoder.encode(JSON.stringify(message) + '\n'));
-
-// Initiate streaming
-const stream = await smartAi.openaiProvider.chatStream(readable);
-const reader = stream.getReader();
+  content: 'Write a haiku about programming'
+})));
+await writer.close();
+
+// Get streaming response
+const responseStream = await smartAi.openaiProvider.chatStream(readable);
+const reader = responseStream.getReader();
+const decoder = new TextDecoder();
 
+// Read the stream
 while (true) {
   const { done, value } = await reader.read();
   if (done) break;
-  console.log('AI:', value);
+  process.stdout.write(value); // Print each chunk as it arrives
 }
 ```
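If you want the whole reply as a single string rather than printing chunks as they arrive, swap the printing loop for an accumulator. A minimal sketch, assuming (as the example above does) that `chatStream` yields decoded text chunks:

```typescript
// Accumulate the streamed chunks instead of printing them
// (replaces the while-loop above; a stream can only be read once).
let fullReply = '';
const replyReader = responseStream.getReader();
while (true) {
  const { done, value } = await replyReader.read();
  if (done) break;
  fullReply += value;
}
console.log('Full reply:', fullReply);
```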
 
-### Audio Generation
+### 2. Text-to-Speech (Audio Generation)
 
-Audio generation from textual input is possible using providers like OpenAI.
+Convert text to natural-sounding speech (currently supported by OpenAI):
 
 ```typescript
+import * as fs from 'fs';
+
+// Generate speech from text
 const audioStream = await smartAi.openaiProvider.audio({
-  message: 'This is a test message for generating speech.'
+  message: 'Hello world! This is a test of the text-to-speech system.'
 });
 
-// Use the audioStream e.g., playing or saving it.
+// Save to file
+const writeStream = fs.createWriteStream('output.mp3');
+audioStream.pipe(writeStream);
+
+// Or use in your application directly
+audioStream.on('data', (chunk) => {
+  // Process audio chunks
+});
 ```
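To hold the audio in memory instead of writing it to disk, the chunks can be buffered. A sketch assuming `audio()` returns a Node.js readable stream emitting `Buffer` chunks, as the `pipe`/`on('data')` usage above implies:

```typescript
// Buffer the whole audio stream into memory (assumes a Node.js
// Readable emitting Buffer chunks, as the example above suggests).
const chunks: Buffer[] = [];
audioStream.on('data', (chunk: Buffer) => chunks.push(chunk));
audioStream.on('end', () => {
  const audioBuffer = Buffer.concat(chunks);
  console.log(`Received ${audioBuffer.length} bytes of audio`);
});
```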
 
-### Document Analysis
+### 3. Vision Processing
 
-SmartAi can ingest and process documents, extracting meaningful information or performing classifications.
+Analyze images and get detailed descriptions:
 
 ```typescript
-const pdfBuffer = await fetchPdf('https://example.com/document.pdf');
-const documentRes = await smartAi.openaiProvider.document({
-  systemMessage: 'Determine the nature of the document.',
-  userMessage: 'Classify this document.',
+import * as fs from 'fs';
+
+// Read an image file
+const imageBuffer = fs.readFileSync('image.jpg');
+
+// OpenAI Vision
+const openAiVision = await smartAi.openaiProvider.vision({
+  image: imageBuffer,
+  prompt: 'What is in this image? Describe in detail.'
+});
+console.log('OpenAI:', openAiVision);
+
+// Anthropic Vision
+const anthropicVision = await smartAi.anthropicProvider.vision({
+  image: imageBuffer,
+  prompt: 'Analyze this image and identify any text or objects.'
+});
+console.log('Anthropic:', anthropicVision);
+
+// Ollama Vision (using local model)
+const ollamaVision = await smartAi.ollamaProvider.vision({
+  image: imageBuffer,
+  prompt: 'Describe the colors and composition of this image.'
+});
+console.log('Ollama:', ollamaVision);
+```
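Images don't have to come from disk; anything that yields a `Buffer` works. A sketch using the built-in `fetch` of Node 18+ to pull an image from a URL (the URL is a placeholder):

```typescript
// Fetch an image over HTTP and hand it to vision() as a Buffer.
// Works on Node 18+ where fetch is built in; the URL is a placeholder.
const response = await fetch('https://example.com/photo.jpg');
const remoteImage = Buffer.from(await response.arrayBuffer());

const description = await smartAi.openaiProvider.vision({
  image: remoteImage,
  prompt: 'Describe this image.'
});
console.log(description);
```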
 
+### 4. Document Analysis
+
+Process and analyze PDF documents with AI:
+
+```typescript
+import * as fs from 'fs';
+
+// Read PDF documents
+const pdfBuffer = fs.readFileSync('document.pdf');
+
+// Analyze with OpenAI
+const openAiAnalysis = await smartAi.openaiProvider.document({
+  systemMessage: 'You are a document analyst. Extract key information.',
+  userMessage: 'Summarize this document and list the main points.',
   messageHistory: [],
   pdfDocuments: [pdfBuffer]
 });
-
-console.log(documentRes.message); // Outputs: classified document type
-```
-
-SmartAi allows easy switching between providers, thus giving developers flexibility:
-
-```typescript
-const anthopicRes = await smartAi.anthropicProvider.document({
-  systemMessage: 'Analyze this document.',
-  userMessage: 'Extract core points.',
+console.log('OpenAI Analysis:', openAiAnalysis.message);
+
+// Analyze with Anthropic
+const anthropicAnalysis = await smartAi.anthropicProvider.document({
+  systemMessage: 'You are a legal expert.',
+  userMessage: 'Identify any legal terms or implications in this document.',
   messageHistory: [],
   pdfDocuments: [pdfBuffer]
 });
-
-console.log(anthopicRes.message); // Outputs: summarized core points
+console.log('Anthropic Analysis:', anthropicAnalysis.message);
+
+// Process multiple documents
+const doc1 = fs.readFileSync('contract1.pdf');
+const doc2 = fs.readFileSync('contract2.pdf');
+
+const comparison = await smartAi.openaiProvider.document({
+  systemMessage: 'You are a contract analyst.',
+  userMessage: 'Compare these two contracts and highlight the differences.',
+  messageHistory: [],
+  pdfDocuments: [doc1, doc2]
+});
+console.log('Comparison:', comparison.message);
 ```
 
-### Vision Processing
+### 5. Conversation Management
 
-Engage AI models in analyzing and describing images:
+Create persistent conversation sessions with any provider:
 
 ```typescript
-const imageBuffer = await fetchImage('path/to/image.jpg');
-
-// Using OpenAI's vision capabilities
-const visionOutput = await smartAi.openaiProvider.vision({
-  image: imageBuffer,
-  prompt: 'Describe the image.'
-});
-
-console.log(visionOutput); // Outputs: image description
+// Create a conversation with OpenAI
+const conversation = smartAi.createConversation('openai');
+
+// Set the system message
+await conversation.setSystemMessage('You are a helpful coding assistant.');
+
+// Get input and output streams
+const inputWriter = conversation.getInputStreamWriter();
+const outputStream = conversation.getOutputStream();
+
+// Set up output reader
+const reader = outputStream.getReader();
+const decoder = new TextDecoder();
+
+// Send messages
+await inputWriter.write('How do I create a REST API in Node.js?');
+
+// Read responses
+while (true) {
+  const { done, value } = await reader.read();
+  if (done) break;
+  console.log('Assistant:', decoder.decode(value));
+}
+
+// Continue the conversation
+await inputWriter.write('Can you show me an example with Express?');
+
+// Create conversations with different providers
+const anthropicConversation = smartAi.createConversation('anthropic');
+const groqConversation = smartAi.createConversation('groq');
 ```
 
-Use other providers for more varied analysis:
+## Advanced Usage
 
-```typescript
-const ollamaOutput = await smartAi.ollamaProvider.vision({
-  image: imageBuffer,
-  prompt: 'Detailed analysis required.'
-});
-
-console.log(ollamaOutput); // Outputs: detailed analysis results
-```
-
 ### Error Handling
 
-Due to the nature of external integrations, ensure to wrap AI calls within try-catch blocks.
+Always wrap AI operations in try-catch blocks for robust error handling:
 
 ```typescript
 try {
-  const response = await smartAi.anthropicProvider.chat({
-    systemMessage: 'Hello!',
-    userMessage: 'Help me out.',
+  const response = await smartAi.openaiProvider.chat({
+    systemMessage: 'You are an assistant.',
+    userMessage: 'Hello!',
     messageHistory: []
   });
   console.log(response.message);
-} catch (error: any) {
-  console.error('Encountered an error:', error.message);
+} catch (error) {
+  if (error.code === 'rate_limit_exceeded') {
+    console.error('Rate limit hit, please retry later');
+  } else if (error.code === 'invalid_api_key') {
+    console.error('Invalid API key provided');
+  } else {
+    console.error('Unexpected error:', error.message);
+  }
 }
 ```
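For transient failures such as the rate-limit case above, a retry wrapper is a natural next step. A minimal sketch with exponential backoff; the `withRetry` helper and the `rate_limit_exceeded` code are illustrative, not part of the SmartAi API:

```typescript
// Hypothetical retry helper with exponential backoff for transient
// errors; the error code is illustrative, not a documented SmartAi code.
async function withRetry<T>(operation: () => Promise<T>, maxAttempts = 3): Promise<T> {
  for (let attempt = 1; ; attempt++) {
    try {
      return await operation();
    } catch (error: any) {
      if (attempt >= maxAttempts || error.code !== 'rate_limit_exceeded') throw error;
      // Wait 2s, 4s, 8s... before the next attempt
      await new Promise((resolve) => setTimeout(resolve, 1000 * 2 ** attempt));
    }
  }
}

const chatResult = await withRetry(() => smartAi.openaiProvider.chat({
  systemMessage: 'You are an assistant.',
  userMessage: 'Hello!',
  messageHistory: []
}));
console.log(chatResult.message);
```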
 
-### Providers and Customization
+### Streaming with Custom Processing
 
-The library supports provider-specific customization, enabling tailored interactions:
+Implement custom transformations on streaming responses:
 
 ```typescript
-const smartAi = new SmartAi({
-  openaiToken: 'your-openai-token',
-  anthropicToken: 'your-anthropic-token',
-  ollama: {
-    baseUrl: 'http://localhost:11434',
-    model: 'llama2',
-    visionModel: 'llava'
-  }
-});
-
-await smartAi.start();
-```
-
-### Advanced Streaming Customization
-
-Developers can implement real-time processing pipelines with custom transformations:
-
-```typescript
-const customProcessingStream = new TransformStream({
+// Create a custom transform stream
+const customTransform = new TransformStream({
   transform(chunk, controller) {
-    const processed = chunk.toUpperCase(); // Example transformation
-    controller.enqueue(processed);
+    // Example: Add timestamps to each chunk
+    const timestamp = new Date().toISOString();
+    controller.enqueue(`[${timestamp}] ${chunk}`);
   }
 });
 
-const processedStream = stream.pipeThrough(customProcessingStream);
-const processedReader = processedStream.getReader();
+// Apply to streaming chat
+const inputStream = new ReadableStream({
+  start(controller) {
+    controller.enqueue(new TextEncoder().encode(JSON.stringify({
+      role: 'user',
+      content: 'Tell me a story'
+    })));
+    controller.close();
+  }
+});
+
+const responseStream = await smartAi.openaiProvider.chatStream(inputStream);
+const processedStream = responseStream.pipeThrough(customTransform);
 
+// Read processed stream
+const reader = processedStream.getReader();
 while (true) {
-  const { done, value } = await processedReader.read();
+  const { done, value } = await reader.read();
   if (done) break;
-  console.log('Processed Output:', value);
+  console.log(value);
 }
 ```
 
-This approach can facilitate adaptive content processing workflows.
+### Provider-Specific Features
+
+Each provider may have unique capabilities. Here's how to leverage them:
+
+```typescript
+// OpenAI - Use specific models
+const gpt4Response = await smartAi.openaiProvider.chat({
+  systemMessage: 'You are a helpful assistant.',
+  userMessage: 'Explain quantum computing',
+  messageHistory: []
+});
+
+// Anthropic - Use Claude's strength in analysis
+const codeReview = await smartAi.anthropicProvider.chat({
+  systemMessage: 'You are a code reviewer.',
+  userMessage: 'Review this code for security issues: ...',
+  messageHistory: []
+});
+
+// Perplexity - Best for research and current events
+const research = await smartAi.perplexityProvider.chat({
+  systemMessage: 'You are a research assistant.',
+  userMessage: 'What are the latest developments in renewable energy?',
+  messageHistory: []
+});
+
+// Groq - Optimized for speed
+const quickResponse = await smartAi.groqProvider.chat({
+  systemMessage: 'You are a quick helper.',
+  userMessage: 'Give me a one-line summary of photosynthesis',
+  messageHistory: []
+});
+```
+
+### Performance Optimization
+
+Tips for optimal performance:
+
+```typescript
+// 1. Reuse providers instead of creating new instances
+const smartAi = new SmartAi({ /* config */ });
+await smartAi.start(); // Initialize once
+
+// 2. Use streaming for long responses
+// Streaming reduces time-to-first-token and memory usage
+
+// 3. Batch operations when possible
+const promises = [
+  smartAi.openaiProvider.chat({ /* ... */ }),
+  smartAi.anthropicProvider.chat({ /* ... */ })
+];
+const results = await Promise.all(promises);
+
+// 4. Clean up resources
+await smartAi.stop(); // When done
+```
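When batching many requests (tip 3 above), an unbounded `Promise.all` can trip provider rate limits. A small sketch of a concurrency cap; the `runLimited` helper is illustrative, not part of SmartAi:

```typescript
// Hypothetical helper: run tasks with at most `limit` in flight,
// to keep batched chat calls under provider rate limits.
async function runLimited<T>(tasks: (() => Promise<T>)[], limit = 2): Promise<T[]> {
  const results: T[] = new Array(tasks.length);
  let next = 0;
  const workers = Array.from({ length: Math.min(limit, tasks.length) }, async () => {
    while (next < tasks.length) {
      const index = next++; // claimed synchronously, so no two workers share an index
      results[index] = await tasks[index]();
    }
  });
  await Promise.all(workers);
  return results;
}

const answers = await runLimited(
  ['What is TypeScript?', 'What is a stream?'].map((question) => () =>
    smartAi.openaiProvider.chat({
      systemMessage: 'You are concise.',
      userMessage: question,
      messageHistory: []
    })
  )
);
```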
 
 ## License and Legal Information