Compare commits

3 Commits

| Author | SHA1 | Date |
| --- | --- | --- |
|  | 0b2a058550 |  |
|  | 88d15c89e5 |  |
|  | 4bf7113334 |  |
changelog.md (23 lines changed)

@@ -1,5 +1,28 @@
 # Changelog

+## 2025-08-01 - 0.5.8 - fix(core)
+Fix SmartPdf lifecycle management and update dependencies
+
+- Moved SmartPdf instance management to the MultiModalModel base class for better resource sharing
+- Fixed memory leaks by properly implementing cleanup in the base class stop() method
+- Updated SmartAi class to properly stop all providers on shutdown
+- Updated @push.rocks/smartrequest from v2.1.0 to v4.2.1 with migration to new API
+- Enhanced readme with professional documentation and feature matrix
+
+## 2025-07-26 - 0.5.7 - fix(provider.openai)
+Fix stream type mismatch in audio method
+
+- Fixed type error where OpenAI SDK returns a web ReadableStream but the audio method needs to return a Node.js ReadableStream
+- Added conversion using Node.js's built-in Readable.fromWeb() method
+
+## 2025-07-25 - 0.5.5 - feat(documentation)
+Comprehensive documentation enhancement and test improvements
+
+- Completely rewrote readme.md with detailed provider comparisons, advanced usage examples, and performance tips
+- Added comprehensive examples for all supported providers (OpenAI, Anthropic, Perplexity, Groq, XAI, Ollama, Exo)
+- Included detailed sections on chat interactions, streaming, TTS, vision processing, and document analysis
+- Added verbose flag to test script for better debugging
+
 ## 2025-05-13 - 0.5.4 - fix(provider.openai)
 Update dependency versions, clean test imports, and adjust default OpenAI model configurations
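The 0.5.7 entry refers to a one-line stream conversion; a minimal sketch of that idea, assuming a web `ReadableStream<Uint8Array>` such as the one the OpenAI SDK returns (the helper name here is illustrative, not part of the library):

```typescript
import { Readable } from 'node:stream';

// Bridge a WHATWG web stream to a Node.js Readable so callers can keep
// using .pipe() and other Node stream APIs. Readable.fromWeb() ships
// with Node.js 17+.
function toNodeReadable(webStream: ReadableStream<Uint8Array>): Readable {
  // The cast mirrors the `stream as any` in the provider diff further down;
  // the DOM and node:stream/web declarations differ only at the type level.
  return Readable.fromWeb(webStream as any);
}
```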
package.json (23 lines changed)

@@ -1,6 +1,6 @@
 {
   "name": "@push.rocks/smartai",
-  "version": "0.5.4",
+  "version": "0.5.8",
   "private": false,
   "description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
   "main": "dist_ts/index.js",
@@ -9,29 +9,29 @@
   "author": "Task Venture Capital GmbH",
   "license": "MIT",
   "scripts": {
-    "test": "(tstest test/ --web)",
+    "test": "(tstest test/ --web --verbose)",
     "build": "(tsbuild --web --allowimplicitany)",
     "buildDocs": "(tsdoc)"
   },
   "devDependencies": {
-    "@git.zone/tsbuild": "^2.3.2",
-    "@git.zone/tsbundle": "^2.2.5",
+    "@git.zone/tsbuild": "^2.6.4",
+    "@git.zone/tsbundle": "^2.5.1",
     "@git.zone/tsrun": "^1.3.3",
-    "@git.zone/tstest": "^1.0.96",
+    "@git.zone/tstest": "^2.3.2",
     "@push.rocks/qenv": "^6.1.0",
     "@push.rocks/tapbundle": "^6.0.3",
     "@types/node": "^22.15.17"
   },
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.50.4",
+    "@anthropic-ai/sdk": "^0.57.0",
     "@push.rocks/smartarray": "^1.1.0",
-    "@push.rocks/smartfile": "^11.2.0",
-    "@push.rocks/smartpath": "^5.0.18",
-    "@push.rocks/smartpdf": "^3.2.2",
+    "@push.rocks/smartfile": "^11.2.5",
+    "@push.rocks/smartpath": "^6.0.0",
+    "@push.rocks/smartpdf": "^3.3.0",
     "@push.rocks/smartpromise": "^4.2.3",
-    "@push.rocks/smartrequest": "^2.1.0",
+    "@push.rocks/smartrequest": "^4.2.1",
     "@push.rocks/webstream": "^1.0.10",
-    "openai": "^4.98.0"
+    "openai": "^5.11.0"
   },
   "repository": {
     "type": "git",
@@ -82,6 +82,7 @@
   ],
   "pnpm": {
     "onlyBuiltDependencies": [
+      "esbuild",
       "puppeteer"
     ]
   },
pnpm-lock.yaml (generated, 2048 lines changed)

File diff suppressed because it is too large.
readme.md (513 lines changed)

@@ -1,189 +1,269 @@
 # @push.rocks/smartai
+
+**One API to rule them all** 🚀

-SmartAi is a TypeScript library providing a unified interface for integrating and interacting with multiple AI models, supporting chat interactions, audio and document processing, and vision tasks.
+[](https://www.npmjs.com/package/@push.rocks/smartai)
+[](https://www.typescriptlang.org/)
+[](https://opensource.org/licenses/MIT)

-## Install
+SmartAI unifies the world's leading AI providers - OpenAI, Anthropic, Perplexity, Ollama, Groq, XAI, and Exo - under a single, elegant TypeScript interface. Build AI applications at lightning speed without vendor lock-in.

-To install SmartAi into your project, you need to run the following command in your terminal:
+## 🎯 Why SmartAI?
+
+- **🔌 Universal Interface**: Write once, run with any AI provider. Switch between GPT-4, Claude, Llama, or Grok with a single line change.
+- **🛡️ Type-Safe**: Full TypeScript support with comprehensive type definitions for all operations
+- **🌊 Streaming First**: Built for real-time applications with native streaming support
+- **🎨 Multi-Modal**: Seamlessly work with text, images, audio, and documents
+- **🏠 Local & Cloud**: Support for both cloud providers and local models via Ollama
+- **⚡ Zero Lock-In**: Your code remains portable across all AI providers
+
+## 🚀 Quick Start

 ```bash
 npm install @push.rocks/smartai
 ```

-This command will add the SmartAi library to your project's dependencies, making it available for use in your TypeScript application.
-
-## Usage
-
-SmartAi is designed to provide a comprehensive and unified API for working seamlessly with multiple AI providers like OpenAI, Anthropic, Perplexity, and others. Below we will delve into how to make the most out of this library, illustrating the setup and functionality with in-depth examples. Our scenarios will explore synchronous and streaming interactions, audio generation, document handling, and vision tasks with different AI providers.
-
-### Initialization
-
-Initialization is the first step before using any AI functionalities. You should provide API tokens for each provider you plan to utilize.

 ```typescript
 import { SmartAi } from '@push.rocks/smartai';

-const smartAi = new SmartAi({
-  openaiToken: 'your-openai-token',
-  anthropicToken: 'your-anthropic-token',
-  perplexityToken: 'your-perplexity-token',
-  xaiToken: 'your-xai-token',
-  groqToken: 'your-groq-token',
-  ollama: {
-    baseUrl: 'http://localhost:11434',
-    model: 'llama2',
-    visionModel: 'llava'
-  },
-  exo: {
-    baseUrl: 'http://localhost:8080/v1',
-    apiKey: 'your-api-key'
-  }
+// Initialize with your favorite providers
+const ai = new SmartAi({
+  openaiToken: 'sk-...',
+  anthropicToken: 'sk-ant-...'
 });

-await smartAi.start();
-```
-
-### Chat Interactions
-
-Interaction through chat is a key feature. SmartAi caters to both synchronous and asynchronous (streaming) chats across several AI models.
-
-#### Regular Synchronous Chat
-
-Connect with AI models via straightforward request-response interactions.
-
-```typescript
-const syncResponse = await smartAi.openaiProvider.chat({
+await ai.start();
+
+// Same API, multiple providers
+const response = await ai.openaiProvider.chat({
   systemMessage: 'You are a helpful assistant.',
-  userMessage: 'What is the capital of France?',
-  messageHistory: [] // Could include context or preceding messages
+  userMessage: 'Explain quantum computing in simple terms',
+  messageHistory: []
 });
-
-console.log(syncResponse.message); // Outputs: "The capital of France is Paris."
 ```

-#### Real-Time Streaming Chat
-
-For continuous interaction and lower latency, engage in streaming chat.
+## 📊 Provider Capabilities Matrix
+
+Choose the right provider for your use case:
+
+| Provider | Chat | Streaming | TTS | Vision | Documents | Highlights |
+|----------|:----:|:---------:|:---:|:------:|:---------:|------------|
+| **OpenAI** | ✅ | ✅ | ✅ | ✅ | ✅ | • GPT-4, DALL-E 3<br>• Industry standard<br>• Most features |
+| **Anthropic** | ✅ | ✅ | ❌ | ✅ | ✅ | • Claude 3 Opus<br>• Superior reasoning<br>• 200k context |
+| **Ollama** | ✅ | ✅ | ❌ | ✅ | ✅ | • 100% local<br>• Privacy-first<br>• No API costs |
+| **XAI** | ✅ | ✅ | ❌ | ❌ | ✅ | • Grok models<br>• Real-time data<br>• Uncensored |
+| **Perplexity** | ✅ | ✅ | ❌ | ❌ | ❌ | • Web-aware<br>• Research-focused<br>• Citations |
+| **Groq** | ✅ | ✅ | ❌ | ❌ | ❌ | • 10x faster<br>• LPU inference<br>• Low latency |
+| **Exo** | ✅ | ✅ | ❌ | ❌ | ❌ | • Distributed<br>• P2P compute<br>• Decentralized |
+
+## 🎮 Core Features
+
+### 💬 Universal Chat Interface
+
+Works identically across all providers:

 ```typescript
-const textEncoder = new TextEncoder();
-const textDecoder = new TextDecoder();
-
-// Establish a transform stream
-const { writable, readable } = new TransformStream();
-const writer = writable.getWriter();
-
-const message = {
-  role: 'user',
-  content: 'Tell me a story about a brave knight'
-};
-
-writer.write(textEncoder.encode(JSON.stringify(message) + '\n'));
-
-// Initiate streaming
-const stream = await smartAi.openaiProvider.chatStream(readable);
+// Use GPT-4 for complex reasoning
+const gptResponse = await ai.openaiProvider.chat({
+  systemMessage: 'You are a expert physicist.',
+  userMessage: 'Explain the implications of quantum entanglement',
+  messageHistory: []
+});
+
+// Use Claude for safety-critical applications
+const claudeResponse = await ai.anthropicProvider.chat({
+  systemMessage: 'You are a medical advisor.',
+  userMessage: 'Review this patient data for concerns',
+  messageHistory: []
+});
+
+// Use Groq for lightning-fast responses
+const groqResponse = await ai.groqProvider.chat({
+  systemMessage: 'You are a code reviewer.',
+  userMessage: 'Quick! Find the bug in this code: ...',
+  messageHistory: []
+});
+```
+
+### 🌊 Real-Time Streaming
+
+Build responsive chat interfaces with token-by-token streaming:
+
+```typescript
+// Create a chat stream
+const stream = await ai.openaiProvider.chatStream(inputStream);
 const reader = stream.getReader();

+// Display responses as they arrive
 while (true) {
   const { done, value } = await reader.read();
   if (done) break;
-  console.log('AI:', value);
+
+  // Update UI in real-time
+  process.stdout.write(value);
 }
 ```

-### Audio Generation
+### 🎙️ Text-to-Speech

-Audio generation from textual input is possible using providers like OpenAI.
+Generate natural voices with OpenAI:

 ```typescript
-const audioStream = await smartAi.openaiProvider.audio({
-  message: 'This is a test message for generating speech.'
+const audioStream = await ai.openaiProvider.audio({
+  message: 'Welcome to the future of AI development!'
 });

-// Use the audioStream e.g., playing or saving it.
+// Stream directly to speakers
+audioStream.pipe(speakerOutput);
+
+// Or save to file
+audioStream.pipe(fs.createWriteStream('welcome.mp3'));
 ```

-### Document Analysis
+### 👁️ Vision Analysis

-SmartAi can ingest and process documents, extracting meaningful information or performing classifications.
+Understand images with multiple providers:

 ```typescript
-const pdfBuffer = await fetchPdf('https://example.com/document.pdf');
-const documentRes = await smartAi.openaiProvider.document({
-  systemMessage: 'Determine the nature of the document.',
-  userMessage: 'Classify this document.',
-  messageHistory: [],
-  pdfDocuments: [pdfBuffer]
-});
-
-console.log(documentRes.message); // Outputs: classified document type
-```
-
-SmartAi allows easy switching between providers, thus giving developers flexibility:
-
-```typescript
-const anthopicRes = await smartAi.anthropicProvider.document({
-  systemMessage: 'Analyze this document.',
-  userMessage: 'Extract core points.',
-  messageHistory: [],
-  pdfDocuments: [pdfBuffer]
-});
-
-console.log(anthopicRes.message); // Outputs: summarized core points
-```
+const image = fs.readFileSync('product-photo.jpg');
+
+// OpenAI: General purpose vision
+const gptVision = await ai.openaiProvider.vision({
+  image,
+  prompt: 'Describe this product and suggest marketing angles'
+});
+
+// Anthropic: Detailed analysis
+const claudeVision = await ai.anthropicProvider.vision({
+  image,
+  prompt: 'Identify any safety concerns or defects'
+});
+
+// Ollama: Private, local analysis
+const ollamaVision = await ai.ollamaProvider.vision({
+  image,
+  prompt: 'Extract all text and categorize the content'
+});
+```
+
+### 📄 Document Intelligence
+
+Extract insights from PDFs with AI:
+
+```typescript
+const contract = fs.readFileSync('contract.pdf');
+const invoice = fs.readFileSync('invoice.pdf');
+
+// Analyze documents
+const analysis = await ai.openaiProvider.document({
+  systemMessage: 'You are a legal expert.',
+  userMessage: 'Compare these documents and highlight key differences',
+  messageHistory: [],
+  pdfDocuments: [contract, invoice]
+});
+
+// Multi-document analysis
+const taxDocs = [form1099, w2, receipts];
+const taxAnalysis = await ai.anthropicProvider.document({
+  systemMessage: 'You are a tax advisor.',
+  userMessage: 'Prepare a tax summary from these documents',
+  messageHistory: [],
+  pdfDocuments: taxDocs
+});
+```

-### Vision Processing
+### 🔄 Persistent Conversations

-Engage AI models in analyzing and describing images:
+Maintain context across interactions:

 ```typescript
-const imageBuffer = await fetchImage('path/to/image.jpg');
-
-// Using OpenAI's vision capabilities
-const visionOutput = await smartAi.openaiProvider.vision({
-  image: imageBuffer,
-  prompt: 'Describe the image.'
-});
-
-console.log(visionOutput); // Outputs: image description
+// Create a coding assistant conversation
+const assistant = ai.createConversation('openai');
+await assistant.setSystemMessage('You are an expert TypeScript developer.');
+
+// First question
+const inputWriter = assistant.getInputStreamWriter();
+await inputWriter.write('How do I implement a singleton pattern?');
+
+// Continue the conversation
+await inputWriter.write('Now show me how to make it thread-safe');
+
+// The assistant remembers the entire context
 ```

-Use other providers for more varied analysis:
+## 🚀 Real-World Examples
+
+### Build a Customer Support Bot

 ```typescript
-const ollamaOutput = await smartAi.ollamaProvider.vision({
-  image: imageBuffer,
-  prompt: 'Detailed analysis required.'
-});
-
-console.log(ollamaOutput); // Outputs: detailed analysis results
-```
-
-### Error Handling
-
-Due to the nature of external integrations, ensure to wrap AI calls within try-catch blocks.
-
-```typescript
-try {
-  const response = await smartAi.anthropicProvider.chat({
-    systemMessage: 'Hello!',
-    userMessage: 'Help me out.',
-    messageHistory: []
-  });
-  console.log(response.message);
-} catch (error: any) {
-  console.error('Encountered an error:', error.message);
-}
-```
+const supportBot = new SmartAi({
+  anthropicToken: process.env.ANTHROPIC_KEY // Claude for empathetic responses
+});
+
+async function handleCustomerQuery(query: string, history: ChatMessage[]) {
+  try {
+    const response = await supportBot.anthropicProvider.chat({
+      systemMessage: `You are a helpful customer support agent.
+      Be empathetic, professional, and solution-oriented.`,
+      userMessage: query,
+      messageHistory: history
+    });
+
+    return response.message;
+  } catch (error) {
+    // Fallback to another provider if needed
+    return await supportBot.openaiProvider.chat({...});
+  }
+}
+```
+
+### Create a Code Review Assistant
+
+```typescript
+const codeReviewer = new SmartAi({
+  groqToken: process.env.GROQ_KEY // Groq for speed
+});
+
+async function reviewCode(code: string, language: string) {
+  const startTime = Date.now();
+
+  const review = await codeReviewer.groqProvider.chat({
+    systemMessage: `You are a ${language} expert. Review code for:
+    - Security vulnerabilities
+    - Performance issues
+    - Best practices
+    - Potential bugs`,
+    userMessage: `Review this code:\n\n${code}`,
+    messageHistory: []
+  });
+
+  console.log(`Review completed in ${Date.now() - startTime}ms`);
+  return review.message;
+}
+```

-### Providers and Customization
+### Build a Research Assistant

-The library supports provider-specific customization, enabling tailored interactions:
-
 ```typescript
-const smartAi = new SmartAi({
-  openaiToken: 'your-openai-token',
-  anthropicToken: 'your-anthropic-token',
+const researcher = new SmartAi({
+  perplexityToken: process.env.PERPLEXITY_KEY
+});
+
+async function research(topic: string) {
+  // Perplexity excels at web-aware research
+  const findings = await researcher.perplexityProvider.chat({
+    systemMessage: 'You are a research assistant. Provide factual, cited information.',
+    userMessage: `Research the latest developments in ${topic}`,
+    messageHistory: []
+  });
+
+  return findings.message;
+}
+```
+
+### Local AI for Sensitive Data
+
+```typescript
+const localAI = new SmartAi({
   ollama: {
     baseUrl: 'http://localhost:11434',
     model: 'llama2',
@@ -191,32 +271,205 @@ const smartAi = new SmartAi({
   }
 });

-await smartAi.start();
-```
+// Process sensitive documents without leaving your infrastructure
+async function analyzeSensitiveDoc(pdfBuffer: Buffer) {
+  const analysis = await localAI.ollamaProvider.document({
+    systemMessage: 'Extract and summarize key information.',
+    userMessage: 'Analyze this confidential document',
+    messageHistory: [],
+    pdfDocuments: [pdfBuffer]
+  });
+
+  // Data never leaves your servers
+  return analysis.message;
+}
+```

-### Advanced Streaming Customization
+## ⚡ Performance Tips

-Developers can implement real-time processing pipelines with custom transformations:
+### 1. Provider Selection Strategy

 ```typescript
-const customProcessingStream = new TransformStream({
-  transform(chunk, controller) {
-    const processed = chunk.toUpperCase(); // Example transformation
-    controller.enqueue(processed);
+class SmartAIRouter {
+  constructor(private ai: SmartAi) {}
+
+  async query(message: string, requirements: {
+    speed?: boolean;
+    accuracy?: boolean;
+    cost?: boolean;
+    privacy?: boolean;
+  }) {
+    if (requirements.privacy) {
+      return this.ai.ollamaProvider.chat({...}); // Local only
+    }
+    if (requirements.speed) {
+      return this.ai.groqProvider.chat({...}); // 10x faster
+    }
+    if (requirements.accuracy) {
+      return this.ai.anthropicProvider.chat({...}); // Best reasoning
+    }
+    // Default fallback
+    return this.ai.openaiProvider.chat({...});
+  }
+}
+```
+
+### 2. Streaming for Large Responses
+
+```typescript
+// Don't wait for the entire response
+async function streamResponse(userQuery: string) {
+  const stream = await ai.openaiProvider.chatStream(createInputStream(userQuery));
+
+  // Process tokens as they arrive
+  for await (const chunk of stream) {
+    updateUI(chunk); // Immediate feedback
+    await processChunk(chunk); // Parallel processing
+  }
+}
+```
+
+### 3. Parallel Multi-Provider Queries
+
+```typescript
+// Get the best answer from multiple AIs
+async function consensusQuery(question: string) {
+  const providers = [
+    ai.openaiProvider.chat({...}),
+    ai.anthropicProvider.chat({...}),
+    ai.perplexityProvider.chat({...})
+  ];
+
+  const responses = await Promise.all(providers);
+  return synthesizeResponses(responses);
+}
+```
+
+## 🛠️ Advanced Features
+
+### Custom Streaming Transformations
+
+```typescript
+// Add real-time translation
+const translationStream = new TransformStream({
+  async transform(chunk, controller) {
+    const translated = await translateChunk(chunk);
+    controller.enqueue(translated);
   }
 });

-const processedStream = stream.pipeThrough(customProcessingStream);
-const processedReader = processedStream.getReader();
-
-while (true) {
-  const { done, value } = await processedReader.read();
-  if (done) break;
-  console.log('Processed Output:', value);
-}
-```
-
-This approach can facilitate adaptive content processing workflows.
+const responseStream = await ai.openaiProvider.chatStream(input);
+const translatedStream = responseStream.pipeThrough(translationStream);
+```
+
+### Error Handling & Fallbacks
+
+```typescript
+class ResilientAI {
+  private providers = ['openai', 'anthropic', 'groq'];
+
+  async query(opts: ChatOptions): Promise<ChatResponse> {
+    for (const provider of this.providers) {
+      try {
+        return await this.ai[`${provider}Provider`].chat(opts);
+      } catch (error) {
+        console.warn(`${provider} failed, trying next...`);
+        continue;
+      }
+    }
+    throw new Error('All providers failed');
+  }
+}
+```
+
+### Token Counting & Cost Management
+
+```typescript
+// Track usage across providers
+class UsageTracker {
+  async trackedChat(provider: string, options: ChatOptions) {
+    const start = Date.now();
+    const response = await ai[`${provider}Provider`].chat(options);
+
+    const usage = {
+      provider,
+      duration: Date.now() - start,
+      inputTokens: estimateTokens(options),
+      outputTokens: estimateTokens(response.message)
+    };
+
+    await this.logUsage(usage);
+    return response;
+  }
+}
+```
+
+## 📦 Installation & Setup
+
+### Prerequisites
+
+- Node.js 16+
+- TypeScript 4.5+
+- API keys for your chosen providers
+
+### Environment Setup
+
+```bash
+# Install
+npm install @push.rocks/smartai
+
+# Set up environment variables
+export OPENAI_API_KEY=sk-...
+export ANTHROPIC_API_KEY=sk-ant-...
+export PERPLEXITY_API_KEY=pplx-...
+# ... etc
+```
+
+### TypeScript Configuration
+
+```json
+{
+  "compilerOptions": {
+    "target": "ES2022",
+    "module": "NodeNext",
+    "lib": ["ES2022"],
+    "strict": true,
+    "esModuleInterop": true,
+    "skipLibCheck": true
+  }
+}
+```
+
+## 🎯 Choosing the Right Provider
+
+| Use Case | Recommended Provider | Why |
+|----------|---------------------|-----|
+| **General Purpose** | OpenAI | Most features, stable, well-documented |
+| **Complex Reasoning** | Anthropic | Superior logical thinking, safer outputs |
+| **Research & Facts** | Perplexity | Web-aware, provides citations |
+| **Speed Critical** | Groq | 10x faster inference, sub-second responses |
+| **Privacy Critical** | Ollama | 100% local, no data leaves your servers |
+| **Real-time Data** | XAI | Access to current information |
+| **Cost Sensitive** | Ollama/Exo | Free (local) or distributed compute |
+
+## 🤝 Contributing
+
+SmartAI is open source and welcomes contributions! Visit our [GitHub repository](https://code.foss.global/push.rocks/smartai) to:
+
+- Report issues
+- Submit pull requests
+- Request features
+- Join discussions
+
+## 📈 Roadmap
+
+- [ ] Streaming function calls
+- [ ] Image generation support
+- [ ] Voice input processing
+- [ ] Fine-tuning integration
+- [ ] Embedding support
+- [ ] Agent framework
+- [ ] More providers (Cohere, AI21, etc.)

 ## License and Legal Information
@@ -29,12 +29,14 @@ tap.test('should create chat response with openai', async () => {

 tap.test('should document a pdf', async () => {
   const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
-  const pdfResponse = await smartrequest.getBinary(pdfUrl);
+  const pdfResponse = await smartrequest.SmartRequest.create()
+    .url(pdfUrl)
+    .get();
   const result = await testSmartai.openaiProvider.document({
     systemMessage: 'Classify the document. Only the following answers are allowed: "invoice", "bank account statement", "contract", "other". The answer should only contain the keyword for machine use.',
     userMessage: "Classify the document.",
     messageHistory: [],
-    pdfDocuments: [pdfResponse.body],
+    pdfDocuments: [Buffer.from(await pdfResponse.arrayBuffer())],
   });
   console.log(result);
 });
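The hunk above migrates the test from smartrequest v2's `getBinary()` helper to the v4 fluent API. Condensed into a standalone helper (the function name is ours; the calls are exactly those the diff introduces):

```typescript
import * as smartrequest from '@push.rocks/smartrequest';

// v2 shape: const res = await smartrequest.getBinary(url); res.body was a Buffer.
// v4 shape, as exercised by the updated test: a fluent builder whose response
// exposes arrayBuffer(), re-wrapped in a Buffer for consumers that expect one.
async function fetchPdfAsBuffer(url: string): Promise<Buffer> {
  const response = await smartrequest.SmartRequest.create()
    .url(url)
    .get();
  return Buffer.from(await response.arrayBuffer());
}
```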
@@ -1,3 +1,5 @@
+import * as plugins from './plugins.js';
+
 /**
  * Message format for chat interactions
  */
@@ -28,17 +30,30 @@ export interface ChatResponse {
  * Provides a common interface for different AI providers (OpenAI, Anthropic, Perplexity, Ollama)
  */
 export abstract class MultiModalModel {
+  /**
+   * SmartPdf instance for document processing
+   * Shared across all methods that need PDF functionality
+   */
+  protected smartpdfInstance: plugins.smartpdf.SmartPdf;
+
   /**
    * Initializes the model and any necessary resources
    * Should be called before using any other methods
    */
-  abstract start(): Promise<void>;
+  public async start(): Promise<void> {
+    this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
+    await this.smartpdfInstance.start();
+  }

   /**
    * Cleans up any resources used by the model
    * Should be called when the model is no longer needed
    */
-  abstract stop(): Promise<void>;
+  public async stop(): Promise<void> {
+    if (this.smartpdfInstance) {
+      await this.smartpdfInstance.stop();
+    }
+  }

   /**
    * Synchronous chat interaction with the model
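These base-class hunks carry the core of the 0.5.8 fix: providers used to construct a fresh SmartPdf per document call and did not always stop it. A self-contained sketch of the resulting pattern, with `SmartPdfLike` standing in for `plugins.smartpdf.SmartPdf` and the other abstract members (chat, vision, ...) omitted:

```typescript
// Stand-in for plugins.smartpdf.SmartPdf, reduced to the calls used here.
interface SmartPdfLike {
  start(): Promise<void>;
  stop(): Promise<void>;
  convertPDFToPngBytes(pdf: Uint8Array): Promise<Uint8Array[]>;
}

abstract class MultiModalModelSketch {
  protected smartpdfInstance!: SmartPdfLike;

  constructor(private makeSmartPdf: () => SmartPdfLike) {}

  // The base class now owns creation and startup of one shared instance.
  public async start(): Promise<void> {
    this.smartpdfInstance = this.makeSmartPdf();
    await this.smartpdfInstance.start();
  }

  // And teardown: this is what closes the leak of per-call instances.
  public async stop(): Promise<void> {
    if (this.smartpdfInstance) {
      await this.smartpdfInstance.stop();
    }
  }
}

class ProviderSketch extends MultiModalModelSketch {
  public async start() {
    await super.start(); // chain the shared lifecycle first
    // ...provider-specific API client setup goes here...
  }

  public async stop() {
    await super.stop();
  }

  // document() and friends reuse this.smartpdfInstance instead of
  // constructing a new SmartPdf on every call.
  public async renderPdfs(pdfDocuments: Uint8Array[]): Promise<Uint8Array[]> {
    let images: Uint8Array[] = [];
    for (const pdf of pdfDocuments) {
      images = images.concat(await this.smartpdfInstance.convertPDFToPngBytes(pdf));
    }
    return images;
  }
}
```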
@@ -91,7 +91,29 @@ export class SmartAi {
     }
   }

-  public async stop() {}
+  public async stop() {
+    if (this.openaiProvider) {
+      await this.openaiProvider.stop();
+    }
+    if (this.anthropicProvider) {
+      await this.anthropicProvider.stop();
+    }
+    if (this.perplexityProvider) {
+      await this.perplexityProvider.stop();
+    }
+    if (this.groqProvider) {
+      await this.groqProvider.stop();
+    }
+    if (this.xaiProvider) {
+      await this.xaiProvider.stop();
+    }
+    if (this.ollamaProvider) {
+      await this.ollamaProvider.stop();
+    }
+    if (this.exoProvider) {
+      await this.exoProvider.stop();
+    }
+  }

   /**
    * create a new conversation
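With `stop()` no longer a no-op, a consumer can bracket work like this (a usage sketch, not code from the repo):

```typescript
import { SmartAi } from '@push.rocks/smartai';

const smartAi = new SmartAi({ openaiToken: 'sk-...' });
await smartAi.start();
try {
  // ...chat, vision, and document calls...
} finally {
  // Now cascades to every configured provider, releasing the shared
  // SmartPdf resources instead of silently doing nothing.
  await smartAi.stop();
}
```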
@@ -20,12 +20,15 @@ export class AnthropicProvider extends MultiModalModel {
   }

   async start() {
+    await super.start();
     this.anthropicApiClient = new plugins.anthropic.default({
       apiKey: this.options.anthropicToken,
     });
   }

-  async stop() {}
+  async stop() {
+    await super.stop();
+  }

   public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
     // Create a TextDecoder to handle incoming chunks
@@ -178,11 +181,10 @@ export class AnthropicProvider extends MultiModalModel {
     messageHistory: ChatMessage[];
   }): Promise<{ message: any }> {
     // Convert PDF documents to images using SmartPDF
-    const smartpdfInstance = new plugins.smartpdf.SmartPdf();
     let documentImageBytesArray: Uint8Array[] = [];

     for (const pdfDocument of optionsArg.pdfDocuments) {
-      const documentImageArray = await smartpdfInstance.convertPDFToPngBytes(pdfDocument);
+      const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
       documentImageBytesArray = documentImageBytesArray.concat(documentImageArray);
     }
@@ -24,6 +24,7 @@ export class OllamaProvider extends MultiModalModel {
   }

   async start() {
+    await super.start();
     // Verify Ollama is running
     try {
       const response = await fetch(`${this.baseUrl}/api/tags`);
@@ -35,7 +36,9 @@ export class OllamaProvider extends MultiModalModel {
     }
   }

-  async stop() {}
+  async stop() {
+    await super.stop();
+  }

   public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
     // Create a TextDecoder to handle incoming chunks
@@ -205,11 +208,10 @@ export class OllamaProvider extends MultiModalModel {
     messageHistory: ChatMessage[];
   }): Promise<{ message: any }> {
     // Convert PDF documents to images using SmartPDF
-    const smartpdfInstance = new plugins.smartpdf.SmartPdf();
     let documentImageBytesArray: Uint8Array[] = [];

     for (const pdfDocument of optionsArg.pdfDocuments) {
-      const documentImageArray = await smartpdfInstance.convertPDFToPngBytes(pdfDocument);
+      const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
       documentImageBytesArray = documentImageBytesArray.concat(documentImageArray);
     }
@@ -1,5 +1,6 @@
 import * as plugins from './plugins.js';
 import * as paths from './paths.js';
+import { Readable } from 'stream';

 // Custom type definition for chat completion messages
 export type TChatCompletionRequestMessage = {
@@ -20,7 +21,6 @@ export interface IOpenaiProviderOptions {
 export class OpenAiProvider extends MultiModalModel {
   private options: IOpenaiProviderOptions;
   public openAiApiClient: plugins.openai.default;
-  public smartpdfInstance: plugins.smartpdf.SmartPdf;

   constructor(optionsArg: IOpenaiProviderOptions) {
     super();
@@ -28,14 +28,16 @@ export class OpenAiProvider extends MultiModalModel {
   }

   public async start() {
+    await super.start();
     this.openAiApiClient = new plugins.openai.default({
       apiKey: this.options.openaiToken,
       dangerouslyAllowBrowser: true,
     });
-    this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
   }

-  public async stop() {}
+  public async stop() {
+    await super.stop();
+  }

   public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
     // Create a TextDecoder to handle incoming chunks
@@ -148,7 +150,8 @@ export class OpenAiProvider extends MultiModalModel {
       speed: 1,
     });
     const stream = result.body;
-    done.resolve(stream);
+    const nodeStream = Readable.fromWeb(stream as any);
+    done.resolve(nodeStream);
     return done.promise;
   }

@@ -164,13 +167,10 @@ export class OpenAiProvider extends MultiModalModel {
     let pdfDocumentImageBytesArray: Uint8Array[] = [];

     // Convert each PDF into one or more image byte arrays.
-    const smartpdfInstance = new plugins.smartpdf.SmartPdf();
-    await smartpdfInstance.start();
     for (const pdfDocument of optionsArg.pdfDocuments) {
-      const documentImageArray = await smartpdfInstance.convertPDFToPngBytes(pdfDocument);
+      const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
       pdfDocumentImageBytesArray = pdfDocumentImageBytesArray.concat(documentImageArray);
     }
-    await smartpdfInstance.stop();

     console.log(`image smartfile array`);
     console.log(pdfDocumentImageBytesArray.map((smartfile) => smartfile.length));
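The `as any` in the audio hunk above papers over a declaration mismatch, not a runtime one; a stricter variant of the same conversion (our naming), for readers who prefer to avoid `any`:

```typescript
import { Readable } from 'node:stream';
import type { ReadableStream as NodeWebReadableStream } from 'node:stream/web';

// The OpenAI SDK types result.body as the DOM ReadableStream, while
// Readable.fromWeb() is typed against Node's stream/web variant. At
// runtime both are the same kind of web stream, so a cast suffices.
function webToNode(body: ReadableStream<Uint8Array>): Readable {
  return Readable.fromWeb(body as unknown as NodeWebReadableStream<Uint8Array>);
}
```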
@@ -11,7 +11,6 @@ export interface IXAIProviderOptions {
 export class XAIProvider extends MultiModalModel {
   private options: IXAIProviderOptions;
   public openAiApiClient: plugins.openai.default;
-  public smartpdfInstance: plugins.smartpdf.SmartPdf;

   constructor(optionsArg: IXAIProviderOptions) {
     super();
@@ -19,14 +18,16 @@ export class XAIProvider extends MultiModalModel {
   }

   public async start() {
+    await super.start();
     this.openAiApiClient = new plugins.openai.default({
       apiKey: this.options.xaiToken,
       baseURL: 'https://api.x.ai/v1',
     });
-    this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
   }

-  public async stop() {}
+  public async stop() {
+    await super.stop();
+  }

   public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
     // Create a TextDecoder to handle incoming chunks