Compare commits

36 Commits

SHA1
---
d0a4151a2b
ad5dd4799b
1c49af74ac
eda8ce36df
e82c510094
0378308721
189a32683f
f731b9f78d
3701e21284
490d4996d2
f099a8f1ed
a0228a0abc
a5257b52e7
a4144fc071
af46b3e81e
d50427937c
ffde2e0bf1
82abc06da4
3a5f2d52e5
f628a71184
d1465fc868
9e19d320e1
158d49fa95
1ce412fd00
92c382c16e
63d3b7c9bb
2e4c6aa80a
04d505d29e
a636556fdb
a1558e6306
8b79cd025a
268178f024
181193352e
616ef168a5
1b814477ec
f2685164e5

82 changelog.md (Normal file)

@@ -0,0 +1,82 @@

# Changelog

## 2025-02-03 - 0.2.0 - feat(provider.anthropic)
Add support for vision and document processing in Anthropic provider

- Implemented vision tasks for Anthropic provider using Claude-3-opus-20240229 model.
- Implemented document processing for Anthropic provider, supporting conversion of PDF documents to images and analysis with Claude-3-opus-20240229 model.
- Updated documentation to reflect the new capabilities of the Anthropic provider.

## 2025-02-03 - 0.1.0 - feat(providers)
Add vision and document processing capabilities to providers

- OpenAI and Ollama providers now support vision tasks using GPT-4 Vision and Llava models respectively.
- Document processing has been implemented for OpenAI and Ollama providers, converting PDFs to images for analysis.
- Introduced abstract methods for vision and document processing in the MultiModalModel class.
- Updated the readme file with examples for vision and document processing.

## 2025-02-03 - 0.0.19 - fix(core)
Enhanced chat streaming and error handling across providers

- Refactored chatStream method to properly handle input streams and processes in Perplexity, OpenAI, Ollama, and Anthropic providers.
- Improved error handling and message parsing in chatStream implementations.
- Defined distinct interfaces for chat options, messages, and responses.
- Adjusted the test logic in test/test.ts for the new classification response requirement.

## 2024-09-19 - 0.0.18 - fix(dependencies)
Update dependencies to the latest versions.

- Updated @git.zone/tsbuild from ^2.1.76 to ^2.1.84
- Updated @git.zone/tsrun from ^1.2.46 to ^1.2.49
- Updated @push.rocks/tapbundle from ^5.0.23 to ^5.3.0
- Updated @types/node from ^20.12.12 to ^22.5.5
- Updated @anthropic-ai/sdk from ^0.21.0 to ^0.27.3
- Updated @push.rocks/smartfile from ^11.0.14 to ^11.0.21
- Updated @push.rocks/smartpromise from ^4.0.3 to ^4.0.4
- Updated @push.rocks/webstream from ^1.0.8 to ^1.0.10
- Updated openai from ^4.47.1 to ^4.62.1

## 2024-05-29 - 0.0.17 - Documentation
Updated project description.

- Improved project description for clarity and details.

## 2024-05-17 - 0.0.16 to 0.0.15 - Core
Fixes and updates.

- Various core updates and fixes for stability improvements.

## 2024-04-29 - 0.0.14 to 0.0.13 - Core
Fixes and updates.

- Multiple core updates and fixes for enhanced functionality.

## 2024-04-29 - 0.0.12 - Core
Fixes and updates.

- Core update and bug fixes.

## 2024-04-29 - 0.0.11 - Provider
Fix integration for anthropic provider.

- Correction in the integration process with anthropic provider for better compatibility.

## 2024-04-27 - 0.0.10 to 0.0.9 - Core
Fixes and updates.

- Updates and fixes to core components.
- Updated tsconfig for improved TypeScript configuration.

## 2024-04-01 - 0.0.8 to 0.0.7 - Core and npmextra
Core updates and npmextra configuration.

- Core fixes and updates.
- Updates to npmextra.json for githost configuration.

## 2024-03-31 - 0.0.6 to 0.0.2 - Core
Initial core updates and fixes.

- Multiple updates and fixes to core following initial versions.

This summarizes the relevant updates and changes based on the provided commit messages. The changelog excludes commits that are version tags without meaningful content or repeated entries.

npmextra.json

@@ -5,14 +5,28 @@
    "githost": "code.foss.global",
    "gitscope": "push.rocks",
    "gitrepo": "smartai",
    "description": "a standardaized interface to talk to AI models",
    "description": "A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.",
    "npmPackagename": "@push.rocks/smartai",
    "license": "MIT",
    "projectDomain": "push.rocks"
    "projectDomain": "push.rocks",
    "keywords": [
      "AI integration",
      "chatbot",
      "TypeScript",
      "OpenAI",
      "Anthropic",
      "multi-model support",
      "audio responses",
      "text-to-speech",
      "streaming chat"
    ]
  }
  },
  "npmci": {
    "npmGlobalTools": [],
    "npmAccessLevel": "public"
  },
  "tsdoc": {
    "legal": "\n## License and Legal Information\n\nThis repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository. \n\n**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.\n\n### Trademarks\n\nThis project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.\n\n### Company Information\n\nTask Venture Capital GmbH \nRegistered at District court Bremen HRB 35230 HB, Germany\n\nFor any legal inquiries or if you require further information, please contact us via email at hello@task.vc.\n\nBy using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.\n"
  }
}

42 package.json

@@ -1,8 +1,8 @@
{
  "name": "@push.rocks/smartai",
  "version": "0.0.5",
  "version": "0.2.0",
  "private": false,
  "description": "a standardaized interface to talk to AI models",
  "description": "A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.",
  "main": "dist_ts/index.js",
  "typings": "dist_ts/index.d.ts",
  "type": "module",
@@ -14,22 +14,33 @@
    "buildDocs": "(tsdoc)"
  },
  "devDependencies": {
    "@git.zone/tsbuild": "^2.1.25",
    "@git.zone/tsbuild": "^2.1.84",
    "@git.zone/tsbundle": "^2.0.5",
    "@git.zone/tsrun": "^1.2.46",
    "@git.zone/tstest": "^1.0.44",
    "@push.rocks/tapbundle": "^5.0.15",
    "@types/node": "^20.8.7"
    "@git.zone/tsrun": "^1.2.49",
    "@git.zone/tstest": "^1.0.90",
    "@push.rocks/qenv": "^6.0.5",
    "@push.rocks/tapbundle": "^5.3.0",
    "@types/node": "^22.5.5"
  },
  "dependencies": {
    "@anthropic-ai/sdk": "^0.27.3",
    "@push.rocks/smartarray": "^1.0.8",
    "@push.rocks/smartfile": "^11.0.21",
    "@push.rocks/smartpath": "^5.0.18",
    "@push.rocks/smartpdf": "^3.1.6",
    "@push.rocks/smartpromise": "^4.0.4",
    "@push.rocks/smartrequest": "^2.0.22",
    "@push.rocks/webstream": "^1.0.10",
    "openai": "^4.62.1"
  },
  "dependencies": {},
  "repository": {
    "type": "git",
    "url": "git+https://code.foss.global/push.rocks/smartai.git"
    "url": "https://code.foss.global/push.rocks/smartai.git"
  },
  "bugs": {
    "url": "https://code.foss.global/push.rocks/smartai/issues"
  },
  "homepage": "https://code.foss.global/push.rocks/smartai#readme",
  "homepage": "https://code.foss.global/push.rocks/smartai",
  "browserslist": [
    "last 1 chrome versions"
  ],
@@ -44,5 +55,16 @@
    "cli.js",
    "npmextra.json",
    "readme.md"
  ],
  "keywords": [
    "AI integration",
    "chatbot",
    "TypeScript",
    "OpenAI",
    "Anthropic",
    "multi-model support",
    "audio responses",
    "text-to-speech",
    "streaming chat"
  ]
}

7694 pnpm-lock.yaml (generated)

File diff suppressed because it is too large

4 qenv.yml (Normal file)

@@ -0,0 +1,4 @@
required:
  - OPENAI_TOKEN
  - ANTHROPIC_TOKEN
  - PERPLEXITY_TOKEN
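
These tokens are resolved at test time through @push.rocks/qenv (see test/test.ts further down). A minimal sketch of that lookup, assuming local secrets live in an uncommitted ./.nogit/ folder:

```typescript
import * as qenv from '@push.rocks/qenv';

// qenv.yml declares the required variables; ./.nogit/ is the local, uncommitted fallback store.
const testQenv = new qenv.Qenv('./', './.nogit/');

// Tokens are fetched lazily, exactly as the test suite does for OPENAI_TOKEN.
const openaiToken = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
```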

1 readme.hints.md (Normal file)

@@ -0,0 +1 @@

258 readme.md

@@ -1,31 +1,241 @@

# @push.rocks/smartai
a standardaized interface to talk to AI models

## Availabililty and Links
* [npmjs.org (npm package)](https://www.npmjs.com/package/@push.rocks/smartai)
* [gitlab.com (source)](https://code.foss.global/push.rocks/smartai)
* [github.com (source mirror)](https://github.com/push.rocks/smartai)
* [docs (typedoc)](https://push.rocks.gitlab.io/smartai/)
Provides a standardized interface for integrating and conversing with multiple AI models, supporting operations like chat, streaming interactions, and audio responses.

## Status for master
## Install

Status Category | Status Badge
-- | --
GitLab Pipelines | [](https://lossless.cloud)
GitLab Pipline Test Coverage | [](https://lossless.cloud)
npm | [](https://lossless.cloud)
Snyk | [](https://lossless.cloud)
TypeScript Support | [](https://lossless.cloud)
node Support | [](https://nodejs.org/dist/latest-v10.x/docs/api/)
Code Style | [](https://lossless.cloud)
PackagePhobia (total standalone install weight) | [](https://lossless.cloud)
PackagePhobia (package size on registry) | [](https://lossless.cloud)
BundlePhobia (total size when bundled) | [](https://lossless.cloud)
To add @push.rocks/smartai to your project, run the following command in your terminal:

```bash
npm install @push.rocks/smartai
```

This command installs the package and adds it to your project's dependencies.

## Supported AI Providers

@push.rocks/smartai supports multiple AI providers, each with its own unique capabilities:

### OpenAI
- Models: GPT-4, GPT-3.5-turbo, GPT-4-vision-preview
- Features: Chat, Streaming, Audio Generation, Vision, Document Processing
- Configuration:
```typescript
openaiToken: 'your-openai-token'
```

### Anthropic
- Models: Claude-3-opus-20240229
- Features: Chat, Streaming, Vision, Document Processing
- Configuration:
```typescript
anthropicToken: 'your-anthropic-token'
```

### Perplexity
- Models: Mixtral-8x7b-instruct
- Features: Chat, Streaming
- Configuration:
```typescript
perplexityToken: 'your-perplexity-token'
```

### Groq
- Models: Llama-3.3-70b-versatile
- Features: Chat, Streaming
- Configuration:
```typescript
groqToken: 'your-groq-token'
```

### Ollama
- Models: Configurable (default: llama2, llava for vision/documents)
- Features: Chat, Streaming, Vision, Document Processing
- Configuration:
```typescript
baseUrl: 'http://localhost:11434' // Optional
model: 'llama2' // Optional
visionModel: 'llava' // Optional, for vision and document tasks
```

## Usage
Use TypeScript for best in class intellisense
For further information read the linked docs at the top of this readme.

## Legal
> MIT licensed | **©** [Task Venture Capital GmbH](https://task.vc)
| By using this npm module you agree to our [privacy policy](https://lossless.gmbH/privacy)
The `@push.rocks/smartai` package is a comprehensive solution for integrating and interacting with various AI models, designed to support operations ranging from chat interactions to audio responses. This documentation will guide you through the process of utilizing `@push.rocks/smartai` in your applications.

### Getting Started

Before you begin, ensure you have installed the package as described in the **Install** section above. Once installed, you can start integrating AI functionalities into your application.

### Initializing SmartAi

The first step is to import and initialize the `SmartAi` class with appropriate options for the AI services you plan to use:

```typescript
import { SmartAi } from '@push.rocks/smartai';

const smartAi = new SmartAi({
  openaiToken: 'your-openai-token',
  anthropicToken: 'your-anthropic-token',
  perplexityToken: 'your-perplexity-token',
  groqToken: 'your-groq-token',
  ollama: {
    baseUrl: 'http://localhost:11434',
    model: 'llama2'
  }
});

await smartAi.start();
```

### Chat Interactions

#### Synchronous Chat

For simple question-answer interactions:

```typescript
const response = await smartAi.openaiProvider.chat({
  systemMessage: 'You are a helpful assistant.',
  userMessage: 'What is the capital of France?',
  messageHistory: [] // Previous messages in the conversation
});

console.log(response.message);
```

#### Streaming Chat

For real-time, streaming interactions:

```typescript
const textEncoder = new TextEncoder();
const textDecoder = new TextDecoder();

// Create input and output streams
const { writable, readable } = new TransformStream();
const writer = writable.getWriter();

// Send a message
const message = {
  role: 'user',
  content: 'Tell me a story about a brave knight'
};

writer.write(textEncoder.encode(JSON.stringify(message) + '\n'));

// Process the response stream
const stream = await smartAi.openaiProvider.chatStream(readable);
const reader = stream.getReader();

while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  console.log('AI:', value); // Process each chunk of the response
}
```

### Audio Generation

For providers that support audio generation (currently OpenAI):

```typescript
const audioStream = await smartAi.openaiProvider.audio({
  message: 'Hello, this is a test of text-to-speech'
});

// Handle the audio stream (e.g., save to file or play)
```
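
One possible way to persist that stream, as a sketch (the file name and on-disk audio format are assumptions, not part of the README): `audio()` resolves to a Node.js readable stream, so it can be piped straight into a write stream.

```typescript
import { createWriteStream } from 'fs';
import { pipeline } from 'stream/promises';

// audio() returns a NodeJS.ReadableStream (see MultiModalModel.audio below),
// so it can be piped directly into a file on disk.
await pipeline(audioStream, createWriteStream('speech.mp3'));
```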

### Document Processing

For providers that support document processing (OpenAI, Ollama, and Anthropic):

```typescript
// Using OpenAI
const result = await smartAi.openaiProvider.document({
  systemMessage: 'Classify the document type',
  userMessage: 'What type of document is this?',
  messageHistory: [],
  pdfDocuments: [pdfBuffer] // Uint8Array of PDF content
});

// Using Ollama with llava
const analysis = await smartAi.ollamaProvider.document({
  systemMessage: 'You are a document analysis assistant',
  userMessage: 'Extract the key information from this document',
  messageHistory: [],
  pdfDocuments: [pdfBuffer] // Uint8Array of PDF content
});

// Using Anthropic with Claude 3
const anthropicAnalysis = await smartAi.anthropicProvider.document({
  systemMessage: 'You are a document analysis assistant',
  userMessage: 'Please analyze this document and extract key information',
  messageHistory: [],
  pdfDocuments: [pdfBuffer] // Uint8Array of PDF content
});
```

Both providers will:
1. Convert PDF documents to images
2. Process each page using their vision models
3. Return a comprehensive analysis based on the system message and user query

### Vision Processing

For providers that support vision tasks (OpenAI, Ollama, and Anthropic):

```typescript
// Using OpenAI's GPT-4 Vision
const description = await smartAi.openaiProvider.vision({
  image: imageBuffer, // Buffer containing the image data
  prompt: 'What do you see in this image?'
});

// Using Ollama's Llava model
const analysis = await smartAi.ollamaProvider.vision({
  image: imageBuffer,
  prompt: 'Analyze this image in detail'
});

// Using Anthropic's Claude 3
const anthropicAnalysis = await smartAi.anthropicProvider.vision({
  image: imageBuffer,
  prompt: 'Please analyze this image and describe what you see'
});
```

## Error Handling

All providers implement proper error handling. It's recommended to wrap API calls in try-catch blocks:

```typescript
try {
  const response = await smartAi.openaiProvider.chat({
    systemMessage: 'You are a helpful assistant.',
    userMessage: 'Hello!',
    messageHistory: []
  });
} catch (error) {
  console.error('AI provider error:', error.message);
}
```

## License and Legal Information

This repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository.

**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.

### Trademarks

This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.

### Company Information

Task Venture Capital GmbH
Registered at District court Bremen HRB 35230 HB, Germany

For any legal inquiries or if you require further information, please contact us via email at hello@task.vc.

By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.

84 test/test.ts

@@ -1,8 +1,84 @@
import { expect, expectAsync, tap } from '@push.rocks/tapbundle';
import * as smartai from '../ts/index.js'
import * as qenv from '@push.rocks/qenv';
import * as smartrequest from '@push.rocks/smartrequest';
import * as smartfile from '@push.rocks/smartfile';

tap.test('first test', async () => {
console.log(smartai)
const testQenv = new qenv.Qenv('./', './.nogit/');

import * as smartai from '../ts/index.js';

let testSmartai: smartai.SmartAi;

tap.test('should create a smartai instance', async () => {
  testSmartai = new smartai.SmartAi({
    openaiToken: await testQenv.getEnvVarOnDemand('OPENAI_TOKEN'),
  });
  await testSmartai.start();
});

tap.test('should create chat response with openai', async () => {
  const userMessage = 'How are you?';
  const response = await testSmartai.openaiProvider.chat({
    systemMessage: 'Hello',
    userMessage: userMessage,
    messageHistory: [
    ],
  });
  console.log(`userMessage: ${userMessage}`);
  console.log(response.message);
});

tap.test('should document a pdf', async () => {
  const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
  const pdfResponse = await smartrequest.getBinary(pdfUrl);
  const result = await testSmartai.openaiProvider.document({
    systemMessage: 'Classify the document. Only the following answers are allowed: "invoice", "bank account statement", "contract", "other". The answer should only contain the keyword for machine use.',
    userMessage: "Classify the document.",
    messageHistory: [],
    pdfDocuments: [pdfResponse.body],
  });
  console.log(result);
});

tap.test('should recognize companies in a pdf', async () => {
  const pdfBuffer = await smartfile.fs.toBuffer('./.nogit/demo_without_textlayer.pdf');
  const result = await testSmartai.openaiProvider.document({
    systemMessage: `
      summarize the document.

      answer in JSON format, adhering to the following schema:
      \`\`\`typescript
      type TAnswer = {
        entitySender: {
          type: 'official state entity' | 'company' | 'person';
          name: string;
          address: string;
          city: string;
          country: string;
          EU: boolean; // wether the entity is within EU
        };
        entityReceiver: {
          type: 'official state entity' | 'company' | 'person';
          name: string;
          address: string;
          city: string;
          country: string;
          EU: boolean; // wether the entity is within EU
        };
        date: string; // the date of the document as YYYY-MM-DD
        title: string; // a short title, suitable for a filename
      }
      \`\`\`
    `,
    userMessage: "Classify the document.",
    messageHistory: [],
    pdfDocuments: [pdfBuffer],
  });
  console.log(result);
})

tap.start()
tap.test('should stop the smartai instance', async () => {
  await testSmartai.stop();
});

export default tap.start();
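
Following the same tapbundle pattern, a further test for the vision path could look like the hypothetical sketch below (the image fixture path is illustrative, not part of the repository):

```typescript
tap.test('should describe an image', async () => {
  // Hypothetical local fixture; any JPEG or PNG buffer would do.
  const imageBuffer = await smartfile.fs.toBuffer('./.nogit/demo_image.jpg');
  const description = await testSmartai.openaiProvider.vision({
    image: imageBuffer,
    prompt: 'Describe this image in one sentence.',
  });
  console.log(description);
});
```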

@@ -1,8 +1,8 @@
/**
 * autocreated commitinfo by @pushrocks/commitinfo
 * autocreated commitinfo by @push.rocks/commitinfo
 */
export const commitinfo = {
  name: '@push.rocks/smartai',
  version: '0.0.5',
  description: 'a standardaized interface to talk to AI models'
  version: '0.2.0',
  description: 'A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.'
}

86 ts/abstract.classes.multimodal.ts (Normal file)

@@ -0,0 +1,86 @@
/**
 * Message format for chat interactions
 */
export interface ChatMessage {
  role: 'assistant' | 'user' | 'system';
  content: string;
}

/**
 * Options for chat interactions
 */
export interface ChatOptions {
  systemMessage: string;
  userMessage: string;
  messageHistory: ChatMessage[];
}

/**
 * Response format for chat interactions
 */
export interface ChatResponse {
  role: 'assistant';
  message: string;
}

/**
 * Abstract base class for multi-modal AI models.
 * Provides a common interface for different AI providers (OpenAI, Anthropic, Perplexity, Ollama)
 */
export abstract class MultiModalModel {
  /**
   * Initializes the model and any necessary resources
   * Should be called before using any other methods
   */
  abstract start(): Promise<void>;

  /**
   * Cleans up any resources used by the model
   * Should be called when the model is no longer needed
   */
  abstract stop(): Promise<void>;

  /**
   * Synchronous chat interaction with the model
   * @param optionsArg Options containing system message, user message, and message history
   * @returns Promise resolving to the assistant's response
   */
  public abstract chat(optionsArg: ChatOptions): Promise<ChatResponse>;

  /**
   * Streaming interface for chat interactions
   * Allows for real-time responses from the model
   * @param input Stream of user messages
   * @returns Stream of model responses
   */
  public abstract chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>>;

  /**
   * Text-to-speech conversion
   * @param optionsArg Options containing the message to convert to speech
   * @returns Promise resolving to a readable stream of audio data
   * @throws Error if the provider doesn't support audio generation
   */
  public abstract audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream>;

  /**
   * Vision-language processing
   * @param optionsArg Options containing the image and prompt for analysis
   * @returns Promise resolving to the model's description or analysis of the image
   * @throws Error if the provider doesn't support vision tasks
   */
  public abstract vision(optionsArg: { image: Buffer; prompt: string }): Promise<string>;

  /**
   * Document analysis and processing
   * @param optionsArg Options containing system message, user message, PDF documents, and message history
   * @returns Promise resolving to the model's analysis of the documents
   * @throws Error if the provider doesn't support document processing
   */
  public abstract document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }>;
}
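
To illustrate the contract this abstract class defines, here is a minimal, hypothetical provider (an echo stub, not one of the shipped providers) that satisfies every abstract member:

```typescript
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';

// Toy provider that echoes input back; only useful to show the required surface.
export class EchoProvider extends MultiModalModel {
  public async start() {}
  public async stop() {}

  public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
    return { role: 'assistant', message: `echo: ${optionsArg.userMessage}` };
  }

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    const decoder = new TextDecoder();
    // Pass each incoming chunk straight through as decoded text.
    return input.pipeThrough(
      new TransformStream<Uint8Array, string>({
        transform(chunk, controller) {
          controller.enqueue(decoder.decode(chunk, { stream: true }));
        },
      })
    );
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('EchoProvider does not support audio generation.');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('EchoProvider does not support vision tasks.');
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    throw new Error('EchoProvider does not support document processing.');
  }
}
```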

116 ts/classes.conversation.ts (Normal file)

@@ -0,0 +1,116 @@
import type { SmartAi } from "./classes.smartai.js";
import { OpenAiProvider } from "./provider.openai.js";

type TProcessFunction = (input: string) => Promise<string>;

export interface IConversationOptions {
  processFunction: TProcessFunction;
}

/**
 * a conversation
 */
export class Conversation {
  // STATIC
  public static async createWithOpenAi(smartaiRefArg: SmartAi) {
    if (!smartaiRefArg.openaiProvider) {
      throw new Error('OpenAI provider not available');
    }
    const conversation = new Conversation(smartaiRefArg, {
      processFunction: async (input) => {
        return '' // TODO implement proper streaming
      }
    });
    return conversation;
  }

  public static async createWithAnthropic(smartaiRefArg: SmartAi) {
    if (!smartaiRefArg.anthropicProvider) {
      throw new Error('Anthropic provider not available');
    }
    const conversation = new Conversation(smartaiRefArg, {
      processFunction: async (input) => {
        return '' // TODO implement proper streaming
      }
    });
    return conversation;
  }

  public static async createWithPerplexity(smartaiRefArg: SmartAi) {
    if (!smartaiRefArg.perplexityProvider) {
      throw new Error('Perplexity provider not available');
    }
    const conversation = new Conversation(smartaiRefArg, {
      processFunction: async (input) => {
        return '' // TODO implement proper streaming
      }
    });
    return conversation;
  }

  public static async createWithOllama(smartaiRefArg: SmartAi) {
    if (!smartaiRefArg.ollamaProvider) {
      throw new Error('Ollama provider not available');
    }
    const conversation = new Conversation(smartaiRefArg, {
      processFunction: async (input) => {
        return '' // TODO implement proper streaming
      }
    });
    return conversation;
  }

  // INSTANCE
  smartaiRef: SmartAi
  private systemMessage: string;
  private processFunction: TProcessFunction;
  private inputStreamWriter: WritableStreamDefaultWriter<string> | null = null;
  private outputStreamController: ReadableStreamDefaultController<string> | null = null;

  constructor(smartairefArg: SmartAi, options: IConversationOptions) {
    this.processFunction = options.processFunction;
  }

  public async setSystemMessage(systemMessageArg: string) {
    this.systemMessage = systemMessageArg;
  }

  private setupOutputStream(): ReadableStream<string> {
    return new ReadableStream<string>({
      start: (controller) => {
        this.outputStreamController = controller;
      }
    });
  }

  private setupInputStream(): WritableStream<string> {
    const writableStream = new WritableStream<string>({
      write: async (chunk) => {
        const processedData = await this.processFunction(chunk);
        if (this.outputStreamController) {
          this.outputStreamController.enqueue(processedData);
        }
      },
      close: () => {
        this.outputStreamController?.close();
      },
      abort: (err) => {
        console.error('Stream aborted', err);
        this.outputStreamController?.error(err);
      }
    });
    return writableStream;
  }

  public getInputStreamWriter(): WritableStreamDefaultWriter<string> {
    if (!this.inputStreamWriter) {
      const inputStream = this.setupInputStream();
      this.inputStreamWriter = inputStream.getWriter();
    }
    return this.inputStreamWriter;
  }

  public getOutputStream(): ReadableStream<string> {
    return this.setupOutputStream();
  }
}
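
A sketch of how this stream plumbing is meant to be driven (note that the per-provider processFunction above is still a TODO stub, so the output chunks are currently empty strings):

```typescript
// Assumes a started SmartAi instance (see classes.smartai.ts below).
const conversation = await smartAi.createConversation('openai');
await conversation.setSystemMessage('You are a helpful assistant.');

// Grab the output side first so the controller exists, then the input writer.
const outputReader = conversation.getOutputStream().getReader();
const writer = conversation.getInputStreamWriter();

await writer.write('Hello there!');

// Each written chunk is run through processFunction and surfaces here.
const { value } = await outputReader.read();
console.log(value);
```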

62 ts/classes.smartai.ts (Normal file)

@@ -0,0 +1,62 @@
import { Conversation } from './classes.conversation.js';
import * as plugins from './plugins.js';
import { AnthropicProvider } from './provider.anthropic.js';
import type { OllamaProvider } from './provider.ollama.js';
import { OpenAiProvider } from './provider.openai.js';
import type { PerplexityProvider } from './provider.perplexity.js';

export interface ISmartAiOptions {
  openaiToken?: string;
  anthropicToken?: string;
  perplexityToken?: string;
}

export type TProvider = 'openai' | 'anthropic' | 'perplexity' | 'ollama';

export class SmartAi {
  public options: ISmartAiOptions;

  public openaiProvider: OpenAiProvider;
  public anthropicProvider: AnthropicProvider;
  public perplexityProvider: PerplexityProvider;
  public ollamaProvider: OllamaProvider;

  constructor(optionsArg: ISmartAiOptions) {
    this.options = optionsArg;
  }

  public async start() {
    if (this.options.openaiToken) {
      this.openaiProvider = new OpenAiProvider({
        openaiToken: this.options.openaiToken,
      });
      await this.openaiProvider.start();
    }
    if (this.options.anthropicToken) {
      this.anthropicProvider = new AnthropicProvider({
        anthropicToken: this.options.anthropicToken,
      });
    }
  }

  public async stop() {}

  /**
   * create a new conversation
   */
  createConversation(provider: TProvider) {
    switch (provider) {
      case 'openai':
        return Conversation.createWithOpenAi(this);
      case 'anthropic':
        return Conversation.createWithAnthropic(this);
      case 'perplexity':
        return Conversation.createWithPerplexity(this);
      case 'ollama':
        return Conversation.createWithOllama(this);
      default:
        throw new Error('Provider not available');
    }
  }
}
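
A short sketch of the lifecycle this class implies: only providers whose tokens are supplied get instantiated in start(), and conversations are then dispatched by provider name.

```typescript
import { SmartAi } from './classes.smartai.js';

const smartAi = new SmartAi({
  openaiToken: process.env.OPENAI_TOKEN,       // openaiProvider is created and started
  anthropicToken: process.env.ANTHROPIC_TOKEN, // anthropicProvider is created
});
await smartAi.start();

// Dispatches to Conversation.createWithOpenAi(this) per the switch above.
const conversation = await smartAi.createConversation('openai');
```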

15 ts/classes.tts.ts (Normal file)

@@ -0,0 +1,15 @@
import type { SmartAi } from './classes.smartai.js';
import * as plugins from './plugins.js';

export class TTS {
  public static async createWithOpenAi(smartaiRef: SmartAi): Promise<TTS> {
    return new TTS(smartaiRef);
  }

  // INSTANCE
  smartaiRef: SmartAi;

  constructor(smartairefArg: SmartAi) {
    this.smartaiRef = smartairefArg;
  }
}

ts/index.ts

@@ -1 +1,3 @@
export * from './smartai.classes.smartai.js';
export * from './classes.smartai.js';
export * from './abstract.classes.multimodal.js';
export * from './provider.openai.js';
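
Given these re-exports, consumers of the built package can reach the main entry points directly; a sketch (exact availability depends on the published build):

```typescript
import { SmartAi, OpenAiProvider, MultiModalModel } from '@push.rocks/smartai';
import type { ChatOptions, ChatResponse } from '@push.rocks/smartai';
```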

0 ts/interfaces.ts (Normal file)

4 ts/paths.ts (Normal file)

@@ -0,0 +1,4 @@
import * as plugins from './plugins.js';

export const packageDir = plugins.path.join(plugins.smartpath.get.dirnameFromImportMetaUrl(import.meta.url), '../');
export const nogitDir = plugins.path.join(packageDir, './.nogit');

36 ts/plugins.ts (Normal file)

@@ -0,0 +1,36 @@
// node native
import * as path from 'path';

export {
  path,
}

// @push.rocks scope
import * as qenv from '@push.rocks/qenv';
import * as smartarray from '@push.rocks/smartarray';
import * as smartfile from '@push.rocks/smartfile';
import * as smartpath from '@push.rocks/smartpath';
import * as smartpdf from '@push.rocks/smartpdf';
import * as smartpromise from '@push.rocks/smartpromise';
import * as smartrequest from '@push.rocks/smartrequest';
import * as webstream from '@push.rocks/webstream';

export {
  smartarray,
  qenv,
  smartfile,
  smartpath,
  smartpdf,
  smartpromise,
  smartrequest,
  webstream,
}

// third party
import * as anthropic from '@anthropic-ai/sdk';
import * as openai from 'openai';

export {
  anthropic,
  openai,
}

240 ts/provider.anthropic.ts (Normal file)

@@ -0,0 +1,240 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
import type { ImageBlockParam, TextBlockParam } from '@anthropic-ai/sdk/resources/messages';

type ContentBlock = ImageBlockParam | TextBlockParam;

export interface IAnthropicProviderOptions {
  anthropicToken: string;
}

export class AnthropicProvider extends MultiModalModel {
  private options: IAnthropicProviderOptions;
  public anthropicApiClient: plugins.anthropic.default;

  constructor(optionsArg: IAnthropicProviderOptions) {
    super();
    this.options = optionsArg // Ensure the token is stored
  }

  async start() {
    this.anthropicApiClient = new plugins.anthropic.default({
      apiKey: this.options.anthropicToken,
    });
  }

  async stop() {}

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // Create a TransformStream to process the input
    const transform = new TransformStream<Uint8Array, string>({
      async transform(chunk, controller) {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }

        // If we have a complete message, send it to Anthropic
        if (currentMessage) {
          const stream = await this.anthropicApiClient.messages.create({
            model: 'claude-3-opus-20240229',
            messages: [{ role: currentMessage.role, content: currentMessage.content }],
            system: '',
            stream: true,
            max_tokens: 4000,
          });

          // Process each chunk from Anthropic
          for await (const chunk of stream) {
            const content = chunk.delta?.text;
            if (content) {
              controller.enqueue(content);
            }
          }

          currentMessage = null;
        }
      },

      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });

    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  // Implementing the synchronous chat interaction
  public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
    // Convert message history to Anthropic format
    const messages = optionsArg.messageHistory.map(msg => ({
      role: msg.role === 'assistant' ? 'assistant' as const : 'user' as const,
      content: msg.content
    }));

    const result = await this.anthropicApiClient.messages.create({
      model: 'claude-3-opus-20240229',
      system: optionsArg.systemMessage,
      messages: [
        ...messages,
        { role: 'user' as const, content: optionsArg.userMessage }
      ],
      max_tokens: 4000,
    });

    // Extract text content from the response
    let message = '';
    for (const block of result.content) {
      if ('text' in block) {
        message += block.text;
      }
    }

    return {
      role: 'assistant' as const,
      message,
    };
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    // Anthropic does not provide an audio API, so this method is not implemented.
    throw new Error('Audio generation is not yet supported by Anthropic.');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    const base64Image = optionsArg.image.toString('base64');

    const content: ContentBlock[] = [
      {
        type: 'text',
        text: optionsArg.prompt
      },
      {
        type: 'image',
        source: {
          type: 'base64',
          media_type: 'image/jpeg',
          data: base64Image
        }
      }
    ];

    const result = await this.anthropicApiClient.messages.create({
      model: 'claude-3-opus-20240229',
      messages: [{
        role: 'user',
        content
      }],
      max_tokens: 1024
    });

    // Extract text content from the response
    let message = '';
    for (const block of result.content) {
      if ('text' in block) {
        message += block.text;
      }
    }
    return message;
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    // Convert PDF documents to images using SmartPDF
    const smartpdfInstance = new plugins.smartpdf.SmartPdf();
    let documentImageBytesArray: Uint8Array[] = [];

    for (const pdfDocument of optionsArg.pdfDocuments) {
      const documentImageArray = await smartpdfInstance.convertPDFToPngBytes(pdfDocument);
      documentImageBytesArray = documentImageBytesArray.concat(documentImageArray);
    }

    // Convert message history to Anthropic format
    const messages = optionsArg.messageHistory.map(msg => ({
      role: msg.role === 'assistant' ? 'assistant' as const : 'user' as const,
      content: msg.content
    }));

    // Create content array with text and images
    const content: ContentBlock[] = [
      {
        type: 'text',
        text: optionsArg.userMessage
      }
    ];

    // Add each document page as an image
    for (const imageBytes of documentImageBytesArray) {
      content.push({
        type: 'image',
        source: {
          type: 'base64',
          media_type: 'image/jpeg',
          data: Buffer.from(imageBytes).toString('base64')
        }
      });
    }

    const result = await this.anthropicApiClient.messages.create({
      model: 'claude-3-opus-20240229',
      system: optionsArg.systemMessage,
      messages: [
        ...messages,
        { role: 'user', content }
      ],
      max_tokens: 4096
    });

    // Extract text content from the response
    let message = '';
    for (const block of result.content) {
      if ('text' in block) {
        message += block.text;
      }
    }

    return {
      message: {
        role: 'assistant',
        content: message
      }
    };
  }
}
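
Besides being wired into SmartAi, the provider can be constructed directly within this codebase; a sketch (the token lookup via an environment variable is an assumption):

```typescript
import { AnthropicProvider } from './provider.anthropic.js';

const anthropic = new AnthropicProvider({ anthropicToken: process.env.ANTHROPIC_TOKEN });
await anthropic.start();

const reply = await anthropic.chat({
  systemMessage: 'You are a concise assistant.',
  userMessage: 'Summarize this library in one sentence.',
  messageHistory: [],
});
console.log(reply.message); // plain text assembled from the response blocks
```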

192 ts/provider.groq.ts (Normal file)

@@ -0,0 +1,192 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';

export interface IGroqProviderOptions {
  groqToken: string;
  model?: string;
}

export class GroqProvider extends MultiModalModel {
  private options: IGroqProviderOptions;
  private baseUrl = 'https://api.groq.com/v1';

  constructor(optionsArg: IGroqProviderOptions) {
    super();
    this.options = {
      ...optionsArg,
      model: optionsArg.model || 'llama-3.3-70b-versatile', // Default model
    };
  }

  async start() {}

  async stop() {}

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // Create a TransformStream to process the input
    const transform = new TransformStream<Uint8Array, string>({
      async transform(chunk, controller) {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }

        // If we have a complete message, send it to Groq
        if (currentMessage) {
          const response = await fetch(`${this.baseUrl}/chat/completions`, {
            method: 'POST',
            headers: {
              'Authorization': `Bearer ${this.options.groqToken}`,
              'Content-Type': 'application/json',
            },
            body: JSON.stringify({
              model: this.options.model,
              messages: [{ role: currentMessage.role, content: currentMessage.content }],
              stream: true,
            }),
          });

          // Process each chunk from Groq
          const reader = response.body?.getReader();
          if (reader) {
            try {
              while (true) {
                const { done, value } = await reader.read();
                if (done) break;

                const chunk = new TextDecoder().decode(value);
                const lines = chunk.split('\n');

                for (const line of lines) {
                  if (line.startsWith('data: ')) {
                    const data = line.slice(6);
                    if (data === '[DONE]') break;

                    try {
                      const parsed = JSON.parse(data);
                      const content = parsed.choices[0]?.delta?.content;
                      if (content) {
                        controller.enqueue(content);
                      }
                    } catch (e) {
                      console.error('Failed to parse SSE data:', e);
                    }
                  }
                }
              }
            } finally {
              reader.releaseLock();
            }
          }

          currentMessage = null;
        }
      },

      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });

    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  // Implementing the synchronous chat interaction
  public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
    const messages = [
      // System message
      {
        role: 'system',
        content: optionsArg.systemMessage,
      },
      // Message history
      ...optionsArg.messageHistory.map(msg => ({
        role: msg.role,
        content: msg.content,
      })),
      // User message
      {
        role: 'user',
        content: optionsArg.userMessage,
      },
    ];

    const response = await fetch(`${this.baseUrl}/chat/completions`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.options.groqToken}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: this.options.model,
        messages,
        temperature: 0.7,
        max_completion_tokens: 1024,
        stream: false,
      }),
    });

    if (!response.ok) {
      const error = await response.json();
      throw new Error(`Groq API error: ${error.message || response.statusText}`);
    }

    const result = await response.json();

    return {
      role: 'assistant',
      message: result.choices[0].message.content,
    };
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    // Groq does not provide an audio API, so this method is not implemented.
    throw new Error('Audio generation is not yet supported by Groq.');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('Vision tasks are not yet supported by Groq.');
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    throw new Error('Document processing is not yet supported by Groq.');
  }
}
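
Since this provider is not referenced by the SmartAi class in this changeset, a direct-use sketch may help (the environment-variable token is an assumption):

```typescript
import { GroqProvider } from './provider.groq.js';

// Defaults to the llama-3.3-70b-versatile model if none is given.
const groq = new GroqProvider({ groqToken: process.env.GROQ_TOKEN });
await groq.start();

const reply = await groq.chat({
  systemMessage: 'You are a helpful assistant.',
  userMessage: 'Give me one sentence about TypeScript.',
  messageHistory: [],
});
console.log(reply.message);
```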

252 ts/provider.ollama.ts (Normal file)

@@ -0,0 +1,252 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';

export interface IOllamaProviderOptions {
  baseUrl?: string;
  model?: string;
  visionModel?: string; // Model to use for vision tasks (e.g. 'llava')
}

export class OllamaProvider extends MultiModalModel {
  private options: IOllamaProviderOptions;
  private baseUrl: string;
  private model: string;
  private visionModel: string;

  constructor(optionsArg: IOllamaProviderOptions = {}) {
    super();
    this.options = optionsArg;
    this.baseUrl = optionsArg.baseUrl || 'http://localhost:11434';
    this.model = optionsArg.model || 'llama2';
    this.visionModel = optionsArg.visionModel || 'llava';
  }

  async start() {
    // Verify Ollama is running
    try {
      const response = await fetch(`${this.baseUrl}/api/tags`);
      if (!response.ok) {
        throw new Error('Failed to connect to Ollama server');
      }
    } catch (error) {
      throw new Error(`Failed to connect to Ollama server at ${this.baseUrl}: ${error.message}`);
    }
  }

  async stop() {}

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // Create a TransformStream to process the input
    const transform = new TransformStream<Uint8Array, string>({
      async transform(chunk, controller) {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }

        // If we have a complete message, send it to Ollama
        if (currentMessage) {
          const response = await fetch(`${this.baseUrl}/api/chat`, {
            method: 'POST',
            headers: {
              'Content-Type': 'application/json',
            },
            body: JSON.stringify({
              model: this.model,
              messages: [{ role: currentMessage.role, content: currentMessage.content }],
              stream: true,
            }),
          });

          // Process each chunk from Ollama
          const reader = response.body?.getReader();
          if (reader) {
            try {
              while (true) {
                const { done, value } = await reader.read();
                if (done) break;

                const chunk = new TextDecoder().decode(value);
                const lines = chunk.split('\n');

                for (const line of lines) {
                  if (line.trim()) {
                    try {
                      const parsed = JSON.parse(line);
                      const content = parsed.message?.content;
                      if (content) {
                        controller.enqueue(content);
                      }
                    } catch (e) {
                      console.error('Failed to parse Ollama response:', e);
                    }
                  }
                }
              }
            } finally {
              reader.releaseLock();
            }
          }

          currentMessage = null;
        }
      },

      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });

    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  // Implementing the synchronous chat interaction
  public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
    // Format messages for Ollama
    const messages = [
      { role: 'system', content: optionsArg.systemMessage },
      ...optionsArg.messageHistory,
      { role: 'user', content: optionsArg.userMessage }
    ];

    // Make API call to Ollama
    const response = await fetch(`${this.baseUrl}/api/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: this.model,
        messages: messages,
        stream: false
      }),
    });

    if (!response.ok) {
      throw new Error(`Ollama API error: ${response.statusText}`);
    }

    const result = await response.json();

    return {
      role: 'assistant' as const,
      message: result.message.content,
    };
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('Audio generation is not supported by Ollama.');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    const base64Image = optionsArg.image.toString('base64');

    const response = await fetch(`${this.baseUrl}/api/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: this.visionModel,
        messages: [{
          role: 'user',
          content: optionsArg.prompt,
          images: [base64Image]
        }],
        stream: false
      }),
    });

    if (!response.ok) {
      throw new Error(`Ollama API error: ${response.statusText}`);
    }

    const result = await response.json();
    return result.message.content;
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    // Convert PDF documents to images using SmartPDF
    const smartpdfInstance = new plugins.smartpdf.SmartPdf();
    let documentImageBytesArray: Uint8Array[] = [];

    for (const pdfDocument of optionsArg.pdfDocuments) {
      const documentImageArray = await smartpdfInstance.convertPDFToPngBytes(pdfDocument);
      documentImageBytesArray = documentImageBytesArray.concat(documentImageArray);
    }

    // Convert images to base64
    const base64Images = documentImageBytesArray.map(bytes => Buffer.from(bytes).toString('base64'));

    // Send request to Ollama with images
    const response = await fetch(`${this.baseUrl}/api/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: this.visionModel,
        messages: [
          { role: 'system', content: optionsArg.systemMessage },
          ...optionsArg.messageHistory,
          {
            role: 'user',
            content: optionsArg.userMessage,
            images: base64Images
          }
        ],
        stream: false
      }),
    });

    if (!response.ok) {
      throw new Error(`Ollama API error: ${response.statusText}`);
    }

    const result = await response.json();
    return {
      message: {
        role: 'assistant',
        content: result.message.content
      }
    };
  }
}
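
A sketch of using this provider's vision path directly, assuming a local Ollama server with the llava model pulled (the image path is illustrative):

```typescript
import { promises as fs } from 'fs';
import { OllamaProvider } from './provider.ollama.js';

const ollama = new OllamaProvider({ baseUrl: 'http://localhost:11434', visionModel: 'llava' });
await ollama.start(); // verifies the server is reachable via /api/tags

const imageBuffer = await fs.readFile('./example.jpg');
const description = await ollama.vision({
  image: imageBuffer,
  prompt: 'Describe this image in detail.',
});
console.log(description);
```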
218
ts/provider.openai.ts
Normal file
218
ts/provider.openai.ts
Normal file
@ -0,0 +1,218 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';

import { MultiModalModel } from './abstract.classes.multimodal.js';

export interface IOpenaiProviderOptions {
  openaiToken: string;
}

export class OpenAiProvider extends MultiModalModel {
  private options: IOpenaiProviderOptions;
  public openAiApiClient: plugins.openai.default;
  public smartpdfInstance: plugins.smartpdf.SmartPdf;

  constructor(optionsArg: IOpenaiProviderOptions) {
    super();
    this.options = optionsArg;
  }

  public async start() {
    this.openAiApiClient = new plugins.openai.default({
      apiKey: this.options.openaiToken,
      dangerouslyAllowBrowser: true,
    });
    this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
  }

  public async stop() {}

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // Create a TransformStream to process the input
    const transform = new TransformStream<Uint8Array, string>({
      // Arrow function so `this` stays bound to the provider instance
      // (this.openAiApiClient is used below).
      transform: async (chunk, controller) => {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }

        // If we have a complete message, send it to OpenAI
        if (currentMessage) {
          const stream = await this.openAiApiClient.chat.completions.create({
            model: 'gpt-4',
            messages: [{ role: currentMessage.role, content: currentMessage.content }],
            stream: true,
          });

          // Process each chunk from OpenAI
          for await (const chunk of stream) {
            const content = chunk.choices[0]?.delta?.content;
            if (content) {
              controller.enqueue(content);
            }
          }

          currentMessage = null;
        }
      },

      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });

    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  // Implementing the synchronous chat interaction
  public async chat(optionsArg: {
    systemMessage: string;
    userMessage: string;
    messageHistory: {
      role: 'assistant' | 'user';
      content: string;
    }[];
  }) {
    const result = await this.openAiApiClient.chat.completions.create({
      model: 'gpt-4o',
      messages: [
        { role: 'system', content: optionsArg.systemMessage },
        ...optionsArg.messageHistory,
        { role: 'user', content: optionsArg.userMessage },
      ],
    });
    return {
      role: result.choices[0].message.role as 'assistant',
      message: result.choices[0].message.content,
    };
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    const done = plugins.smartpromise.defer<NodeJS.ReadableStream>();
    const result = await this.openAiApiClient.audio.speech.create({
      model: 'tts-1-hd',
      input: optionsArg.message,
      voice: 'nova',
      response_format: 'mp3',
      speed: 1,
    });
    const stream = result.body;
    done.resolve(stream);
    return done.promise;
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: {
      role: 'assistant' | 'user';
      content: any;
    }[];
  }) {
    let pdfDocumentImageBytesArray: Uint8Array[] = [];

    for (const pdfDocument of optionsArg.pdfDocuments) {
      const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
      pdfDocumentImageBytesArray = pdfDocumentImageBytesArray.concat(documentImageArray);
    }

    console.log(`image smartfile array`);
    console.log(pdfDocumentImageBytesArray.map((imageBytes) => imageBytes.length));

    const smartfileArray = await plugins.smartarray.map(
      pdfDocumentImageBytesArray,
      async (pdfDocumentImageBytes) => {
        return plugins.smartfile.SmartFile.fromBuffer(
          'pdfDocumentImage.jpg',
          Buffer.from(pdfDocumentImageBytes)
        );
      }
    );

    const result = await this.openAiApiClient.chat.completions.create({
      model: 'gpt-4o',
      // response_format: { type: "json_object" }, // not supported for now
      messages: [
        { role: 'system', content: optionsArg.systemMessage },
        ...optionsArg.messageHistory,
        {
          role: 'user',
          content: [
            { type: 'text', text: optionsArg.userMessage },
            ...(() => {
              const returnArray = [];
              for (const imageBytes of pdfDocumentImageBytesArray) {
                returnArray.push({
                  type: 'image_url',
                  image_url: {
                    url: 'data:image/png;base64,' + Buffer.from(imageBytes).toString('base64'),
                  },
                });
              }
              return returnArray;
            })(),
          ],
        },
      ],
    });
    return {
      message: result.choices[0].message,
    };
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    const result = await this.openAiApiClient.chat.completions.create({
      model: 'gpt-4-vision-preview',
      messages: [
        {
          role: 'user',
          content: [
            { type: 'text', text: optionsArg.prompt },
            {
              type: 'image_url',
              image_url: {
                url: `data:image/jpeg;base64,${optionsArg.image.toString('base64')}`
              }
            }
          ]
        }
      ],
      max_tokens: 300
    });

    return result.choices[0].message.content || '';
  }
}
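For orientation, a minimal usage sketch of the class above. It is hedged: the import path and environment variable are assumptions; the constructor options, `start()`, and `chat()` signature follow what the file shows.

// Sketch — import path assumed; start() must run before any API call,
// since it is what creates the OpenAI client and the SmartPdf instance.
import { OpenAiProvider } from './provider.openai.js';

const provider = new OpenAiProvider({ openaiToken: process.env.OPENAI_TOKEN! });
await provider.start();

const reply = await provider.chat({
  systemMessage: 'You are a concise assistant.',
  userMessage: 'Summarize what a TransformStream does.',
  messageHistory: [],
});
console.log(reply.role, reply.message);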
171
ts/provider.perplexity.ts
Normal file
@ -0,0 +1,171 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';

export interface IPerplexityProviderOptions {
  perplexityToken: string;
}

export class PerplexityProvider extends MultiModalModel {
  private options: IPerplexityProviderOptions;

  constructor(optionsArg: IPerplexityProviderOptions) {
    super();
    this.options = optionsArg;
  }

  async start() {
    // Initialize any necessary clients or resources
  }

  async stop() {}

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // Create a TransformStream to process the input
    const transform = new TransformStream<Uint8Array, string>({
      // Arrow function so `this` stays bound to the provider instance
      // (this.options.perplexityToken is used below).
      transform: async (chunk, controller) => {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }

        // If we have a complete message, send it to Perplexity
        if (currentMessage) {
          const response = await fetch('https://api.perplexity.ai/chat/completions', {
            method: 'POST',
            headers: {
              'Authorization': `Bearer ${this.options.perplexityToken}`,
              'Content-Type': 'application/json',
            },
            body: JSON.stringify({
              model: 'mixtral-8x7b-instruct',
              messages: [{ role: currentMessage.role, content: currentMessage.content }],
              stream: true,
            }),
          });

          // Process each chunk from Perplexity
          const reader = response.body?.getReader();
          if (reader) {
            try {
              while (true) {
                const { done, value } = await reader.read();
                if (done) break;

                const chunk = new TextDecoder().decode(value);
                const lines = chunk.split('\n');

                for (const line of lines) {
                  if (line.startsWith('data: ')) {
                    const data = line.slice(6);
                    if (data === '[DONE]') break;

                    try {
                      const parsed = JSON.parse(data);
                      const content = parsed.choices[0]?.delta?.content;
                      if (content) {
                        controller.enqueue(content);
                      }
                    } catch (e) {
                      console.error('Failed to parse SSE data:', e);
                    }
                  }
                }
              }
            } finally {
              reader.releaseLock();
            }
          }

          currentMessage = null;
        }
      },

      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });

    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  // Implementing the synchronous chat interaction
  public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
    // Make API call to Perplexity
    const response = await fetch('https://api.perplexity.ai/chat/completions', {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.options.perplexityToken}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: 'mixtral-8x7b-instruct', // Using Mixtral model
        messages: [
          { role: 'system', content: optionsArg.systemMessage },
          ...optionsArg.messageHistory,
          { role: 'user', content: optionsArg.userMessage }
        ],
      }),
    });

    if (!response.ok) {
      throw new Error(`Perplexity API error: ${response.statusText}`);
    }

    const result = await response.json();

    return {
      role: 'assistant' as const,
      message: result.choices[0].message.content,
    };
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('Audio generation is not supported by Perplexity.');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('Vision tasks are not supported by Perplexity.');
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    throw new Error('Document processing is not supported by Perplexity.');
  }
}
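The chatStream implementations above all expect newline-delimited JSON messages on the input stream. A hedged sketch of driving that framing with the Perplexity provider follows; the environment variable is a placeholder, and everything else mirrors the signatures shown in the file.

// Sketch — demonstrates the newline-delimited JSON framing chatStream parses.
const provider = new PerplexityProvider({ perplexityToken: process.env.PERPLEXITY_TOKEN! });
await provider.start();

const encoder = new TextEncoder();
const input = new ReadableStream<Uint8Array>({
  start(controller) {
    // One complete JSON message per line, as the transform above expects.
    controller.enqueue(encoder.encode(JSON.stringify({ role: 'user', content: 'Hello!' }) + '\n'));
    controller.close();
  },
});

const output = await provider.chatStream(input);
const reader = output.getReader();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  process.stdout.write(value);
}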
@ -1,53 +0,0 @@
type TProcessFunction = (input: string) => Promise<string>;

interface ISmartAiOptions {
  processFunction: TProcessFunction;
}

class SmartAi {
  private processFunction: TProcessFunction;
  private inputStreamWriter: WritableStreamDefaultWriter<string> | null = null;
  private outputStreamController: ReadableStreamDefaultController<string> | null = null;

  constructor(options: ISmartAiOptions) {
    this.processFunction = options.processFunction;
  }

  private setupOutputStream(): ReadableStream<string> {
    return new ReadableStream<string>({
      start: (controller) => {
        this.outputStreamController = controller;
      }
    });
  }

  private setupInputStream(): WritableStream<string> {
    return new WritableStream<string>({
      write: async (chunk) => {
        const processedData = await this.processFunction(chunk);
        if (this.outputStreamController) {
          this.outputStreamController.enqueue(processedData);
        }
      },
      close: () => {
        this.outputStreamController?.close();
      },
      abort: (err) => {
        console.error('Stream aborted', err);
        this.outputStreamController?.error(err);
      }
    });
  }

  public getInputStreamWriter(): WritableStreamDefaultWriter<string> {
    if (!this.inputStreamWriter) {
      const inputStream = this.setupInputStream();
      this.inputStreamWriter = inputStream.getWriter();
    }
    return this.inputStreamWriter;
  }

  public getOutputStream(): ReadableStream<string> {
    return this.setupOutputStream();
  }
}
@ -1,4 +0,0 @@
const removeme = {};
export {
  removeme
}