Compare commits

7 commits:

- f628a71184
- d1465fc868
- 9e19d320e1
- 158d49fa95
- 1ce412fd00
- 92c382c16e
- 63d3b7c9bb
```diff
@@ -16,7 +16,10 @@
     "text-to-speech",
     "conversation stream",
     "TypeScript",
-    "ESM"
+    "ESM",
+    "streaming API",
+    "modular design",
+    "development tool"
   ]
 }
 },
```
package.json (27 lines changed)

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@push.rocks/smartai",
-  "version": "0.0.9",
+  "version": "0.0.12",
   "private": false,
   "description": "Provides a standardized interface for integrating and conversing with multiple AI models, supporting operations like chat and potentially audio responses.",
   "main": "dist_ts/index.js",
@@ -17,17 +17,21 @@
     "@git.zone/tsbuild": "^2.1.25",
     "@git.zone/tsbundle": "^2.0.5",
     "@git.zone/tsrun": "^1.2.46",
-    "@git.zone/tstest": "^1.0.44",
-    "@push.rocks/tapbundle": "^5.0.15",
-    "@types/node": "^20.8.7"
+    "@git.zone/tstest": "^1.0.90",
+    "@push.rocks/qenv": "^6.0.5",
+    "@push.rocks/tapbundle": "^5.0.23",
+    "@types/node": "^20.12.7"
   },
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.19.1",
-    "@push.rocks/qenv": "^6.0.5",
-    "@push.rocks/smartfile": "^11.0.4",
-    "@push.rocks/smartpath": "^5.0.11",
+    "@anthropic-ai/sdk": "^0.20.7",
+    "@push.rocks/smartarray": "^1.0.8",
+    "@push.rocks/smartfile": "^11.0.14",
+    "@push.rocks/smartpath": "^5.0.18",
+    "@push.rocks/smartpdf": "^3.1.5",
     "@push.rocks/smartpromise": "^4.0.3",
-    "openai": "^4.31.0"
+    "@push.rocks/smartrequest": "^2.0.22",
+    "@push.rocks/webstream": "^1.0.8",
+    "openai": "^4.38.5"
   },
   "repository": {
     "type": "git",
@@ -59,6 +63,9 @@
     "text-to-speech",
     "conversation stream",
     "TypeScript",
-    "ESM"
+    "ESM",
+    "streaming API",
+    "modular design",
+    "development tool"
   ]
 }
```
pnpm-lock.yaml (1016 lines changed, generated): file diff suppressed because it is too large.
qenv.yml (4 lines changed)

```diff
@@ -1,2 +1,4 @@
 required:
-  - OPENAI_TOKEN
+  - OPENAI_TOKEN
+  - ANTHROPIC_TOKEN
+  - PERPLEXITY_TOKEN
```
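For reference, the test suite in this changeset resolves these variables through `@push.rocks/qenv`. A minimal sketch of reading the newly required tokens, assuming the same `./.nogit/` fallback directory that `test/test.ts` uses:

```typescript
import * as qenv from '@push.rocks/qenv';

// Resolves required env vars, falling back to the ./.nogit/ directory,
// exactly as test/test.ts does in this changeset.
const testQenv = new qenv.Qenv('./', './.nogit/');

const openaiToken = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
const anthropicToken = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
const perplexityToken = await testQenv.getEnvVarOnDemand('PERPLEXITY_TOKEN');
```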
readme.hints.md (new file, 1 line)

```diff
@@ -0,0 +1 @@
+
```
readme.md (84 lines changed)

```diff
@@ -1,108 +1,112 @@
 # @push.rocks/smartai
-a standardized interface to talk to AI models
+
+Provides a standardized interface for integrating and conversing with multiple AI models, supporting operations like chat and potentially audio responses.
 
 ## Install
-To install `@push.rocks/smartai`, run the following command in your terminal:
+
+To add @push.rocks/smartai to your project, run the following command in your terminal:
 
 ```bash
 npm install @push.rocks/smartai
 ```
 
-This will add the package to your project's dependencies.
+This command installs the package and adds it to your project's dependencies.
 
 ## Usage
 
-In the following guide, you'll learn how to leverage `@push.rocks/smartai` for integrating AI models into your applications using TypeScript with ESM syntax.
+The usage section delves into how to leverage the `@push.rocks/smartai` package to interact with AI models in an application. This package simplifies the integration and conversation with AI models by providing a standardized interface. The examples below demonstrate the package's capabilities in engaging with AI models for chat operations and potentially handling audio responses using TypeScript and ESM syntax.
 
-### Getting Started
+### Integrating AI Models
 
-First, you'll need to import the necessary modules from `@push.rocks/smartai`. This typically includes the main `SmartAi` class along with any specific provider classes you intend to use, such as `OpenAiProvider` or `AnthropicProvider`.
+#### Importing the Module
+
+Start by importing `SmartAi` and the AI providers you wish to use from `@push.rocks/smartai`.
 
 ```typescript
 import { SmartAi, OpenAiProvider, AnthropicProvider } from '@push.rocks/smartai';
 ```
 
-### Initialization
+#### Initializing `SmartAi`
 
-Create an instance of `SmartAi` by providing the required options, which include authentication tokens for the AI providers you plan to use.
+Create an instance of `SmartAi` with the necessary credentials for accessing the AI services.
 
 ```typescript
 const smartAi = new SmartAi({
-  openaiToken: 'your-openai-token-here',
-  anthropicToken: 'your-anthropic-token-here'
+  openaiToken: 'your-openai-access-token',
+  anthropicToken: 'your-anthropic-access-token'
 });
 ```
 
-### Creating a Conversation
+### Chatting with the AI
 
-`@push.rocks/smartai` offers a versatile way to handle conversations with AI. To create a conversation using OpenAI, for instance:
+#### Creating a Conversation
+
+To begin a conversation, choose the AI provider you'd like to use. For instance, to use OpenAI:
 
 ```typescript
 async function createOpenAiConversation() {
   const conversation = await smartAi.createOpenApiConversation();
   // Use the conversation for chatting
 }
 ```
 
-For Anthropic-based conversations:
+Similarly, for an Anthropic AI conversation:
 
 ```typescript
 async function createAnthropicConversation() {
   const conversation = await smartAi.createAnthropicConversation();
   // Use the conversation for chatting
 }
 ```
 
-### Advanced Usage: Streaming and Chat
+### Streaming Chat with OpenAI
 
-Advanced use cases might require direct access to the streaming APIs provided by the AI models. For instance, handling a chat stream with OpenAI can be achieved as follows:
-
-#### Set Up the Conversation Stream
-
-First, create a conversation and obtain the input and output streams.
+For more advanced scenarios, like a streaming chat with OpenAI, you would interact with the chat stream directly:
 
 ```typescript
-const conversation = await smartAi.createOpenApiConversation();
+// Assuming a conversation has been created and initialized...
 const inputStreamWriter = conversation.getInputStreamWriter();
 const outputStream = conversation.getOutputStream();
-```
 
-#### Write to Input Stream
-
-To send messages to the AI model, use the input stream writer.
-
-```typescript
-await inputStreamWriter.write('Hello, SmartAI!');
-```
-
-#### Processing Output Stream
-
-Output from the AI model can be processed by reading from the output stream.
-
-```typescript
+// Write a message to the input stream for the AI to process
+await inputStreamWriter.write('Hello, how can I help you today?');
+
+// Listen to the output stream for responses from AI
 const reader = outputStream.getReader();
 reader.read().then(function processText({ done, value }) {
   if (done) {
-    console.log("Stream complete");
+    console.log("No more messages from AI");
     return;
   }
-  console.log("Received from AI:", value);
+  console.log("AI says:", value);
+  // Continue reading messages
   reader.read().then(processText);
 });
 ```
 
-### Handling Audio
+### Handling Audio Responses
 
-`@push.rocks/smartai` also supports handling audio responses from AI models. To generate and retrieve audio output:
+The package may also support converting text responses from the AI into audio. While specific implementation details depend on the AI provider's capabilities, a generic approach would involve creating a text-to-speech instance and utilizing it:
 
 ```typescript
+// This is a hypothetical function call as the implementation might vary
 const tts = await TTS.createWithOpenAi(smartAi);
+
+// The TTS instance would then be used to convert text to speech
 ```
 
-This code snippet initializes text-to-speech (TTS) capabilities using the OpenAI model. Further customization and usage of audio APIs will depend on the capabilities offered by the specific AI model and provider you are working with.
+### Extensive Feature Set
+
+`@push.rocks/smartai` provides comprehensive support for interacting with various AI models, not limited to text chat. It encompasses audio responses, potentially incorporating AI-powered analyses, and other multi-modal interactions.
+
+Refer to the specific AI providers’ documentation through `@push.rocks/smartai`, such as OpenAI and Anthropic, for detailed guidance on utilizing the full spectrum of capabilities, including the implementation of custom conversation flows, handling streaming data efficiently, and generating audio responses from AI conversations.
 
-### Conclusion
-
-`@push.rocks/smartai` offers a flexible and standardized interface for interacting with AI models, streamlining the development of applications that leverage AI capabilities. Through the outlined examples, you've seen how to initialize the library, create conversations, and handle both text and audio interactions with AI models in a TypeScript environment following ESM syntax.
+Equipped with `@push.rocks/smartai`, developers can streamline the integration of sophisticated AI interactions into their applications. The package facilitates robust communication with AI models, supporting diverse operations from simple chats to complex audio feedback mechanisms, all within a unified, easy-to-use interface.
 
-Explore the package more to uncover its full potential in creating engaging, AI-enhanced interactions in your applications.
+For a comprehensive understanding of all features and to explore more advanced use cases, refer to the official [documentation](https://code.foss.global/push.rocks/smartai#readme) and check the `npmextra.json` file's `tsdocs` section for additional insights on module usage.
 
 ## License and Legal Information
```
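Note that the readme still documents `createOpenApiConversation()` and `createAnthropicConversation()`, while `ts/classes.smartai.ts` in this same changeset replaces them with `start()` plus `createConversation(provider)`. A minimal sketch of the flow the new code actually exposes, with placeholder tokens:

```typescript
import { SmartAi } from '@push.rocks/smartai';

const smartAi = new SmartAi({
  openaiToken: 'your-openai-access-token', // placeholder
});

// start() instantiates a provider for each token that was supplied
await smartAi.start();

// chat via the provider, using the new options-object signature
const response = await smartAi.openaiProvider.chat({
  systemMessage: 'You are a helpful assistant.',
  userMessage: 'How are you?',
  messageHistory: [],
});
console.log(response.message); // the assistant's reply

await smartAi.stop();
```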
test/test.ts (84 lines changed)

```diff
@@ -1,8 +1,84 @@
 import { expect, expectAsync, tap } from '@push.rocks/tapbundle';
-import * as smartai from '../ts/index.js'
+import * as qenv from '@push.rocks/qenv';
+import * as smartrequest from '@push.rocks/smartrequest';
+import * as smartfile from '@push.rocks/smartfile';
 
-tap.test('first test', async () => {
-  console.log(smartai)
+const testQenv = new qenv.Qenv('./', './.nogit/');
+
+import * as smartai from '../ts/index.js';
+
+let testSmartai: smartai.SmartAi;
+
+tap.test('should create a smartai instance', async () => {
+  testSmartai = new smartai.SmartAi({
+    openaiToken: await testQenv.getEnvVarOnDemand('OPENAI_TOKEN'),
+  });
+  await testSmartai.start();
+});
+
+tap.test('should create chat response with openai', async () => {
+  const userMessage = 'How are you?';
+  const response = await testSmartai.openaiProvider.chat({
+    systemMessage: 'Hello',
+    userMessage: userMessage,
+    messageHistory: [
+    ],
+  });
+  console.log(`userMessage: ${userMessage}`);
+  console.log(response.message.content);
+});
+
+tap.test('should document a pdf', async () => {
+  const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
+  const pdfResponse = await smartrequest.getBinary(pdfUrl);
+  const result = await testSmartai.openaiProvider.document({
+    systemMessage: 'Classify the document. Only the following answers are allowed: "invoice", "bank account statement", "contract", "other"',
+    userMessage: "Classify the document.",
+    messageHistory: [],
+    pdfDocuments: [pdfResponse.body],
+  });
+  console.log(result);
+});
+
+tap.test('should recognize companies in a pdf', async () => {
+  const pdfBuffer = await smartfile.fs.toBuffer('./.nogit/demo_without_textlayer.pdf');
+  const result = await testSmartai.openaiProvider.document({
+    systemMessage: `
+      summarize the document.
+
+      answer in JSON format, adhering to the following schema:
+      \`\`\`typescript
+      type TAnswer = {
+        entitySender: {
+          type: 'official state entity' | 'company' | 'person';
+          name: string;
+          address: string;
+          city: string;
+          country: string;
+          EU: boolean; // wether the entity is within EU
+        };
+        entityReceiver: {
+          type: 'official state entity' | 'company' | 'person';
+          name: string;
+          address: string;
+          city: string;
+          country: string;
+          EU: boolean; // wether the entity is within EU
+        };
+        date: string; // the date of the document as YYYY-MM-DD
+        title: string; // a short title, suitable for a filename
+      }
+      \`\`\`
+    `,
+    userMessage: "Classify the document.",
+    messageHistory: [],
+    pdfDocuments: [pdfBuffer],
+  });
+  console.log(result);
 })
 
-tap.start()
+tap.test('should stop the smartai instance', async () => {
+  await testSmartai.stop();
+});
+
+export default tap.start();
```
```diff
@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@push.rocks/smartai',
-  version: '0.0.9',
+  version: '0.0.12',
   description: 'Provides a standardized interface for integrating and conversing with multiple AI models, supporting operations like chat and potentially audio responses.'
 }
```
```diff
@@ -8,8 +8,25 @@ export abstract class MultiModalModel {
    * stops the model
    */
   abstract stop(): Promise<void>;
 
-  // Defines a streaming interface for chat interactions.
-  // The implementation will vary based on the specific AI model.
-  abstract chatStream(input: ReadableStream<string>): ReadableStream<string>;
+  public abstract chat(optionsArg: {
+    systemMessage: string,
+    userMessage: string,
+    messageHistory: {
+      role: 'assistant' | 'user';
+      content: string;
+    }[]
+  }): Promise<{
+    role: 'assistant';
+    message: string;
+  }>
+
+  /**
+   * Defines a streaming interface for chat interactions.
+   * The implementation will vary based on the specific AI model.
+   * @param input
+   */
+  public abstract chatStream(input: ReadableStream<string>): Promise<ReadableStream<string>>;
+
 }
```
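To make the widened contract concrete, here is a sketch of a provider skeleton satisfying the new abstract signatures. The `EchoProvider` name and trivial bodies are illustrative only, and it assumes `start`/`stop` are the only other abstract members:

```typescript
import { MultiModalModel } from './abstract.classes.multimodal.js';

// Illustrative stub that simply echoes the user message back.
class EchoProvider extends MultiModalModel {
  public async start() {}
  public async stop() {}

  public async chat(optionsArg: {
    systemMessage: string;
    userMessage: string;
    messageHistory: { role: 'assistant' | 'user'; content: string }[];
  }): Promise<{ role: 'assistant'; message: string }> {
    return { role: 'assistant', message: `echo: ${optionsArg.userMessage}` };
  }

  // chatStream now returns a Promise of a stream instead of a bare stream.
  public async chatStream(input: ReadableStream<string>): Promise<ReadableStream<string>> {
    return input; // pass-through; a real provider would transform each chunk
  }
}
```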
```diff
@@ -12,9 +12,11 @@ export interface IConversationOptions {
  */
 export class Conversation {
   // STATIC
-  public static async createWithOpenAi(smartaiRef: SmartAi) {
-    const openaiProvider = new OpenAiProvider(smartaiRef.options.openaiToken);
-    const conversation = new Conversation(smartaiRef, {
+  public static async createWithOpenAi(smartaiRefArg: SmartAi) {
+    if (!smartaiRefArg.openaiProvider) {
+      throw new Error('OpenAI provider not available');
+    }
+    const conversation = new Conversation(smartaiRefArg, {
       processFunction: async (input) => {
         return '' // TODO implement proper streaming
       }
@@ -22,9 +24,11 @@ export class Conversation {
     return conversation;
   }
 
-  public static async createWithAnthropic(smartaiRef: SmartAi) {
-    const anthropicProvider = new OpenAiProvider(smartaiRef.options.anthropicToken);
-    const conversation = new Conversation(smartaiRef, {
+  public static async createWithAnthropic(smartaiRefArg: SmartAi) {
+    if (!smartaiRefArg.anthropicProvider) {
+      throw new Error('Anthropic provider not available');
+    }
+    const conversation = new Conversation(smartaiRefArg, {
       processFunction: async (input) => {
         return '' // TODO implement proper streaming
       }
@@ -32,6 +36,29 @@ export class Conversation {
     return conversation;
   }
 
+  public static async createWithPerplexity(smartaiRefArg: SmartAi) {
+    if (!smartaiRefArg.perplexityProvider) {
+      throw new Error('Perplexity provider not available');
+    }
+    const conversation = new Conversation(smartaiRefArg, {
+      processFunction: async (input) => {
+        return '' // TODO implement proper streaming
+      }
+    });
+    return conversation;
+  }
+
+  public static async createWithOllama(smartaiRefArg: SmartAi) {
+    if (!smartaiRefArg.ollamaProvider) {
+      throw new Error('Ollama provider not available');
+    }
+    const conversation = new Conversation(smartaiRefArg, {
+      processFunction: async (input) => {
+        return '' // TODO implement proper streaming
+      }
+    });
+    return conversation;
+  }
+
   // INSTANCE
   smartaiRef: SmartAi
@@ -44,8 +71,8 @@ export class Conversation {
     this.processFunction = options.processFunction;
   }
 
-  setSystemMessage(systemMessage: string) {
-    this.systemMessage = systemMessage;
+  public async setSystemMessage(systemMessageArg: string) {
+    this.systemMessage = systemMessageArg;
   }
 
   private setupOutputStream(): ReadableStream<string> {
@@ -57,7 +84,7 @@ export class Conversation {
   }
 
   private setupInputStream(): WritableStream<string> {
-    return new WritableStream<string>({
+    const writableStream = new WritableStream<string>({
       write: async (chunk) => {
         const processedData = await this.processFunction(chunk);
         if (this.outputStreamController) {
@@ -72,6 +99,7 @@ export class Conversation {
         this.outputStreamController?.error(err);
       }
     });
+    return writableStream;
   }
 
   public getInputStreamWriter(): WritableStreamDefaultWriter<string> {
```
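Driven from the outside, the refactored stream plumbing would look roughly like this, based on the `getInputStreamWriter`/`getOutputStream` accessors shown here and in the readme (the `smartAi` instance is assumed to exist and to have been started):

```typescript
const conversation = await smartAi.createConversation('openai');

// Writes are routed through processFunction into the output stream.
const writer = conversation.getInputStreamWriter();
const output = conversation.getOutputStream();

await writer.write('Hello!');

const reader = output.getReader();
const { value } = await reader.read();
console.log(value); // currently '' until the TODO streaming is implemented
```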
```diff
@@ -1,30 +1,62 @@
 import { Conversation } from './classes.conversation.js';
 import * as plugins from './plugins.js';
+import { AnthropicProvider } from './provider.anthropic.js';
+import type { OllamaProvider } from './provider.ollama.js';
+import { OpenAiProvider } from './provider.openai.js';
+import type { PerplexityProvider } from './provider.perplexity.js';
 
 export interface ISmartAiOptions {
-  openaiToken: string;
-  anthropicToken: string;
+  openaiToken?: string;
+  anthropicToken?: string;
+  perplexityToken?: string;
 }
 
+export type TProvider = 'openai' | 'anthropic' | 'perplexity' | 'ollama';
+
 export class SmartAi {
   public options: ISmartAiOptions;
 
+  public openaiProvider: OpenAiProvider;
+  public anthropicProvider: AnthropicProvider;
+  public perplexityProvider: PerplexityProvider;
+  public ollamaProvider: OllamaProvider;
+
   constructor(optionsArg: ISmartAiOptions) {
     this.options = optionsArg;
   }
 
-  /**
-   * creates an OpenAI conversation
-   */
-  public async createOpenApiConversation() {
-    const conversation = await Conversation.createWithOpenAi(this);
-  }
+  public async start() {
+    if (this.options.openaiToken) {
+      this.openaiProvider = new OpenAiProvider({
+        openaiToken: this.options.openaiToken,
+      });
+      await this.openaiProvider.start();
+    }
+    if (this.options.anthropicToken) {
+      this.anthropicProvider = new AnthropicProvider({
+        anthropicToken: this.options.anthropicToken,
+      });
+    }
+  }
+
+  public async stop() {}
 
   /**
-   * creates an OpenAI conversation
+   * create a new conversation
    */
-  public async createAnthropicConversation() {
-    const conversation = await Conversation.createWithAnthropic(this);
-  }
+  createConversation(provider: TProvider) {
+    switch (provider) {
+      case 'openai':
+        return Conversation.createWithOpenAi(this);
+      case 'anthropic':
+        return Conversation.createWithAnthropic(this);
+      case 'perplexity':
+        return Conversation.createWithPerplexity(this);
+      case 'ollama':
+        return Conversation.createWithOllama(this);
+      default:
+        throw new Error('Provider not available');
+    }
+  }
 }
```
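Usage of the new `createConversation` switch, which replaces the per-provider helpers; this sketch assumes `start()` was called with the matching token so the provider guard does not throw:

```typescript
const smartAi = new SmartAi({ openaiToken: process.env.OPENAI_TOKEN });
await smartAi.start();

// provider is one of 'openai' | 'anthropic' | 'perplexity' | 'ollama'
const conversation = await smartAi.createConversation('openai');

// Without a configured token the corresponding factory throws,
// e.g. 'OpenAI provider not available'.
```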
ts/interfaces.ts (new file, empty)
```diff
@@ -7,15 +7,23 @@ export {
 
 // @push.rocks scope
 import * as qenv from '@push.rocks/qenv';
-import * as smartpath from '@push.rocks/smartpath';
-import * as smartpromise from '@push.rocks/smartpromise';
+import * as smartarray from '@push.rocks/smartarray';
 import * as smartfile from '@push.rocks/smartfile';
+import * as smartpath from '@push.rocks/smartpath';
+import * as smartpdf from '@push.rocks/smartpdf';
+import * as smartpromise from '@push.rocks/smartpromise';
+import * as smartrequest from '@push.rocks/smartrequest';
+import * as webstream from '@push.rocks/webstream';
 
 export {
+  smartarray,
   qenv,
-  smartpath,
-  smartpromise,
   smartfile,
+  smartpath,
+  smartpdf,
+  smartpromise,
+  smartrequest,
+  webstream,
 }
 
 // third party
```
```diff
@@ -2,74 +2,61 @@ import * as plugins from './plugins.js';
 import * as paths from './paths.js';
 
 import { MultiModalModel } from './abstract.classes.multimodal.js';
 
+export interface IAnthropicProviderOptions {
+  anthropicToken: string;
+}
+
 export class AnthropicProvider extends MultiModalModel {
-  private anthropicToken: string;
+  private options: IAnthropicProviderOptions;
   public anthropicApiClient: plugins.anthropic.default;
 
-  constructor(anthropicToken: string) {
+  constructor(optionsArg: IAnthropicProviderOptions) {
     super();
-    this.anthropicToken = anthropicToken; // Ensure the token is stored
+    this.options = optionsArg // Ensure the token is stored
   }
 
   async start() {
     this.anthropicApiClient = new plugins.anthropic.default({
-      apiKey: this.anthropicToken,
+      apiKey: this.options.anthropicToken,
     });
   }
 
   async stop() {}
 
-  chatStream(input: ReadableStream<string>): ReadableStream<string> {
-    const decoder = new TextDecoder();
-    let messageHistory: { role: 'assistant' | 'user'; content: string }[] = [];
-
-    return new ReadableStream({
-      async start(controller) {
-        const reader = input.getReader();
-        try {
-          let done, value;
-          while ((({ done, value } = await reader.read()), !done)) {
-            const userMessage = decoder.decode(value, { stream: true });
-            messageHistory.push({ role: 'user', content: userMessage });
-            const aiResponse = await this.chat('', userMessage, messageHistory);
-            messageHistory.push({ role: 'assistant', content: aiResponse.message });
-            // Directly enqueue the string response instead of encoding it first
-            controller.enqueue(aiResponse.message);
-          }
-          controller.close();
-        } catch (err) {
-          controller.error(err);
-        }
-      },
-    });
+  public async chatStream(input: ReadableStream<string>): Promise<ReadableStream<string>> {
+    // TODO: implement for OpenAI
+    const returnStream = new ReadableStream();
+    return returnStream;
   }
 
   // Implementing the synchronous chat interaction
-  public async chat(
-    systemMessage: string,
-    userMessage: string,
-    messageHistory: {
-      role: 'assistant' | 'user';
-      content: string;
-    }[]
-  ) {
+  public async chat(optionsArg: {
+    systemMessage: string;
+    userMessage: string;
+    messageHistory: {
+      role: 'assistant' | 'user';
+      content: string;
+    }[];
+  }) {
     const result = await this.anthropicApiClient.messages.create({
       model: 'claude-3-opus-20240229',
-      system: systemMessage,
+      system: optionsArg.systemMessage,
       messages: [
-        ...messageHistory,
-        { role: 'user', content: userMessage },
+        ...optionsArg.messageHistory,
+        { role: 'user', content: optionsArg.userMessage },
       ],
       max_tokens: 4000,
     });
 
     return {
-      message: result.content,
+      role: result.role as 'assistant',
+      message: result.content.join('\n'),
     };
   }
 
-  public async audio(messageArg: string) {
+  private async audio(messageArg: string) {
     // Anthropic does not provide an audio API, so this method is not implemented.
-    throw new Error('Audio generation is not supported by Anthropic.');
+    throw new Error('Audio generation is not yet supported by Anthropic.');
   }
 }
```
ts/provider.ollama.ts (new file, 3 lines)

```diff
@@ -0,0 +1,3 @@
+import * as plugins from './plugins.js';
+
+export class OllamaProvider {}
```
```diff
@@ -3,87 +3,132 @@ import * as paths from './paths.js';
 
 import { MultiModalModel } from './abstract.classes.multimodal.js';
 
+export interface IOpenaiProviderOptions {
+  openaiToken: string;
+}
+
 export class OpenAiProvider extends MultiModalModel {
-  private openAiToken: string;
+  private options: IOpenaiProviderOptions;
   public openAiApiClient: plugins.openai.default;
+  public smartpdfInstance: plugins.smartpdf.SmartPdf;
 
-  constructor(openaiToken: string) {
+  constructor(optionsArg: IOpenaiProviderOptions) {
     super();
-    this.openAiToken = openaiToken; // Ensure the token is stored
+    this.options = optionsArg;
   }
 
-  async start() {
+  public async start() {
     this.openAiApiClient = new plugins.openai.default({
-      apiKey: this.openAiToken,
+      apiKey: this.options.openaiToken,
       dangerouslyAllowBrowser: true,
     });
+    this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
   }
 
-  async stop() {}
+  public async stop() {}
 
-  chatStream(input: ReadableStream<string>): ReadableStream<string> {
-    const decoder = new TextDecoder();
-    let messageHistory: { role: 'assistant' | 'user'; content: string }[] = [];
-
-    return new ReadableStream({
-      async start(controller) {
-        const reader = input.getReader();
-        try {
-          let done, value;
-          while ((({ done, value } = await reader.read()), !done)) {
-            const userMessage = decoder.decode(value, { stream: true });
-            messageHistory.push({ role: 'user', content: userMessage });
-
-            const aiResponse = await this.chat('', userMessage, messageHistory);
-            messageHistory.push({ role: 'assistant', content: aiResponse.message });
-
-            // Directly enqueue the string response instead of encoding it first
-            controller.enqueue(aiResponse.message);
-          }
-          controller.close();
-        } catch (err) {
-          controller.error(err);
-        }
-      },
-    });
+  public async chatStream(input: ReadableStream<string>): Promise<ReadableStream<string>> {
+    // TODO: implement for OpenAI
+    const returnStream = new ReadableStream();
+    return returnStream;
   }
 
   // Implementing the synchronous chat interaction
-  public async chat(
-    systemMessage: string,
-    userMessage: string,
-    messageHistory: {
-      role: 'assistant' | 'user';
-      content: string;
-    }[]
-  ) {
+  public async chat(optionsArg: {
+    systemMessage: string;
+    userMessage: string;
+    messageHistory: {
+      role: 'assistant' | 'user';
+      content: string;
+    }[];
+  }) {
     const result = await this.openAiApiClient.chat.completions.create({
       model: 'gpt-4-turbo-preview',
       messages: [
-        { role: 'system', content: systemMessage },
-        ...messageHistory,
-        { role: 'user', content: userMessage },
+        { role: 'system', content: optionsArg.systemMessage },
+        ...optionsArg.messageHistory,
+        { role: 'user', content: optionsArg.userMessage },
       ],
     });
     return {
       role: result.choices[0].message.role as 'assistant',
       message: result.choices[0].message.content,
     };
   }
 
+  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
+    const done = plugins.smartpromise.defer<NodeJS.ReadableStream>();
+    const result = await this.openAiApiClient.audio.speech.create({
+      model: 'tts-1-hd',
+      input: optionsArg.message,
+      voice: 'nova',
+      response_format: 'mp3',
+      speed: 1,
+    });
+    const stream = result.body;
+    done.resolve(stream);
+    return done.promise;
+  }
+
+  public async document(optionsArg: {
+    systemMessage: string;
+    userMessage: string;
+    pdfDocuments: Uint8Array[];
+    messageHistory: {
+      role: 'assistant' | 'user';
+      content: any;
+    }[];
+  }) {
+    let pdfDocumentImageBytesArray: Uint8Array[] = [];
+
+    for (const pdfDocument of optionsArg.pdfDocuments) {
+      const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
+      pdfDocumentImageBytesArray = pdfDocumentImageBytesArray.concat(documentImageArray);
+    }
+
+    console.log(`image smartfile array`);
+    console.log(pdfDocumentImageBytesArray.map((smartfile) => smartfile.length));
+
+    const smartfileArray = await plugins.smartarray.map(
+      pdfDocumentImageBytesArray,
+      async (pdfDocumentImageBytes) => {
+        return plugins.smartfile.SmartFile.fromBuffer(
+          'pdfDocumentImage.jpg',
+          Buffer.from(pdfDocumentImageBytes)
+        );
+      }
+    );
+
+    const result = await this.openAiApiClient.chat.completions.create({
+      model: 'gpt-4-vision-preview',
+      // response_format: { type: "json_object" }, // not supported for now
+      messages: [
+        { role: 'system', content: optionsArg.systemMessage },
+        ...optionsArg.messageHistory,
+        {
+          role: 'user',
+          content: [
+            { type: 'text', text: optionsArg.userMessage },
+            ...(() => {
+              const returnArray = [];
+              for (const imageBytes of pdfDocumentImageBytesArray) {
+                returnArray.push({
+                  type: 'image_url',
+                  image_url: {
+                    url: 'data:image/png;base64,' + Buffer.from(imageBytes).toString('base64'),
+                  },
+                });
+              }
+              return returnArray;
+            })(),
+          ],
+        },
+      ],
+    });
+    return {
+      message: result.choices[0].message,
+    };
+  }
+
-  public async audio(messageArg: string) {
-    const done = plugins.smartpromise.defer();
-    const result = await this.openAiApiClient.audio.speech.create({
-      model: 'tts-1-hd',
-      input: messageArg,
-      voice: 'nova',
-      response_format: 'mp3',
-      speed: 1,
-    });
-    const stream = result.body.pipe(plugins.smartfile.fsStream.createWriteStream(plugins.path.join(paths.nogitDir, 'output.mp3')));
-    stream.on('finish', () => {
-      done.resolve();
-    });
-    return done.promise;
-  }
 }
```
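The new `document()` path end to end, as exercised by `test/test.ts` (the URL and classification prompt are taken from the test; an initialized `openaiProvider` instance is assumed). Internally the provider converts each PDF into PNG images via `smartpdf` and sends them to the vision model as data URLs:

```typescript
import * as smartrequest from '@push.rocks/smartrequest';

const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
const pdfResponse = await smartrequest.getBinary(pdfUrl);

const result = await openaiProvider.document({
  systemMessage: 'Classify the document. Only the following answers are allowed: "invoice", "bank account statement", "contract", "other"',
  userMessage: 'Classify the document.',
  messageHistory: [],
  pdfDocuments: [pdfResponse.body], // raw PDF bytes; converted to PNGs internally
});
console.log(result.message); // the assistant's classification
```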
ts/provider.perplexity.ts (new file, 3 lines)

```diff
@@ -0,0 +1,3 @@
+import * as plugins from './plugins.js';
+
+export class PerplexityProvider {}
```