Compare commits

...

15 Commits

Author SHA1 Message Date
7cb9bc24dc v0.13.0 2026-01-20 02:03:20 +00:00
9ad039f77b feat(provider.ollama): add chain-of-thought reasoning support to chat messages and Ollama provider 2026-01-20 02:03:20 +00:00
6c6652d75d v0.12.1 2026-01-20 01:27:52 +00:00
2040b3c629 fix(docs): update documentation: clarify provider capabilities, add provider capabilities summary, polish examples and formatting, and remove Serena project config 2026-01-20 01:27:52 +00:00
ae8d3ccf33 v0.12.0 2026-01-20 01:10:27 +00:00
3b900d0ba9 feat(ollama): add support for base64-encoded images in chat messages and forward them to the Ollama provider 2026-01-20 01:10:27 +00:00
d49152390f v0.11.1 2026-01-20 00:37:59 +00:00
d615ec9227 feat(streaming): add chatStreaming method with token callback for real-time generation progress
- Add StreamingChatOptions interface with onToken callback
- Add optional chatStreaming method to MultiModalModel abstract class
- Implement chatStreaming in OllamaProvider using collectStreamResponse
2026-01-20 00:37:49 +00:00
dfa863ee7d v0.11.0 2026-01-20 00:12:21 +00:00
c84ede1f1d feat(ollama): support defaultOptions and defaultTimeout for ollama provider 2026-01-20 00:12:21 +00:00
4937dbf6ab v0.10.1 2026-01-20 00:03:06 +00:00
8cb052449e fix(): no changes detected — no release necessary 2026-01-20 00:03:06 +00:00
126e9b239b feat(OllamaProvider): add model options, streaming support, and thinking tokens
- Add IOllamaModelOptions interface for runtime options (num_ctx, temperature, etc.)
- Extend IOllamaProviderOptions with defaultOptions and defaultTimeout
- Add IOllamaChatOptions for per-request overrides
- Add IOllamaStreamChunk and IOllamaChatResponse interfaces
- Add chatStreamResponse() for async iteration with options
- Add collectStreamResponse() for streaming with progress callback
- Add chatWithOptions() for non-streaming with full options
- Update chat() to use defaultOptions and defaultTimeout
2026-01-20 00:02:45 +00:00
a556053510 v0.10.0 2026-01-18 22:11:52 +00:00
e4dc81edc9 feat(mistral): add Mistral provider with native PDF OCR and chat integration 2026-01-18 22:11:52 +00:00
22 changed files with 1261 additions and 439 deletions

1
.serena/.gitignore vendored
View File

@@ -1 +0,0 @@
/cache

View File

@@ -1,67 +0,0 @@
# language of the project (csharp, python, rust, java, typescript, go, cpp, or ruby)
# * For C, use cpp
# * For JavaScript, use typescript
# Special requirements:
# * csharp: Requires the presence of a .sln file in the project folder.
language: typescript
# whether to use the project's gitignore file to ignore files
# Added on 2025-04-07
ignore_all_files_in_gitignore: true
# list of additional paths to ignore
# same syntax as gitignore, so you can use * and **
# Was previously called `ignored_dirs`, please update your config if you are using that.
# Added (renamed) on 2025-04-07
ignored_paths: []
# whether the project is in read-only mode
# If set to true, all editing tools will be disabled and attempts to use them will result in an error
# Added on 2025-04-18
read_only: false
# list of tool names to exclude. We recommend not excluding any tools, see the readme for more details.
# Below is the complete list of tools for convenience.
# To make sure you have the latest list of tools, and to view their descriptions,
# execute `uv run scripts/print_tool_overview.py`.
#
# * `activate_project`: Activates a project by name.
# * `check_onboarding_performed`: Checks whether project onboarding was already performed.
# * `create_text_file`: Creates/overwrites a file in the project directory.
# * `delete_lines`: Deletes a range of lines within a file.
# * `delete_memory`: Deletes a memory from Serena's project-specific memory store.
# * `execute_shell_command`: Executes a shell command.
# * `find_referencing_code_snippets`: Finds code snippets in which the symbol at the given location is referenced.
# * `find_referencing_symbols`: Finds symbols that reference the symbol at the given location (optionally filtered by type).
# * `find_symbol`: Performs a global (or local) search for symbols with/containing a given name/substring (optionally filtered by type).
# * `get_current_config`: Prints the current configuration of the agent, including the active and available projects, tools, contexts, and modes.
# * `get_symbols_overview`: Gets an overview of the top-level symbols defined in a given file.
# * `initial_instructions`: Gets the initial instructions for the current project.
# Should only be used in settings where the system prompt cannot be set,
# e.g. in clients you have no control over, like Claude Desktop.
# * `insert_after_symbol`: Inserts content after the end of the definition of a given symbol.
# * `insert_at_line`: Inserts content at a given line in a file.
# * `insert_before_symbol`: Inserts content before the beginning of the definition of a given symbol.
# * `list_dir`: Lists files and directories in the given directory (optionally with recursion).
# * `list_memories`: Lists memories in Serena's project-specific memory store.
# * `onboarding`: Performs onboarding (identifying the project structure and essential tasks, e.g. for testing or building).
# * `prepare_for_new_conversation`: Provides instructions for preparing for a new conversation (in order to continue with the necessary context).
# * `read_file`: Reads a file within the project directory.
# * `read_memory`: Reads the memory with the given name from Serena's project-specific memory store.
# * `remove_project`: Removes a project from the Serena configuration.
# * `replace_lines`: Replaces a range of lines within a file with new content.
# * `replace_symbol_body`: Replaces the full definition of a symbol.
# * `restart_language_server`: Restarts the language server, may be necessary when edits not through Serena happen.
# * `search_for_pattern`: Performs a search for a pattern in the project.
# * `summarize_changes`: Provides instructions for summarizing the changes made to the codebase.
# * `switch_modes`: Activates modes by providing a list of their names
# * `think_about_collected_information`: Thinking tool for pondering the completeness of collected information.
# * `think_about_task_adherence`: Thinking tool for determining whether the agent is still on track with the current task.
# * `think_about_whether_you_are_done`: Thinking tool for determining whether the task is truly completed.
# * `write_memory`: Writes a named memory (for future reference) to Serena's project-specific memory store.
excluded_tools: []
# initial prompt for the project. It will always be given to the LLM upon activating the project
# (contrary to the memories, which are loaded on demand).
initial_prompt: ""
project_name: "smartai"

View File

@@ -1,5 +1,50 @@
# Changelog
## 2026-01-20 - 0.13.0 - feat(provider.ollama)
add chain-of-thought reasoning support to chat messages and Ollama provider
- Added optional reasoning?: string to chat message and chat response interfaces to surface chain-of-thought data.
- Propagates reasoning from message history into formatted requests sent to Ollama.
- Maps Ollama response fields (thinking or reasoning) into ChatResponse.reasoning so downstream code can access model reasoning output.
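A rough sketch of how the optional field described in this entry might surface on the public interfaces (the `ChatMessage`/`ChatResponse` names follow the wording above; treat the exact shapes as an assumption, not the source definitions):

```typescript
// Hypothetical shapes based on this changelog entry.
interface ChatMessage {
  role: 'system' | 'user' | 'assistant';
  content: string;
  reasoning?: string; // optional chain-of-thought carried alongside the message
}

interface ChatResponse {
  role: 'assistant';
  message: string;
  reasoning?: string; // populated from Ollama's `thinking`/`reasoning` response fields when present
}

// Downstream code can then inspect the model's reasoning output when the provider returns it:
// if (response.reasoning) console.log('model reasoning:', response.reasoning);
```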
## 2026-01-20 - 0.12.1 - fix(docs)
update documentation: clarify provider capabilities, add provider capabilities summary, polish examples and formatting, and remove Serena project config
- Removed .serena/project.yml and cleaned up .serena/.gitignore
- Added Provider Capabilities Summary and expanded/clarified provider tables in readme.md and readme.hints.md
- Clarified Anthropic extended thinking details and Mistral native PDF OCR notes
- Polished example code snippets and fixed minor typos/formatting (GPT-5 mention, ElevenLabs model note, consistent punctuation)
- Updated test command references and other README usage instructions
## 2026-01-20 - 0.12.0 - feat(ollama)
add support for base64-encoded images in chat messages and forward them to the Ollama provider
- Add optional images?: string[] to ChatMessage and ChatOptions interfaces (multimodal/vision support)
- Propagate images from messageHistory and ChatOptions to the Ollama API payload in chat, chatStreaming, and streaming handlers
- Changes are non-breaking: images are optional and existing behavior is preserved when absent
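A usage sketch for the optional field (the option names mirror the chat examples elsewhere in this repository; the provider call is illustrative):

```typescript
// Hedged sketch: images travel as base64-encoded strings on the optional `images` field.
import * as fs from 'fs';

const imageBase64 = fs.readFileSync('./photo.jpg').toString('base64');

const chatOptions = {
  systemMessage: 'You are a vision assistant.',
  userMessage: 'Describe what is shown in this picture.',
  messageHistory: [],
  images: [imageBase64], // optional; omitting it preserves the previous behavior
};

// await ollamaProvider.chat(chatOptions); // provider instance assumed to exist
```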
## 2026-01-20 - 0.11.0 - feat(ollama)
support defaultOptions and defaultTimeout for ollama provider
- Added ollama.defaultOptions object with fields: num_ctx, temperature, top_k, top_p, repeat_penalty, num_predict, stop, seed
- Added ollama.defaultTimeout option
- Pass defaultOptions and defaultTimeout into OllamaProvider constructor when initializing the provider
- Non-breaking change: existing behavior preserved if new fields are undefined
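A configuration sketch matching the fields listed above (values are illustrative; the option names mirror Ollama's runtime options and the readme's `ollama` config block):

```typescript
import { SmartAi } from '@push.rocks/smartai';

const ai = new SmartAi({
  ollama: {
    baseUrl: 'http://localhost:11434',
    model: 'llama2',
    defaultOptions: {
      num_ctx: 4096,       // context window size
      temperature: 0.7,
      top_k: 40,
      top_p: 0.9,
      repeat_penalty: 1.1,
      num_predict: 512,
      seed: 42,
    },
    defaultTimeout: 120000, // ms
  },
});
```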
## 2026-01-20 - 0.10.1 - fix()
no changes detected — no release necessary
- No files changed in the provided diff; there are no code, documentation, or configuration modifications to release.
## 2026-01-18 - 0.10.0 - feat(mistral)
add Mistral provider with native PDF OCR and chat integration
- Adds dependency @mistralai/mistralai
- Implements ts/provider.mistral.ts providing chat() and document() (OCR) functionality
- Registers and exposes MistralProvider in SmartAi (options, lifecycle, conversation routing)
- Adds unit/integration tests: test.chat.mistral.ts and test.document.mistral.ts
- Updates readme.hints.md with Mistral usage, configuration and notes
## 2026-01-18 - 0.9.0 - feat(providers)
Add Anthropic extended thinking and adapt providers to new streaming/file APIs; bump dependencies and update docs, tests and configuration

View File

@@ -1,6 +1,6 @@
{
  "name": "@push.rocks/smartai",
-  "version": "0.9.0",
+  "version": "0.13.0",
  "private": false,
  "description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
  "main": "dist_ts/index.js",
@@ -18,21 +18,22 @@
    "@git.zone/tsbuild": "^4.1.2",
    "@git.zone/tsbundle": "^2.8.1",
    "@git.zone/tsrun": "^2.0.1",
-    "@git.zone/tstest": "^3.1.4",
+    "@git.zone/tstest": "^3.1.6",
    "@push.rocks/qenv": "^6.1.3",
-    "@types/node": "^22.15.17",
+    "@types/node": "^25.0.9",
    "typescript": "^5.9.3"
  },
  "dependencies": {
    "@anthropic-ai/sdk": "^0.71.2",
    "@mistralai/mistralai": "^1.12.0",
    "@push.rocks/smartarray": "^1.1.0",
-    "@push.rocks/smartfile": "^11.2.7",
+    "@push.rocks/smartfs": "^1.3.1",
    "@push.rocks/smartpath": "^6.0.0",
    "@push.rocks/smartpdf": "^4.1.1",
    "@push.rocks/smartpromise": "^4.2.3",
    "@push.rocks/smartrequest": "^5.0.1",
    "@push.rocks/webstream": "^1.0.10",
-    "openai": "^5.12.2"
+    "openai": "^6.16.0"
  },
  "repository": {
    "type": "git",

116
pnpm-lock.yaml generated
View File

@@ -11,12 +11,15 @@ importers:
'@anthropic-ai/sdk': '@anthropic-ai/sdk':
specifier: ^0.71.2 specifier: ^0.71.2
version: 0.71.2(zod@3.25.76) version: 0.71.2(zod@3.25.76)
'@mistralai/mistralai':
specifier: ^1.12.0
version: 1.12.0
'@push.rocks/smartarray': '@push.rocks/smartarray':
specifier: ^1.1.0 specifier: ^1.1.0
version: 1.1.0 version: 1.1.0
'@push.rocks/smartfile': '@push.rocks/smartfs':
specifier: ^11.2.7 specifier: ^1.3.1
version: 11.2.7 version: 1.3.1
'@push.rocks/smartpath': '@push.rocks/smartpath':
specifier: ^6.0.0 specifier: ^6.0.0
version: 6.0.0 version: 6.0.0
@@ -33,8 +36,8 @@ importers:
specifier: ^1.0.10 specifier: ^1.0.10
version: 1.0.10 version: 1.0.10
openai: openai:
specifier: ^5.12.2 specifier: ^6.16.0
version: 5.12.2(ws@8.18.3)(zod@3.25.76) version: 6.16.0(ws@8.18.3)(zod@3.25.76)
devDependencies: devDependencies:
'@git.zone/tsbuild': '@git.zone/tsbuild':
specifier: ^4.1.2 specifier: ^4.1.2
@@ -46,14 +49,14 @@ importers:
specifier: ^2.0.1 specifier: ^2.0.1
version: 2.0.1 version: 2.0.1
'@git.zone/tstest': '@git.zone/tstest':
specifier: ^3.1.4 specifier: ^3.1.6
version: 3.1.4(@aws-sdk/credential-providers@3.808.0)(socks@2.8.4)(typescript@5.9.3) version: 3.1.6(@aws-sdk/credential-providers@3.808.0)(socks@2.8.4)(typescript@5.9.3)
'@push.rocks/qenv': '@push.rocks/qenv':
specifier: ^6.1.3 specifier: ^6.1.3
version: 6.1.3 version: 6.1.3
'@types/node': '@types/node':
specifier: ^22.15.17 specifier: ^25.0.9
version: 22.15.17 version: 25.0.9
typescript: typescript:
specifier: ^5.9.3 specifier: ^5.9.3
version: 5.9.3 version: 5.9.3
@@ -813,8 +816,8 @@ packages:
resolution: {integrity: sha512-NEcnsjvlC1o3Z6SS3VhKCf6Ev+Sh4EAinmggslrIR/ppMrvjDbXNFXoyr3PB+GLeSAR0JRZ1fGvVYjpEzjBdIg==} resolution: {integrity: sha512-NEcnsjvlC1o3Z6SS3VhKCf6Ev+Sh4EAinmggslrIR/ppMrvjDbXNFXoyr3PB+GLeSAR0JRZ1fGvVYjpEzjBdIg==}
hasBin: true hasBin: true
'@git.zone/tstest@3.1.4': '@git.zone/tstest@3.1.6':
resolution: {integrity: sha512-S7kubbb0yLYOh/QAzFsjG6a20lZiyNKo4pt0yK1yvd9I7X8Rw6/mCT/BicLkan7G7Nk7scUfxaK9+aFsHmdQdw==} resolution: {integrity: sha512-xRGc6wO4rJ6mohPCMIBDRH+oNjiIvX6Jeo8v/Y5o5VyKSHFmqol7FCKSBrojMcqgBpESnLHFPJAAOmT9W3JV8Q==}
hasBin: true hasBin: true
'@happy-dom/global-registrator@15.11.7': '@happy-dom/global-registrator@15.11.7':
@@ -1132,6 +1135,9 @@ packages:
'@lit/reactive-element@2.1.1': '@lit/reactive-element@2.1.1':
resolution: {integrity: sha512-N+dm5PAYdQ8e6UlywyyrgI2t++wFGXfHx+dSJ1oBrg6FAxUj40jId++EaRm80MKX5JnlH1sBsyZ5h0bcZKemCg==} resolution: {integrity: sha512-N+dm5PAYdQ8e6UlywyyrgI2t++wFGXfHx+dSJ1oBrg6FAxUj40jId++EaRm80MKX5JnlH1sBsyZ5h0bcZKemCg==}
'@mistralai/mistralai@1.12.0':
resolution: {integrity: sha512-oDr1hcS3wsIT/QupBG93TNiA5kilwBYoAIyl5BNYqMM2Ix/xsNq+wT8b++uhp/GTUMx44n+8Bn1mkATbwxe6bQ==}
'@mixmark-io/domino@2.2.0': '@mixmark-io/domino@2.2.0':
resolution: {integrity: sha512-Y28PR25bHXUg88kCV7nivXrP2Nj2RueZ3/l/jdx6J9f8J4nsEGcgX0Qe6lt7Pa+J79+kPiJU3LguR6O/6zrLOw==} resolution: {integrity: sha512-Y28PR25bHXUg88kCV7nivXrP2Nj2RueZ3/l/jdx6J9f8J4nsEGcgX0Qe6lt7Pa+J79+kPiJU3LguR6O/6zrLOw==}
@@ -2491,8 +2497,11 @@ packages:
'@types/node@16.9.1': '@types/node@16.9.1':
resolution: {integrity: sha512-QpLcX9ZSsq3YYUUnD3nFDY8H7wctAhQj/TFKL8Ya8v5fMm3CFXxo8zStsLAl780ltoYoo1WvKUVGBQK+1ifr7g==} resolution: {integrity: sha512-QpLcX9ZSsq3YYUUnD3nFDY8H7wctAhQj/TFKL8Ya8v5fMm3CFXxo8zStsLAl780ltoYoo1WvKUVGBQK+1ifr7g==}
'@types/node@22.15.17': '@types/node@22.19.7':
resolution: {integrity: sha512-wIX2aSZL5FE+MR0JlvF87BNVrtFWf6AE6rxSE9X7OwnVvoyCQjpzSRJ+M87se/4QCkCiebQAqrJ0y6fwIyi7nw==} resolution: {integrity: sha512-MciR4AKGHWl7xwxkBa6xUGxQJ4VBOmPTF7sL+iGzuahOFaO0jHCsuEfS80pan1ef4gWId1oWOweIhrDEYLuaOw==}
'@types/node@25.0.9':
resolution: {integrity: sha512-/rpCXHlCWeqClNBwUhDcusJxXYDjZTyE8v5oTO7WbL8eij2nKhUeU89/6xgjU7N4/Vh3He0BtyhJdQbDyhiXAw==}
'@types/ping@0.4.4': '@types/ping@0.4.4':
resolution: {integrity: sha512-ifvo6w2f5eJYlXm+HiVx67iJe8WZp87sfa683nlqED5Vnt9Z93onkokNoWqOG21EaE8fMxyKPobE+mkPEyxsdw==} resolution: {integrity: sha512-ifvo6w2f5eJYlXm+HiVx67iJe8WZp87sfa683nlqED5Vnt9Z93onkokNoWqOG21EaE8fMxyKPobE+mkPEyxsdw==}
@@ -4019,12 +4028,12 @@ packages:
resolution: {integrity: sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==} resolution: {integrity: sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==}
engines: {node: '>=12'} engines: {node: '>=12'}
openai@5.12.2: openai@6.16.0:
resolution: {integrity: sha512-xqzHHQch5Tws5PcKR2xsZGX9xtch+JQFz5zb14dGqlshmmDAFBFEWmeIpf7wVqWV+w7Emj7jRgkNJakyKE0tYQ==} resolution: {integrity: sha512-fZ1uBqjFUjXzbGc35fFtYKEOxd20kd9fDpFeqWtsOZWiubY8CZ1NAlXHW3iathaFvqmNtCWMIsosCuyeI7Joxg==}
hasBin: true hasBin: true
peerDependencies: peerDependencies:
ws: ^8.18.0 ws: ^8.18.0
zod: ^3.23.8 zod: ^3.25 || ^4.0
peerDependenciesMeta: peerDependenciesMeta:
ws: ws:
optional: true optional: true
@@ -4715,6 +4724,9 @@ packages:
undici-types@6.21.0: undici-types@6.21.0:
resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==}
undici-types@7.16.0:
resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==}
unified@11.0.5: unified@11.0.5:
resolution: {integrity: sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==} resolution: {integrity: sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==}
@@ -4895,6 +4907,11 @@ packages:
resolution: {integrity: sha512-U/PBtDf35ff0D8X8D0jfdzHYEPFxAI7jJlxZXwCSez5M3190m+QobIfh+sWDWSHMCWWJN2AWamkegn6vr6YBTw==} resolution: {integrity: sha512-U/PBtDf35ff0D8X8D0jfdzHYEPFxAI7jJlxZXwCSez5M3190m+QobIfh+sWDWSHMCWWJN2AWamkegn6vr6YBTw==}
engines: {node: '>=18'} engines: {node: '>=18'}
zod-to-json-schema@3.25.1:
resolution: {integrity: sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA==}
peerDependencies:
zod: ^3.25 || ^4
zod@3.24.2: zod@3.24.2:
resolution: {integrity: sha512-lY7CDW43ECgW9u1TcT3IoXHflywfVqDYze4waEz812jR/bZ8FHDsl7pFQoSZTz5N+2NqRXs8GBwnAwo3ZNxqhQ==} resolution: {integrity: sha512-lY7CDW43ECgW9u1TcT3IoXHflywfVqDYze4waEz812jR/bZ8FHDsl7pFQoSZTz5N+2NqRXs8GBwnAwo3ZNxqhQ==}
@@ -6906,7 +6923,7 @@ snapshots:
'@push.rocks/smartshell': 3.3.0 '@push.rocks/smartshell': 3.3.0
tsx: 4.21.0 tsx: 4.21.0
'@git.zone/tstest@3.1.4(@aws-sdk/credential-providers@3.808.0)(socks@2.8.4)(typescript@5.9.3)': '@git.zone/tstest@3.1.6(@aws-sdk/credential-providers@3.808.0)(socks@2.8.4)(typescript@5.9.3)':
dependencies: dependencies:
'@api.global/typedserver': 3.0.80 '@api.global/typedserver': 3.0.80
'@git.zone/tsbundle': 2.8.1 '@git.zone/tsbundle': 2.8.1
@@ -7060,7 +7077,7 @@ snapshots:
'@inquirer/figures': 1.0.15 '@inquirer/figures': 1.0.15
'@inquirer/type': 2.0.0 '@inquirer/type': 2.0.0
'@types/mute-stream': 0.0.4 '@types/mute-stream': 0.0.4
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/wrap-ansi': 3.0.0 '@types/wrap-ansi': 3.0.0
ansi-escapes: 4.3.2 ansi-escapes: 4.3.2
cli-width: 4.1.0 cli-width: 4.1.0
@@ -7350,6 +7367,11 @@ snapshots:
dependencies: dependencies:
'@lit-labs/ssr-dom-shim': 1.4.0 '@lit-labs/ssr-dom-shim': 1.4.0
'@mistralai/mistralai@1.12.0':
dependencies:
zod: 3.25.76
zod-to-json-schema: 3.25.1(zod@3.25.76)
'@mixmark-io/domino@2.2.0': {} '@mixmark-io/domino@2.2.0': {}
'@module-federation/error-codes@0.22.0': {} '@module-federation/error-codes@0.22.0': {}
@@ -9635,27 +9657,27 @@ snapshots:
'@types/bn.js@5.2.0': '@types/bn.js@5.2.0':
dependencies: dependencies:
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/body-parser@1.19.6': '@types/body-parser@1.19.6':
dependencies: dependencies:
'@types/connect': 3.4.38 '@types/connect': 3.4.38
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/buffer-json@2.0.3': {} '@types/buffer-json@2.0.3': {}
'@types/clean-css@4.2.11': '@types/clean-css@4.2.11':
dependencies: dependencies:
'@types/node': 22.15.17 '@types/node': 22.19.7
source-map: 0.6.1 source-map: 0.6.1
'@types/connect@3.4.38': '@types/connect@3.4.38':
dependencies: dependencies:
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/cors@2.8.19': '@types/cors@2.8.19':
dependencies: dependencies:
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/debug@4.1.12': '@types/debug@4.1.12':
dependencies: dependencies:
@@ -9665,7 +9687,7 @@ snapshots:
'@types/dns-packet@5.6.5': '@types/dns-packet@5.6.5':
dependencies: dependencies:
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/elliptic@6.4.18': '@types/elliptic@6.4.18':
dependencies: dependencies:
@@ -9673,7 +9695,7 @@ snapshots:
'@types/express-serve-static-core@5.0.7': '@types/express-serve-static-core@5.0.7':
dependencies: dependencies:
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/qs': 6.14.0 '@types/qs': 6.14.0
'@types/range-parser': 1.2.7 '@types/range-parser': 1.2.7
'@types/send': 0.17.5 '@types/send': 0.17.5
@@ -9697,16 +9719,16 @@ snapshots:
'@types/fs-extra@11.0.4': '@types/fs-extra@11.0.4':
dependencies: dependencies:
'@types/jsonfile': 6.1.4 '@types/jsonfile': 6.1.4
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/fs-extra@9.0.13': '@types/fs-extra@9.0.13':
dependencies: dependencies:
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/glob@7.2.0': '@types/glob@7.2.0':
dependencies: dependencies:
'@types/minimatch': 5.1.2 '@types/minimatch': 5.1.2
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/hast@3.0.4': '@types/hast@3.0.4':
dependencies: dependencies:
@@ -9728,7 +9750,7 @@ snapshots:
'@types/jsonfile@6.1.4': '@types/jsonfile@6.1.4':
dependencies: dependencies:
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/mdast@4.0.4': '@types/mdast@4.0.4':
dependencies: dependencies:
@@ -9746,18 +9768,22 @@ snapshots:
'@types/mute-stream@0.0.4': '@types/mute-stream@0.0.4':
dependencies: dependencies:
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/node-forge@1.3.11': '@types/node-forge@1.3.11':
dependencies: dependencies:
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/node@16.9.1': {} '@types/node@16.9.1': {}
'@types/node@22.15.17': '@types/node@22.19.7':
dependencies: dependencies:
undici-types: 6.21.0 undici-types: 6.21.0
'@types/node@25.0.9':
dependencies:
undici-types: 7.16.0
'@types/ping@0.4.4': {} '@types/ping@0.4.4': {}
'@types/qs@6.14.0': {} '@types/qs@6.14.0': {}
@@ -9773,28 +9799,28 @@ snapshots:
'@types/send@0.17.5': '@types/send@0.17.5':
dependencies: dependencies:
'@types/mime': 1.3.5 '@types/mime': 1.3.5
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/serve-static@1.15.8': '@types/serve-static@1.15.8':
dependencies: dependencies:
'@types/http-errors': 2.0.5 '@types/http-errors': 2.0.5
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/send': 0.17.5 '@types/send': 0.17.5
'@types/serve-static@2.2.0': '@types/serve-static@2.2.0':
dependencies: dependencies:
'@types/http-errors': 2.0.5 '@types/http-errors': 2.0.5
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/symbol-tree@3.2.5': {} '@types/symbol-tree@3.2.5': {}
'@types/tar-stream@3.1.4': '@types/tar-stream@3.1.4':
dependencies: dependencies:
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/through2@2.0.41': '@types/through2@2.0.41':
dependencies: dependencies:
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/trusted-types@2.0.7': {} '@types/trusted-types@2.0.7': {}
@@ -9816,7 +9842,7 @@ snapshots:
'@types/whatwg-url@8.2.2': '@types/whatwg-url@8.2.2':
dependencies: dependencies:
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/webidl-conversions': 7.0.3 '@types/webidl-conversions': 7.0.3
'@types/which@3.0.4': {} '@types/which@3.0.4': {}
@@ -9825,11 +9851,11 @@ snapshots:
'@types/ws@8.18.1': '@types/ws@8.18.1':
dependencies: dependencies:
'@types/node': 22.15.17 '@types/node': 22.19.7
'@types/yauzl@2.10.3': '@types/yauzl@2.10.3':
dependencies: dependencies:
'@types/node': 22.15.17 '@types/node': 22.19.7
optional: true optional: true
'@ungap/structured-clone@1.3.0': {} '@ungap/structured-clone@1.3.0': {}
@@ -10289,7 +10315,7 @@ snapshots:
engine.io@6.6.4: engine.io@6.6.4:
dependencies: dependencies:
'@types/cors': 2.8.19 '@types/cors': 2.8.19
'@types/node': 22.15.17 '@types/node': 22.19.7
accepts: 1.3.8 accepts: 1.3.8
base64id: 2.0.0 base64id: 2.0.0
cookie: 0.7.2 cookie: 0.7.2
@@ -11556,7 +11582,7 @@ snapshots:
is-docker: 2.2.1 is-docker: 2.2.1
is-wsl: 2.2.0 is-wsl: 2.2.0
openai@5.12.2(ws@8.18.3)(zod@3.25.76): openai@6.16.0(ws@8.18.3)(zod@3.25.76):
optionalDependencies: optionalDependencies:
ws: 8.18.3 ws: 8.18.3
zod: 3.25.76 zod: 3.25.76
@@ -12351,6 +12377,8 @@ snapshots:
undici-types@6.21.0: {} undici-types@6.21.0: {}
undici-types@7.16.0: {}
unified@11.0.5: unified@11.0.5:
dependencies: dependencies:
'@types/unist': 3.0.3 '@types/unist': 3.0.3
@@ -12506,6 +12534,10 @@ snapshots:
yoctocolors-cjs@2.1.3: {} yoctocolors-cjs@2.1.3: {}
zod-to-json-schema@3.25.1(zod@3.25.76):
dependencies:
zod: 3.25.76
zod@3.24.2: {} zod@3.24.2: {}
zod@3.25.76: {} zod@3.25.76: {}

View File

@@ -3,8 +3,10 @@
## Dependencies
- Uses `@git.zone/tstest` v3.x for testing (import from `@git.zone/tstest/tapbundle`)
-- `@push.rocks/smartfile` is kept at v11 to avoid migration to factory pattern
+- `@push.rocks/smartfs` v1.x for file system operations
- `@anthropic-ai/sdk` v0.71.x with extended thinking support
- `@mistralai/mistralai` v1.x for Mistral OCR and chat capabilities
- `openai` v6.x for OpenAI API integration
- `@push.rocks/smartrequest` v5.x - uses `response.stream()` + `Readable.fromWeb()` for streaming
## Important Notes
@@ -12,11 +14,52 @@
- When extended thinking is enabled, temperature parameter must NOT be set (or set to 1)
- The `streamNode()` method was removed in smartrequest v5, use `response.stream()` with `Readable.fromWeb()` instead
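A minimal sketch of that streaming pattern (assuming `response.stream()` yields a web `ReadableStream`, per the note above; the request-building call itself is omitted):

```typescript
import { Readable } from 'node:stream';
import type { ReadableStream as WebReadableStream } from 'node:stream/web';

// The response object is assumed to come from a smartrequest v5 call.
async function toNodeStream(response: { stream: () => WebReadableStream | Promise<WebReadableStream> }) {
  const webStream = await response.stream();      // web ReadableStream (assumed)
  return Readable.fromWeb(webStream);             // bridge into a Node Readable for piping
}
```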
-## Anthropic Extended Thinking Feature
+## Provider Capabilities Summary
| Provider | Chat | Stream | TTS | Vision | Documents | Research | Images |
|--------------|------|--------|-----|--------|-----------|----------|--------|
| OpenAI | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Anthropic | ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ❌ |
| Mistral | ✅ | ✅ | ❌ | ✅ | ✅ | ❌ | ❌ |
| ElevenLabs | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ |
| Ollama | ✅ | ✅ | ❌ | ✅ | ✅ | ❌ | ❌ |
| XAI | ✅ | ✅ | ❌ | ❌ | ✅ | ❌ | ❌ |
| Perplexity | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ |
| Groq | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Exo | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
## Mistral Provider Integration
### Overview
-The Anthropic provider now supports extended thinking by default across all methods. Extended thinking enables Claude to spend more time reasoning about complex problems before generating responses, leading to higher quality answers for difficult questions.
+The Mistral provider supports:
- **Document AI** via Mistral OCR (December 2025) - native PDF processing without image conversion
- **Chat capabilities** using Mistral's chat models (`mistral-large-latest`, etc.)
### Key Advantage: Native PDF Support
Unlike other providers that require converting PDFs to images (using SmartPdf), Mistral OCR natively accepts PDF documents as base64-encoded data. This makes document processing potentially faster and more accurate for text extraction.
### Configuration
```typescript
import * as smartai from '@push.rocks/smartai';
const provider = new smartai.MistralProvider({
mistralToken: 'your-token-here',
chatModel: 'mistral-large-latest', // default
ocrModel: 'mistral-ocr-latest', // default
tableFormat: 'markdown', // 'markdown' or 'html'
});
await provider.start();
```
### API Key
Tests require `MISTRAL_API_KEY` in `.nogit/env.json`.
## Anthropic Extended Thinking Feature
### Configuration
@@ -33,8 +76,6 @@ const provider = new smartai.AnthropicProvider({
### Thinking Modes
The `extendedThinking` parameter accepts four modes:
| Mode | Budget Tokens | Use Case |
| ---------- | ------------- | ----------------------------------------------- |
| `'quick'` | 2,048 | Lightweight reasoning for simple queries |
@@ -42,141 +83,13 @@ The `extendedThinking` parameter accepts four modes:
| `'deep'` | 16,000 | Complex reasoning for difficult problems |
| `'off'` | 0 | Disable extended thinking |
-**Default Behavior**: If `extendedThinking` is not specified, it defaults to `'normal'` mode (8,000 tokens).
-### Supported Methods
+### Implementation Details
+- Extended thinking is implemented via `getThinkingConfig()` private method
- When thinking is enabled, temperature must NOT be set
- Uses `claude-sonnet-4-5-20250929` model
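A sketch of what that mode-to-budget mapping plausibly looks like, based on the mode table above and the documented `thinking: { type: 'enabled', budget_tokens: number }` API parameter (the method is private, so this shape is an assumption rather than the actual source):

```typescript
// Hypothetical reconstruction of the mapping described above.
type TExtendedThinkingMode = 'quick' | 'normal' | 'deep' | 'off';

function getThinkingConfig(mode: TExtendedThinkingMode = 'normal') {
  const budgets: Record<Exclude<TExtendedThinkingMode, 'off'>, number> = {
    quick: 2048,
    normal: 8000,
    deep: 16000,
  };
  if (mode === 'off') {
    return undefined; // no thinking block is sent to the API
  }
  // Anthropic expects: thinking: { type: 'enabled', budget_tokens: number }
  return { type: 'enabled' as const, budget_tokens: budgets[mode] };
}
```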
-Extended thinking is automatically applied to all Anthropic provider methods:
+## Testing
- `chat()` - Synchronous chat
- `chatStream()` - Streaming chat
- `vision()` - Image analysis
- `document()` - PDF document processing
- `research()` - Web research with citations
### Token Budget Constraints
**Important**: The thinking budget must be less than `max_tokens` for the API call. The current `max_tokens` values are:
- `chatStream()`: 20,000 tokens (sufficient for all modes ✓)
- `chat()`: 20,000 tokens (sufficient for all modes ✓)
- `vision()`: 10,000 tokens (sufficient for all modes ✓)
- `document()`: 20,000 tokens (sufficient for all modes ✓)
- `research()`: 20,000 tokens for all searchDepth levels (sufficient ✓)
### Performance and Cost Implications
**Token Usage**:
- You are charged for the **full thinking tokens** generated, not just the summary
- Higher thinking budgets may result in more thorough reasoning but increased costs
- The budget is a **target**, not a strict limit - actual usage may vary
**Response Quality**:
- `'quick'`: Fast responses, basic reasoning
- `'normal'`: Good balance between quality and speed (recommended for most use cases)
- `'deep'`: Highest quality reasoning for complex problems, slower responses
**Recommendations**:
- Start with `'normal'` (default) for general usage
- Use `'deep'` for complex analytical tasks, philosophy, mathematics, or research
- Use `'quick'` for simple factual queries where deep reasoning isn't needed
- Use `'off'` only if you want traditional Claude behavior without extended thinking
### Usage Examples
#### Example 1: Default (Normal Mode)
```typescript
const provider = new smartai.AnthropicProvider({
anthropicToken: process.env.ANTHROPIC_TOKEN,
// extendedThinking defaults to 'normal'
});
await provider.start();
const response = await provider.chat({
systemMessage: 'You are a helpful assistant.',
userMessage: 'Explain the implications of quantum computing.',
messageHistory: [],
});
```
#### Example 2: Deep Thinking for Complex Analysis
```typescript
const provider = new smartai.AnthropicProvider({
anthropicToken: process.env.ANTHROPIC_TOKEN,
extendedThinking: 'deep', // 16,000 token budget
});
await provider.start();
const response = await provider.chat({
systemMessage: 'You are a philosopher and ethicist.',
userMessage: 'Analyze the trolley problem from multiple ethical frameworks.',
messageHistory: [],
});
```
#### Example 3: Quick Mode for Simple Queries
```typescript
const provider = new smartai.AnthropicProvider({
anthropicToken: process.env.ANTHROPIC_TOKEN,
extendedThinking: 'quick', // 2,048 token budget
});
await provider.start();
const response = await provider.chat({
systemMessage: 'You are a helpful assistant.',
userMessage: 'What is the capital of France?',
messageHistory: [],
});
```
#### Example 4: Disable Thinking
```typescript
const provider = new smartai.AnthropicProvider({
anthropicToken: process.env.ANTHROPIC_TOKEN,
extendedThinking: 'off', // No extended thinking
});
await provider.start();
const response = await provider.chat({
systemMessage: 'You are a helpful assistant.',
userMessage: 'Tell me a joke.',
messageHistory: [],
});
```
#### Example 5: Extended Thinking with Vision
```typescript
const provider = new smartai.AnthropicProvider({
anthropicToken: process.env.ANTHROPIC_TOKEN,
extendedThinking: 'normal',
});
await provider.start();
const imageBuffer = await fs.promises.readFile('./image.jpg');
const analysis = await provider.vision({
image: imageBuffer,
prompt: 'Analyze this image in detail and explain what you see.',
});
```
### Testing
Comprehensive tests for extended thinking are available in:
- `test/test.thinking.anthropic.ts` - Tests all thinking modes
Run tests with:
@@ -184,27 +97,8 @@ Run tests with:
pnpm test
```
-Run specific thinking tests:
+Run specific tests:
```bash
-npx tstest test/test.thinking.anthropic.ts --verbose
+npx tstest test/test.something.ts --verbose
```
### API Reference
According to Anthropic's documentation:
- Extended thinking is supported on Claude Sonnet 4.5, 4, 3.7, Haiku 4.5, and Opus 4.1, 4
- The current model used is `claude-sonnet-4-5-20250929`
- Minimum thinking budget is 1,024 tokens
- Thinking budget must be less than `max_tokens`
### Implementation Details
The extended thinking feature is implemented via:
1. **Interface**: `IAnthropicProviderOptions.extendedThinking` property
2. **Helper Method**: `getThinkingConfig()` private method that maps modes to token budgets
3. **API Parameter**: Adds `thinking: { type: 'enabled', budget_tokens: number }` to all API calls
The thinking configuration is applied automatically to all API calls when the provider is instantiated.

304
readme.md
View File

@@ -6,7 +6,7 @@
[![TypeScript](https://img.shields.io/badge/TypeScript-5.x-blue.svg)](https://www.typescriptlang.org/)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
-SmartAI unifies the world's leading AI providers - OpenAI, Anthropic, Perplexity, Ollama, Groq, XAI, Exo, and ElevenLabs - under a single, elegant TypeScript interface. Build AI applications at lightning speed without vendor lock-in.
+SmartAI unifies the world's leading AI providers OpenAI, Anthropic, Mistral, Perplexity, Ollama, Groq, XAI, Exo, and ElevenLabs under a single, elegant TypeScript interface. Build AI applications at lightning speed without vendor lock-in.
## Issue Reporting and Security
@@ -14,19 +14,23 @@ For reporting bugs, issues, or security vulnerabilities, please visit [community
## 🎯 Why SmartAI?
-- **🔌 Universal Interface**: Write once, run with any AI provider. Switch between GPT-4, Claude, Llama, or Grok with a single line change.
+- **🔌 Universal Interface**: Write once, run with any AI provider. Switch between GPT-5, Claude, Llama, or Grok with a single line change.
- **🛡️ Type-Safe**: Full TypeScript support with comprehensive type definitions for all operations.
- **🌊 Streaming First**: Built for real-time applications with native streaming support.
- **🎨 Multi-Modal**: Seamlessly work with text, images, audio, and documents.
-- **🏠 Local & Cloud**: Support for both cloud providers and local models via Ollama
+- **🏠 Local & Cloud**: Support for both cloud providers and local models via Ollama/Exo.
- **⚡ Zero Lock-In**: Your code remains portable across all AI providers.
-## 🚀 Quick Start
+## 📦 Installation
```bash
npm install @push.rocks/smartai
# or
pnpm install @push.rocks/smartai
```
## 🚀 Quick Start
```typescript
import { SmartAi } from '@push.rocks/smartai';
@@ -48,6 +52,8 @@ const response = await ai.openaiProvider.chat({
  userMessage: 'Explain quantum computing in simple terms',
  messageHistory: [],
});
console.log(response.message);
```
## 📊 Provider Capabilities Matrix
@@ -56,14 +62,15 @@ Choose the right provider for your use case:
| Provider | Chat | Streaming | TTS | Vision | Documents | Research | Images | Highlights |
| -------------- | :--: | :-------: | :-: | :----: | :-------: | :------: | :----: | --------------------------------------------------------------- |
-| **OpenAI** | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | gpt-image-1<br>• DALL-E 3<br>• Deep research API |
-| **Anthropic** | ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ❌ | Claude Sonnet 4.5<br>• Superior reasoning<br>• Web search API |
-| **ElevenLabs** | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | • Premium TTS<br>• 70+ languages<br>• Natural voices |
-| **Ollama** | ✅ | ✅ | ❌ | ✅ | ✅ | ❌ | ❌ | • 100% local<br>• Privacy-first<br>• No API costs |
-| **XAI** | ✅ | ✅ | ❌ | ❌ | ✅ | ❌ | ❌ | • Grok models<br>• Real-time data<br>• Uncensored |
-| **Perplexity** | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | • Web-aware<br>• Research-focused<br>• Sonar Pro models |
-| **Groq** | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | • 10x faster<br>• LPU inference<br>• Low latency |
-| **Exo** | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | • Distributed<br>• P2P compute<br>• Decentralized |
+| **OpenAI** | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | gpt-image-1 • DALL-E 3 • Deep Research API |
+| **Anthropic** | ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ❌ | Claude Sonnet 4.5 • Extended Thinking • Web Search API |
+| **Mistral** | ✅ | ✅ | ❌ | ✅ | ✅ | ❌ | ❌ | Native PDF OCR • mistral-large • Fast inference |
+| **ElevenLabs** | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | Premium TTS • 70+ languages • v3 model |
+| **Ollama** | ✅ | ✅ | ❌ | ✅ | ✅ | ❌ | ❌ | 100% local • Privacy-first • No API costs |
+| **XAI** | ✅ | ✅ | ❌ | ❌ | ✅ | ❌ | ❌ | Grok 2 • Real-time data |
+| **Perplexity** | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | Web-aware • Research-focused • Sonar Pro |
+| **Groq** | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | 10x faster • LPU inference • Llama 3.3 |
+| **Exo** | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | Distributed • P2P compute • Decentralized |
## 🎮 Core Features ## 🎮 Core Features
@@ -72,9 +79,9 @@ Choose the right provider for your use case:
Works identically across all providers:
```typescript
-// Use GPT-4 for complex reasoning
+// Use GPT-5 for complex reasoning
const gptResponse = await ai.openaiProvider.chat({
  systemMessage: 'You are an expert physicist.',
  userMessage: 'Explain the implications of quantum entanglement',
  messageHistory: [],
});
@@ -127,20 +134,17 @@ const audioStream = await ai.openaiProvider.audio({
const elevenLabsAudio = await ai.elevenlabsProvider.audio({
  message: 'Experience the most lifelike text to speech technology.',
  voiceId: '19STyYD15bswVz51nqLf', // Optional: Samara voice
  modelId: 'eleven_v3', // Optional: defaults to eleven_v3 (70+ languages)
  voiceSettings: {
    // Optional: fine-tune voice characteristics
    stability: 0.5, // 0-1: Speech consistency
    similarity_boost: 0.8, // 0-1: Voice similarity to original
    style: 0.0, // 0-1: Expressiveness
    use_speaker_boost: true, // Enhanced clarity
  },
});
-// Stream directly to speakers
-audioStream.pipe(speakerOutput);
-// Or save to file
+// Stream directly to speakers or save to file
audioStream.pipe(fs.createWriteStream('welcome.mp3'));
```
@@ -157,7 +161,7 @@ const gptVision = await ai.openaiProvider.vision({
  prompt: 'Describe this product and suggest marketing angles',
});
// Anthropic: Detailed analysis with extended thinking
const claudeVision = await ai.anthropicProvider.vision({
  image,
  prompt: 'Identify any safety concerns or defects',
@@ -178,7 +182,7 @@ Extract insights from PDFs with AI:
const contract = fs.readFileSync('contract.pdf');
const invoice = fs.readFileSync('invoice.pdf');
// Analyze documents with OpenAI
const analysis = await ai.openaiProvider.document({
  systemMessage: 'You are a legal expert.',
  userMessage: 'Compare these documents and highlight key differences',
@@ -186,7 +190,7 @@ const analysis = await ai.openaiProvider.document({
  pdfDocuments: [contract, invoice],
});
// Multi-document analysis with Anthropic
const taxDocs = [form1099, w2, receipts];
const taxAnalysis = await ai.anthropicProvider.document({
  systemMessage: 'You are a tax advisor.',
@@ -212,6 +216,8 @@ console.log(deepResearch.answer);
console.log('Sources:', deepResearch.sources);
// Anthropic Web Search - Domain-filtered research
import { AnthropicProvider } from '@push.rocks/smartai';
const anthropic = new AnthropicProvider({
  anthropicToken: 'sk-ant-...',
  enableWebSearch: true,
@@ -232,14 +238,14 @@ const perplexityResearch = await ai.perplexityProvider.research({
**Research Options:**
- `searchDepth`: `'basic'` | `'advanced'` | `'deep'`
- `maxSources`: Number of sources to include
- `includeWebSearch`: Enable web search (OpenAI)
- `background`: Run as background task (OpenAI)
**Supported Providers:**
-- **OpenAI**: Deep Research API with specialized models (`o3-deep-research-2025-06-26`, `o4-mini-deep-research-2025-06-26`)
+- **OpenAI**: Deep Research API with specialized models (`o3-deep-research-*`, `o4-mini-deep-research-*`)
- **Anthropic**: Web Search API with domain filtering
- **Perplexity**: Sonar and Sonar Pro models with built-in citations
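A quick usage sketch combining these options (the call shape follows the Perplexity example further down in this readme; treat exact option support per provider as an assumption):

```typescript
// Hedged sketch: research() with explicit options.
const findings = await ai.perplexityProvider.research({
  query: 'What are the latest developments in quantum error correction?',
  searchDepth: 'advanced', // 'basic' | 'advanced' | 'deep'
  maxSources: 10,          // cap the number of cited sources (assumed to apply here)
});

console.log(findings.answer);
console.log('Sources:', findings.sources);
```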
@@ -268,12 +274,12 @@ const response = await anthropic.chat({
**Thinking Modes:**
| Mode | Budget Tokens | Use Case |
| ---------- | ------------- | ------------------------------------------------ |
| `'quick'` | 2,048 | Lightweight reasoning for simple queries |
| `'normal'` | 8,000 | **Default** - Balanced reasoning for most tasks |
| `'deep'` | 16,000 | Complex reasoning for difficult problems |
| `'off'` | 0 | Disable extended thinking |
**Best Practices:**
@@ -282,6 +288,39 @@ const response = await anthropic.chat({
- Use `'quick'` for simple factual queries where deep reasoning isn't needed
- Thinking budget counts against total token usage
### 📑 Native PDF OCR (Mistral)
Mistral provides native PDF document processing via their OCR API — no image conversion required:
```typescript
import { MistralProvider } from '@push.rocks/smartai';
const mistral = new MistralProvider({
mistralToken: 'your-api-key',
chatModel: 'mistral-large-latest', // Default
ocrModel: 'mistral-ocr-latest', // Default
tableFormat: 'markdown', // 'markdown' | 'html'
});
await mistral.start();
// Direct PDF processing - no image conversion overhead
const result = await mistral.document({
systemMessage: 'You are a document analyst.',
userMessage: 'Extract all invoice details and calculate the total.',
pdfDocuments: [invoicePdfBuffer],
messageHistory: [],
});
```
**Key Advantage**: Unlike other providers that convert PDFs to images first, Mistral's OCR API processes PDFs natively, potentially offering faster and more accurate text extraction for document-heavy workloads.
**Supported Formats:**
- Native PDF processing via Files API
- Image OCR (JPEG, PNG, GIF, WebP) for vision tasks
- Table extraction with markdown or HTML output
### 🎨 Image Generation & Editing
Generate and edit images with OpenAI's cutting-edge models:
@@ -348,14 +387,14 @@ const editedImage = await ai.openaiProvider.imageEdit({
**Image Generation Options:**
- `model`: `'gpt-image-1'` | `'dall-e-3'` | `'dall-e-2'`
- `quality`: `'low'` | `'medium'` | `'high'` | `'auto'`
- `size`: Multiple aspect ratios up to 4096×4096
- `background`: `'transparent'` | `'opaque'` | `'auto'`
- `outputFormat`: `'png'` | `'jpeg'` | `'webp'`
- `outputCompression`: 0-100 for webp/jpeg
- `moderation`: `'low'` | `'auto'`
- `n`: Number of images (1-10)
**gpt-image-1 Advantages:**
@@ -391,7 +430,7 @@ await inputWriter.write('Now show me how to make it thread-safe');
```typescript
const supportBot = new SmartAi({
  anthropicToken: process.env.ANTHROPIC_KEY, // Claude for empathetic responses
});
async function handleCustomerQuery(query: string, history: ChatMessage[]) {
@@ -400,13 +439,13 @@ async function handleCustomerQuery(query: string, history: ChatMessage[]) {
      systemMessage: `You are a helpful customer support agent.
        Be empathetic, professional, and solution-oriented.`,
      userMessage: query,
      messageHistory: history,
    });
    return response.message;
  } catch (error) {
    // Fallback to another provider if needed
    return await supportBot.openaiProvider.chat({ /* ... */ });
  }
}
```
@@ -419,8 +458,6 @@ const codeReviewer = new SmartAi({
});
async function reviewCode(code: string, language: string) {
-  const startTime = Date.now();
  const review = await codeReviewer.groqProvider.chat({
    systemMessage: `You are a ${language} expert. Review code for:
      - Security vulnerabilities
@@ -431,7 +468,6 @@ async function reviewCode(code: string, language: string) {
    messageHistory: [],
  });
-  console.log(`Review completed in ${Date.now() - startTime}ms`);
  return review.message;
}
```
@@ -445,14 +481,15 @@ const researcher = new SmartAi({
async function research(topic: string) {
  // Perplexity excels at web-aware research
-  const findings = await researcher.perplexityProvider.chat({
+  const findings = await researcher.perplexityProvider.research({
-    systemMessage:
-      'You are a research assistant. Provide factual, cited information.',
-    userMessage: `Research the latest developments in ${topic}`,
-    messageHistory: [],
+    query: `Research the latest developments in ${topic}`,
+    searchDepth: 'deep',
  });
-  return findings.message;
+  return {
+    answer: findings.answer,
+    sources: findings.sources,
+  };
}
```
@@ -489,23 +526,26 @@ async function analyzeSensitiveDoc(pdfBuffer: Buffer) {
class SmartAIRouter {
  constructor(private ai: SmartAi) {}
  async query(
    message: string,
    requirements: {
      speed?: boolean;
      accuracy?: boolean;
      cost?: boolean;
      privacy?: boolean;
    }
  ) {
    if (requirements.privacy) {
      return this.ai.ollamaProvider.chat({ /* ... */ }); // Local only
    }
    if (requirements.speed) {
      return this.ai.groqProvider.chat({ /* ... */ }); // 10x faster
    }
    if (requirements.accuracy) {
      return this.ai.anthropicProvider.chat({ /* ... */ }); // Best reasoning
    }
    // Default fallback
    return this.ai.openaiProvider.chat({ /* ... */ });
  }
}
```
@@ -516,7 +556,7 @@ class SmartAIRouter {
// Don't wait for the entire response
async function streamResponse(userQuery: string) {
  const stream = await ai.openaiProvider.chatStream(
    createInputStream(userQuery)
  );
  // Process tokens as they arrive
@@ -533,9 +573,9 @@ async function streamResponse(userQuery: string) {
// Get the best answer from multiple AIs
async function consensusQuery(question: string) {
  const providers = [
    ai.openaiProvider.chat({ /* ... */ }),
    ai.anthropicProvider.chat({ /* ... */ }),
    ai.perplexityProvider.chat({ /* ... */ }),
  ];
  const responses = await Promise.all(providers);
@@ -543,21 +583,61 @@ async function consensusQuery(question: string) {
}
```
-## 🛠️ Advanced Features
+## 🛠️ Advanced Configuration
-### Custom Streaming Transformations
+### Provider-Specific Options
```typescript
-// Add real-time translation
-const translationStream = new TransformStream({
-  async transform(chunk, controller) {
-    const translated = await translateChunk(chunk);
-    controller.enqueue(translated);
const ai = new SmartAi({
  // OpenAI
  openaiToken: 'sk-...',
  // Anthropic with extended thinking
anthropicToken: 'sk-ant-...',
// Perplexity for research
perplexityToken: 'pplx-...',
// Groq for speed
groqToken: 'gsk_...',
// Mistral with OCR settings
mistralToken: 'your-key',
mistral: {
chatModel: 'mistral-large-latest',
ocrModel: 'mistral-ocr-latest',
tableFormat: 'markdown',
},
// XAI (Grok)
xaiToken: 'xai-...',
// ElevenLabs TTS
elevenlabsToken: 'sk-...',
elevenlabs: {
defaultVoiceId: '19STyYD15bswVz51nqLf',
defaultModelId: 'eleven_v3',
},
// Ollama (local)
ollama: {
baseUrl: 'http://localhost:11434',
model: 'llama2',
visionModel: 'llava',
defaultOptions: {
num_ctx: 4096,
temperature: 0.7,
top_p: 0.9,
},
defaultTimeout: 120000,
},
// Exo (distributed)
exo: {
baseUrl: 'http://localhost:8080/v1',
apiKey: 'optional-key',
  },
});
const responseStream = await ai.openaiProvider.chatStream(input);
const translatedStream = responseStream.pipeThrough(translationStream);
```
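For Ollama specifically, the streaming helper added in v0.11.x can report progress token by token. A usage sketch follows; the `chatStreaming`/`onToken` names come from the commit notes in this compare, so treat the exact signature as an assumption:

```typescript
// Hedged sketch: streaming chat with a token callback on the Ollama provider.
let output = '';

const result = await ai.ollamaProvider.chatStreaming({
  systemMessage: 'You are a helpful assistant.',
  userMessage: 'Summarize the benefits of local inference.',
  messageHistory: [],
  onToken: (token: string) => {
    output += token;              // called for each generated token
    process.stdout.write(token);  // show real-time generation progress
  },
});

// Assumed to resolve to the same response shape as chat().
console.log('\nFinal message:', result.message);
```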
### Error Handling & Fallbacks
@@ -580,83 +660,27 @@ class ResilientAI {
}
```
### Token Counting & Cost Management
```typescript
// Track usage across providers
class UsageTracker {
async trackedChat(provider: string, options: ChatOptions) {
const start = Date.now();
const response = await ai[`${provider}Provider`].chat(options);
const usage = {
provider,
duration: Date.now() - start,
inputTokens: estimateTokens(options),
outputTokens: estimateTokens(response.message),
};
await this.logUsage(usage);
return response;
}
}
```
## 📦 Installation & Setup
### Prerequisites
- Node.js 16+
- TypeScript 4.5+
- API keys for your chosen providers
### Environment Setup
```bash
# Install
npm install @push.rocks/smartai
# Set up environment variables
export OPENAI_API_KEY=sk-...
export ANTHROPIC_API_KEY=sk-ant-...
export PERPLEXITY_API_KEY=pplx-...
export ELEVENLABS_API_KEY=sk-...
# ... etc
```
### TypeScript Configuration
```json
{
"compilerOptions": {
"target": "ES2022",
"module": "NodeNext",
"lib": ["ES2022"],
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true
}
}
```
## 🎯 Choosing the Right Provider

| Use Case | Recommended Provider | Why |
| --------------------- | -------------------- | --------------------------------------------------------- |
| **General Purpose** | OpenAI | Most features, stable, well-documented |
| **Complex Reasoning** | Anthropic | Superior logical thinking, extended thinking, safer |
| **Document OCR** | Mistral | Native PDF processing, no image conversion overhead |
| **Research & Facts** | Perplexity | Web-aware, provides citations |
| **Deep Research** | OpenAI | Deep Research API with comprehensive analysis |
| **Premium TTS** | ElevenLabs | Most natural voices, 70+ languages, v3 model |
| **Speed Critical** | Groq | 10x faster inference, sub-second responses |
| **Privacy Critical** | Ollama | 100% local, no data leaves your servers |
| **Real-time Data** | XAI | Grok with access to current information |
| **Cost Sensitive** | Ollama/Exo | Free (local) or distributed compute |
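Because every provider implements the same `MultiModalModel` surface, switching is mostly configuration. A small sketch against the `TProvider` union extended in this release (the `providerFor` helper is illustrative, not part of the library):

```typescript
// Pick a provider per use case from the table above
const providerFor = (useCase: 'ocr' | 'speed' | 'privacy') =>
  useCase === 'ocr' ? 'mistral' as const
  : useCase === 'speed' ? 'groq' as const
  : 'ollama' as const;

const conversation = await ai.createConversation(providerFor('ocr'));
```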
## 📈 Roadmap

- [x] Research & Web Search API
- [x] Image generation support (gpt-image-1, DALL-E 3, DALL-E 2)
- [x] Extended thinking (Anthropic)
- [x] Native PDF OCR (Mistral)
- [ ] Streaming function calls
- [ ] Voice input processing
- [ ] Fine-tuning integration


@@ -1,8 +1,9 @@
import { expect, tap } from '@git.zone/tstest/tapbundle'; import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as qenv from '@push.rocks/qenv'; import * as qenv from '@push.rocks/qenv';
import * as smartfile from '@push.rocks/smartfile'; import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
const testQenv = new qenv.Qenv('./', './.nogit/'); const testQenv = new qenv.Qenv('./', './.nogit/');
const smartfs = new SmartFs(new SmartFsProviderNode());
import * as smartai from '../ts/index.js'; import * as smartai from '../ts/index.js';
@@ -27,7 +28,7 @@ tap.test('ElevenLabs Audio: should create audio response', async () => {
chunks.push(chunk as Uint8Array); chunks.push(chunk as Uint8Array);
} }
const audioBuffer = Buffer.concat(chunks); const audioBuffer = Buffer.concat(chunks);
await smartfile.fs.toFs(audioBuffer, './.nogit/testoutput_elevenlabs.mp3'); await smartfs.file('./.nogit/testoutput_elevenlabs.mp3').write(audioBuffer);
console.log(`Audio Buffer length: ${audioBuffer.length}`); console.log(`Audio Buffer length: ${audioBuffer.length}`);
expect(audioBuffer.length).toBeGreaterThan(0); expect(audioBuffer.length).toBeGreaterThan(0);
}); });
@@ -42,7 +43,7 @@ tap.test('ElevenLabs Audio: should create audio with custom voice', async () =>
chunks.push(chunk as Uint8Array); chunks.push(chunk as Uint8Array);
} }
const audioBuffer = Buffer.concat(chunks); const audioBuffer = Buffer.concat(chunks);
await smartfile.fs.toFs(audioBuffer, './.nogit/testoutput_elevenlabs_custom.mp3'); await smartfs.file('./.nogit/testoutput_elevenlabs_custom.mp3').write(audioBuffer);
console.log(`Audio Buffer length (custom voice): ${audioBuffer.length}`); console.log(`Audio Buffer length (custom voice): ${audioBuffer.length}`);
expect(audioBuffer.length).toBeGreaterThan(0); expect(audioBuffer.length).toBeGreaterThan(0);
}); });


@@ -1,8 +1,9 @@
import { expect, tap } from '@git.zone/tstest/tapbundle'; import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as qenv from '@push.rocks/qenv'; import * as qenv from '@push.rocks/qenv';
import * as smartfile from '@push.rocks/smartfile'; import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
const testQenv = new qenv.Qenv('./', './.nogit/'); const testQenv = new qenv.Qenv('./', './.nogit/');
const smartfs = new SmartFs(new SmartFsProviderNode());
import * as smartai from '../ts/index.js'; import * as smartai from '../ts/index.js';
@@ -26,7 +27,7 @@ tap.test('OpenAI Audio: should create audio response', async () => {
chunks.push(chunk as Uint8Array); chunks.push(chunk as Uint8Array);
} }
const audioBuffer = Buffer.concat(chunks); const audioBuffer = Buffer.concat(chunks);
await smartfile.fs.toFs(audioBuffer, './.nogit/testoutput.mp3'); await smartfs.file('./.nogit/testoutput.mp3').write(audioBuffer);
console.log(`Audio Buffer length: ${audioBuffer.length}`); console.log(`Audio Buffer length: ${audioBuffer.length}`);
// Assert that the resulting buffer is not empty. // Assert that the resulting buffer is not empty.
expect(audioBuffer.length).toBeGreaterThan(0); expect(audioBuffer.length).toBeGreaterThan(0);

test/test.chat.mistral.ts (new file, 66 lines)

@@ -0,0 +1,66 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as qenv from '@push.rocks/qenv';
const testQenv = new qenv.Qenv('./', './.nogit/');
import * as smartai from '../ts/index.js';
let mistralProvider: smartai.MistralProvider;
tap.test('Mistral Chat: should create and start Mistral provider', async () => {
mistralProvider = new smartai.MistralProvider({
mistralToken: await testQenv.getEnvVarOnDemand('MISTRAL_API_KEY'),
});
await mistralProvider.start();
expect(mistralProvider).toBeInstanceOf(smartai.MistralProvider);
});
tap.test('Mistral Chat: should create chat response', async () => {
const userMessage = 'What is the capital of France? Answer in one word.';
const response = await mistralProvider.chat({
systemMessage: 'You are a helpful assistant. Be concise.',
userMessage: userMessage,
messageHistory: [],
});
console.log(`Mistral Chat - User: ${userMessage}`);
console.log(`Mistral Chat - Response: ${response.message}`);
expect(response.role).toEqual('assistant');
expect(response.message).toBeTruthy();
expect(response.message.toLowerCase()).toInclude('paris');
});
tap.test('Mistral Chat: should handle message history', async () => {
const messageHistory: smartai.ChatMessage[] = [
{ role: 'user', content: 'My name is Claude Test' },
{ role: 'assistant', content: 'Nice to meet you, Claude Test!' }
];
const response = await mistralProvider.chat({
systemMessage: 'You are a helpful assistant with good memory.',
userMessage: 'What is my name?',
messageHistory: messageHistory,
});
console.log(`Mistral Memory Test - Response: ${response.message}`);
expect(response.message.toLowerCase()).toInclude('claude test');
});
tap.test('Mistral Chat: should handle longer conversations', async () => {
const response = await mistralProvider.chat({
systemMessage: 'You are a helpful coding assistant.',
userMessage: 'Write a simple hello world function in TypeScript. Keep it brief.',
messageHistory: [],
});
console.log(`Mistral Coding Test - Response: ${response.message}`);
expect(response.message).toBeTruthy();
// Should contain some TypeScript/function code
expect(response.message).toInclude('function');
});
tap.test('Mistral Chat: should stop the provider', async () => {
await mistralProvider.stop();
});
export default tap.start();


@@ -1,9 +1,10 @@
import { expect, tap } from '@git.zone/tstest/tapbundle'; import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as qenv from '@push.rocks/qenv'; import * as qenv from '@push.rocks/qenv';
import * as smartrequest from '@push.rocks/smartrequest'; import * as smartrequest from '@push.rocks/smartrequest';
import * as smartfile from '@push.rocks/smartfile'; import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
const testQenv = new qenv.Qenv('./', './.nogit/'); const testQenv = new qenv.Qenv('./', './.nogit/');
const smartfs = new SmartFs(new SmartFsProviderNode());
import * as smartai from '../ts/index.js'; import * as smartai from '../ts/index.js';
@@ -41,7 +42,7 @@ tap.test('Anthropic Document: should handle complex document analysis', async ()
let pdfBuffer: Uint8Array; let pdfBuffer: Uint8Array;
try { try {
pdfBuffer = await smartfile.fs.toBuffer(pdfPath); pdfBuffer = await smartfs.file(pdfPath).read();
} catch (error) { } catch (error) {
// If the file doesn't exist, use the dummy PDF // If the file doesn't exist, use the dummy PDF
console.log('Demo PDF not found, using dummy PDF instead'); console.log('Demo PDF not found, using dummy PDF instead');


@@ -0,0 +1,100 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as qenv from '@push.rocks/qenv';
import * as smartrequest from '@push.rocks/smartrequest';
import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
const testQenv = new qenv.Qenv('./', './.nogit/');
const smartfs = new SmartFs(new SmartFsProviderNode());
import * as smartai from '../ts/index.js';
let mistralProvider: smartai.MistralProvider;
tap.test('Mistral Document: should create and start Mistral provider', async () => {
mistralProvider = new smartai.MistralProvider({
mistralToken: await testQenv.getEnvVarOnDemand('MISTRAL_API_KEY'),
tableFormat: 'markdown',
});
await mistralProvider.start();
expect(mistralProvider).toBeInstanceOf(smartai.MistralProvider);
});
tap.test('Mistral Document: should process a PDF document', async () => {
const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
const pdfResponse = await smartrequest.SmartRequest.create()
.url(pdfUrl)
.get();
const result = await mistralProvider.document({
systemMessage: 'Classify the document. Only the following answers are allowed: "invoice", "bank account statement", "contract", "test document", "other". The answer should only contain the keyword for machine use.',
userMessage: 'Classify this document.',
messageHistory: [],
pdfDocuments: [Buffer.from(await pdfResponse.arrayBuffer())],
});
console.log(`Mistral Document - Result:`, result);
expect(result).toBeTruthy();
expect(result.message).toBeTruthy();
});
tap.test('Mistral Document: should handle complex document analysis', async () => {
// Test with the demo PDF if it exists
const pdfPath = './.nogit/demo_without_textlayer.pdf';
let pdfBuffer: Uint8Array;
try {
pdfBuffer = await smartfs.file(pdfPath).read();
} catch (error) {
// If the file doesn't exist, use the dummy PDF
console.log('Demo PDF not found, using dummy PDF instead');
const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
const pdfResponse = await smartrequest.SmartRequest.create()
.url(pdfUrl)
.get();
pdfBuffer = Buffer.from(await pdfResponse.arrayBuffer());
}
const result = await mistralProvider.document({
systemMessage: `
Analyze this document and provide a JSON response with the following structure:
{
"documentType": "string",
"hasText": boolean,
"summary": "string"
}
`,
userMessage: 'Analyze this document.',
messageHistory: [],
pdfDocuments: [pdfBuffer],
});
console.log(`Mistral Complex Document Analysis:`, result);
expect(result).toBeTruthy();
expect(result.message).toBeTruthy();
});
tap.test('Mistral Document: should process multiple PDF documents', async () => {
const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
const pdfResponse = await smartrequest.SmartRequest.create()
.url(pdfUrl)
.get();
const pdfBuffer = Buffer.from(await pdfResponse.arrayBuffer());
const result = await mistralProvider.document({
systemMessage: 'You are a document comparison assistant.',
userMessage: 'Are these two documents the same? Answer yes or no.',
messageHistory: [],
pdfDocuments: [pdfBuffer, pdfBuffer], // Same document twice for test
});
console.log(`Mistral Multi-Document - Result:`, result);
expect(result).toBeTruthy();
expect(result.message).toBeTruthy();
});
tap.test('Mistral Document: should stop the provider', async () => {
await mistralProvider.stop();
});
export default tap.start();


@@ -1,9 +1,10 @@
import { expect, tap } from '@git.zone/tstest/tapbundle'; import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as qenv from '@push.rocks/qenv'; import * as qenv from '@push.rocks/qenv';
import * as smartrequest from '@push.rocks/smartrequest'; import * as smartrequest from '@push.rocks/smartrequest';
import * as smartfile from '@push.rocks/smartfile'; import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
const testQenv = new qenv.Qenv('./', './.nogit/'); const testQenv = new qenv.Qenv('./', './.nogit/');
const smartfs = new SmartFs(new SmartFsProviderNode());
import * as smartai from '../ts/index.js'; import * as smartai from '../ts/index.js';
@@ -32,7 +33,7 @@ tap.test('OpenAI Document: should document a pdf', async () => {
}); });
tap.test('OpenAI Document: should recognize companies in a pdf', async () => { tap.test('OpenAI Document: should recognize companies in a pdf', async () => {
const pdfBuffer = await smartfile.fs.toBuffer('./.nogit/demo_without_textlayer.pdf'); const pdfBuffer = await smartfs.file('./.nogit/demo_without_textlayer.pdf').read();
const result = await testSmartai.openaiProvider.document({ const result = await testSmartai.openaiProvider.document({
systemMessage: ` systemMessage: `
summarize the document. summarize the document.


@@ -1,8 +1,9 @@
import { expect, tap } from '@git.zone/tstest/tapbundle'; import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as qenv from '@push.rocks/qenv'; import * as qenv from '@push.rocks/qenv';
import * as smartfile from '@push.rocks/smartfile'; import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
const testQenv = new qenv.Qenv('./', './.nogit/'); const testQenv = new qenv.Qenv('./', './.nogit/');
const smartfs = new SmartFs(new SmartFsProviderNode());
import * as smartai from '../ts/index.js'; import * as smartai from '../ts/index.js';
@@ -21,7 +22,7 @@ tap.test('Anthropic Vision: should analyze coffee image with latte art', async (
const imagePath = './test/testimages/coffee-dani/coffee.jpg'; const imagePath = './test/testimages/coffee-dani/coffee.jpg';
console.log(`Loading coffee image from: ${imagePath}`); console.log(`Loading coffee image from: ${imagePath}`);
const imageBuffer = await smartfile.fs.toBuffer(imagePath); const imageBuffer = await smartfs.file(imagePath).read();
console.log(`Image loaded, size: ${imageBuffer.length} bytes`); console.log(`Image loaded, size: ${imageBuffer.length} bytes`);
const result = await anthropicProvider.vision({ const result = await anthropicProvider.vision({
@@ -45,7 +46,7 @@ tap.test('Anthropic Vision: should analyze laptop/workspace image', async () =>
const imagePath = './test/testimages/laptop-nicolas/laptop.jpg'; const imagePath = './test/testimages/laptop-nicolas/laptop.jpg';
console.log(`Loading laptop image from: ${imagePath}`); console.log(`Loading laptop image from: ${imagePath}`);
const imageBuffer = await smartfile.fs.toBuffer(imagePath); const imageBuffer = await smartfs.file(imagePath).read();
console.log(`Image loaded, size: ${imageBuffer.length} bytes`); console.log(`Image loaded, size: ${imageBuffer.length} bytes`);
const result = await anthropicProvider.vision({ const result = await anthropicProvider.vision({
@@ -69,7 +70,7 @@ tap.test('Anthropic Vision: should analyze receipt/document image', async () =>
const imagePath = './test/testimages/receipt-annie/receipt.jpg'; const imagePath = './test/testimages/receipt-annie/receipt.jpg';
console.log(`Loading receipt image from: ${imagePath}`); console.log(`Loading receipt image from: ${imagePath}`);
const imageBuffer = await smartfile.fs.toBuffer(imagePath); const imageBuffer = await smartfs.file(imagePath).read();
console.log(`Image loaded, size: ${imageBuffer.length} bytes`); console.log(`Image loaded, size: ${imageBuffer.length} bytes`);
const result = await anthropicProvider.vision({ const result = await anthropicProvider.vision({


@@ -3,6 +3,6 @@
*/ */
export const commitinfo = { export const commitinfo = {
name: '@push.rocks/smartai', name: '@push.rocks/smartai',
version: '0.9.0', version: '0.13.0',
description: 'SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.' description: 'SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.'
} }


@@ -6,6 +6,10 @@ import * as plugins from './plugins.js';
export interface ChatMessage { export interface ChatMessage {
role: 'assistant' | 'user' | 'system'; role: 'assistant' | 'user' | 'system';
content: string; content: string;
/** Base64-encoded images for vision-capable models */
images?: string[];
/** Chain-of-thought reasoning for GPT-OSS models (e.g., Ollama) */
reasoning?: string;
} }
/** /**
@@ -15,6 +19,16 @@ export interface ChatOptions {
systemMessage: string; systemMessage: string;
userMessage: string; userMessage: string;
messageHistory: ChatMessage[]; messageHistory: ChatMessage[];
/** Base64-encoded images for the current message (vision-capable models) */
images?: string[];
}
/**
* Options for streaming chat interactions
*/
export interface StreamingChatOptions extends ChatOptions {
/** Callback fired for each token during generation */
onToken?: (token: string) => void;
} }
/** /**
@@ -23,6 +37,8 @@ export interface ChatOptions {
export interface ChatResponse { export interface ChatResponse {
role: 'assistant'; role: 'assistant';
message: string; message: string;
/** Chain-of-thought reasoning from reasoning models */
reasoning?: string;
} }
/** /**
@@ -161,6 +177,14 @@ export abstract class MultiModalModel {
*/ */
public abstract chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>>; public abstract chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>>;
/**
* Streaming chat with token callback
* Calls onToken for each token generated, returns final response
* @param optionsArg Options containing system message, user message, message history, and onToken callback
* @returns Promise resolving to the assistant's response
*/
public chatStreaming?(optionsArg: StreamingChatOptions): Promise<ChatResponse>;
/** /**
* Text-to-speech conversion * Text-to-speech conversion
* @param optionsArg Options containing the message to convert to speech * @param optionsArg Options containing the message to convert to speech
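A minimal sketch of the new optional `chatStreaming` hook together with the extended `ChatMessage` fields, using the Ollama provider, which implements it in this diff; the base64 image content is a placeholder.

```typescript
import { OllamaProvider, type ChatMessage } from '@push.rocks/smartai';

const ollama = new OllamaProvider({ model: 'llama2', visionModel: 'llava' });
await ollama.start();

const receiptBase64 = '<base64-encoded JPEG>'; // placeholder; load a real image in practice

// History entries may now carry base64 images and prior reasoning
const history: ChatMessage[] = [
  { role: 'user', content: 'Here is the receipt.', images: [receiptBase64] },
];

// onToken fires per generated token; the full response is still returned at the end
const response = await ollama.chatStreaming({
  systemMessage: 'You are a helpful assistant.',
  userMessage: 'What is the total on the receipt?',
  messageHistory: history,
  onToken: (token) => process.stdout.write(token),
});
console.log('\nreasoning:', response.reasoning);
```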


@@ -84,6 +84,18 @@ export class Conversation {
return conversation; return conversation;
} }
public static async createWithMistral(smartaiRefArg: SmartAi) {
if (!smartaiRefArg.mistralProvider) {
throw new Error('Mistral provider not available');
}
const conversation = new Conversation(smartaiRefArg, {
processFunction: async (input) => {
return '' // TODO implement proper streaming
}
});
return conversation;
}
public static async createWithXai(smartaiRefArg: SmartAi) { public static async createWithXai(smartaiRefArg: SmartAi) {
if (!smartaiRefArg.xaiProvider) { if (!smartaiRefArg.xaiProvider) {
throw new Error('XAI provider not available'); throw new Error('XAI provider not available');


@@ -2,6 +2,7 @@ import { Conversation } from './classes.conversation.js';
import * as plugins from './plugins.js'; import * as plugins from './plugins.js';
import { AnthropicProvider } from './provider.anthropic.js'; import { AnthropicProvider } from './provider.anthropic.js';
import { ElevenLabsProvider } from './provider.elevenlabs.js'; import { ElevenLabsProvider } from './provider.elevenlabs.js';
import { MistralProvider } from './provider.mistral.js';
import { OllamaProvider } from './provider.ollama.js'; import { OllamaProvider } from './provider.ollama.js';
import { OpenAiProvider } from './provider.openai.js'; import { OpenAiProvider } from './provider.openai.js';
import { PerplexityProvider } from './provider.perplexity.js'; import { PerplexityProvider } from './provider.perplexity.js';
@@ -15,16 +16,33 @@ export interface ISmartAiOptions {
anthropicToken?: string; anthropicToken?: string;
perplexityToken?: string; perplexityToken?: string;
groqToken?: string; groqToken?: string;
mistralToken?: string;
xaiToken?: string; xaiToken?: string;
elevenlabsToken?: string; elevenlabsToken?: string;
exo?: { exo?: {
baseUrl?: string; baseUrl?: string;
apiKey?: string; apiKey?: string;
}; };
mistral?: {
chatModel?: string;
ocrModel?: string;
tableFormat?: 'markdown' | 'html';
};
ollama?: { ollama?: {
baseUrl?: string; baseUrl?: string;
model?: string; model?: string;
visionModel?: string; visionModel?: string;
defaultOptions?: {
num_ctx?: number;
temperature?: number;
top_k?: number;
top_p?: number;
repeat_penalty?: number;
num_predict?: number;
stop?: string[];
seed?: number;
};
defaultTimeout?: number;
}; };
elevenlabs?: { elevenlabs?: {
defaultVoiceId?: string; defaultVoiceId?: string;
@@ -32,7 +50,7 @@ export interface ISmartAiOptions {
}; };
} }
export type TProvider = 'openai' | 'anthropic' | 'perplexity' | 'ollama' | 'exo' | 'groq' | 'xai' | 'elevenlabs'; export type TProvider = 'openai' | 'anthropic' | 'perplexity' | 'ollama' | 'exo' | 'groq' | 'mistral' | 'xai' | 'elevenlabs';
export class SmartAi { export class SmartAi {
public options: ISmartAiOptions; public options: ISmartAiOptions;
@@ -43,6 +61,7 @@ export class SmartAi {
public ollamaProvider: OllamaProvider; public ollamaProvider: OllamaProvider;
public exoProvider: ExoProvider; public exoProvider: ExoProvider;
public groqProvider: GroqProvider; public groqProvider: GroqProvider;
public mistralProvider: MistralProvider;
public xaiProvider: XAIProvider; public xaiProvider: XAIProvider;
public elevenlabsProvider: ElevenLabsProvider; public elevenlabsProvider: ElevenLabsProvider;
@@ -75,6 +94,15 @@ export class SmartAi {
}); });
await this.groqProvider.start(); await this.groqProvider.start();
} }
if (this.options.mistralToken) {
this.mistralProvider = new MistralProvider({
mistralToken: this.options.mistralToken,
chatModel: this.options.mistral?.chatModel,
ocrModel: this.options.mistral?.ocrModel,
tableFormat: this.options.mistral?.tableFormat,
});
await this.mistralProvider.start();
}
if (this.options.xaiToken) { if (this.options.xaiToken) {
this.xaiProvider = new XAIProvider({ this.xaiProvider = new XAIProvider({
xaiToken: this.options.xaiToken, xaiToken: this.options.xaiToken,
@@ -94,6 +122,8 @@ export class SmartAi {
baseUrl: this.options.ollama.baseUrl, baseUrl: this.options.ollama.baseUrl,
model: this.options.ollama.model, model: this.options.ollama.model,
visionModel: this.options.ollama.visionModel, visionModel: this.options.ollama.visionModel,
defaultOptions: this.options.ollama.defaultOptions,
defaultTimeout: this.options.ollama.defaultTimeout,
}); });
await this.ollamaProvider.start(); await this.ollamaProvider.start();
} }
@@ -119,6 +149,9 @@ export class SmartAi {
if (this.groqProvider) { if (this.groqProvider) {
await this.groqProvider.stop(); await this.groqProvider.stop();
} }
if (this.mistralProvider) {
await this.mistralProvider.stop();
}
if (this.xaiProvider) { if (this.xaiProvider) {
await this.xaiProvider.stop(); await this.xaiProvider.stop();
} }
@@ -150,6 +183,8 @@ export class SmartAi {
return Conversation.createWithOllama(this); return Conversation.createWithOllama(this);
case 'groq': case 'groq':
return Conversation.createWithGroq(this); return Conversation.createWithGroq(this);
case 'mistral':
return Conversation.createWithMistral(this);
case 'xai': case 'xai':
return Conversation.createWithXai(this); return Conversation.createWithXai(this);
case 'elevenlabs': case 'elevenlabs':


@@ -4,6 +4,7 @@ export * from './provider.openai.js';
export * from './provider.anthropic.js'; export * from './provider.anthropic.js';
export * from './provider.perplexity.js'; export * from './provider.perplexity.js';
export * from './provider.groq.js'; export * from './provider.groq.js';
export * from './provider.mistral.js';
export * from './provider.ollama.js'; export * from './provider.ollama.js';
export * from './provider.xai.js'; export * from './provider.xai.js';
export * from './provider.exo.js'; export * from './provider.exo.js';


@@ -8,7 +8,7 @@ export {
// @push.rocks scope // @push.rocks scope
import * as qenv from '@push.rocks/qenv'; import * as qenv from '@push.rocks/qenv';
import * as smartarray from '@push.rocks/smartarray'; import * as smartarray from '@push.rocks/smartarray';
import * as smartfile from '@push.rocks/smartfile'; import * as smartfs from '@push.rocks/smartfs';
import * as smartpath from '@push.rocks/smartpath'; import * as smartpath from '@push.rocks/smartpath';
import * as smartpdf from '@push.rocks/smartpdf'; import * as smartpdf from '@push.rocks/smartpdf';
import * as smartpromise from '@push.rocks/smartpromise'; import * as smartpromise from '@push.rocks/smartpromise';
@@ -18,7 +18,7 @@ import * as webstream from '@push.rocks/webstream';
export { export {
smartarray, smartarray,
qenv, qenv,
smartfile, smartfs,
smartpath, smartpath,
smartpdf, smartpdf,
smartpromise, smartpromise,
@@ -28,9 +28,11 @@ export {
// third party // third party
import * as anthropic from '@anthropic-ai/sdk'; import * as anthropic from '@anthropic-ai/sdk';
import * as mistralai from '@mistralai/mistralai';
import * as openai from 'openai'; import * as openai from 'openai';
export { export {
anthropic, anthropic,
mistralai,
openai, openai,
} }

ts/provider.mistral.ts (new file, 352 lines)

@@ -0,0 +1,352 @@
import * as plugins from './plugins.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type {
ChatOptions,
ChatResponse,
ChatMessage,
ResearchOptions,
ResearchResponse,
ImageGenerateOptions,
ImageEditOptions,
ImageResponse
} from './abstract.classes.multimodal.js';
export interface IMistralProviderOptions {
mistralToken: string;
chatModel?: string; // default: 'mistral-large-latest'
ocrModel?: string; // default: 'mistral-ocr-latest'
tableFormat?: 'markdown' | 'html';
}
export class MistralProvider extends MultiModalModel {
private options: IMistralProviderOptions;
public mistralClient: plugins.mistralai.Mistral;
constructor(optionsArg: IMistralProviderOptions) {
super();
this.options = optionsArg;
}
async start() {
await super.start();
this.mistralClient = new plugins.mistralai.Mistral({
apiKey: this.options.mistralToken,
});
}
async stop() {
await super.stop();
}
/**
* Synchronous chat interaction using Mistral's chat API
*/
public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
// Convert message history to Mistral format
const messages: Array<{
role: 'system' | 'user' | 'assistant';
content: string;
}> = [];
// Add system message first
if (optionsArg.systemMessage) {
messages.push({
role: 'system',
content: optionsArg.systemMessage
});
}
// Add message history
for (const msg of optionsArg.messageHistory) {
messages.push({
role: msg.role === 'system' ? 'system' : msg.role === 'assistant' ? 'assistant' : 'user',
content: msg.content
});
}
// Add current user message
messages.push({
role: 'user',
content: optionsArg.userMessage
});
const result = await this.mistralClient.chat.complete({
model: this.options.chatModel || 'mistral-large-latest',
messages: messages,
});
// Extract content from response
const choice = result.choices?.[0];
let content = '';
if (choice?.message?.content) {
if (typeof choice.message.content === 'string') {
content = choice.message.content;
} else if (Array.isArray(choice.message.content)) {
// Handle array of content chunks
content = choice.message.content
.map((chunk: any) => {
if (typeof chunk === 'string') return chunk;
if (chunk && typeof chunk === 'object' && 'text' in chunk) return chunk.text;
return '';
})
.join('');
}
}
return {
role: 'assistant',
message: content,
};
}
/**
* Streaming chat using Mistral's streaming API
*/
public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
const decoder = new TextDecoder();
let buffer = '';
const mistralClient = this.mistralClient;
const chatModel = this.options.chatModel || 'mistral-large-latest';
const transform = new TransformStream<Uint8Array, string>({
async transform(chunk, controller) {
buffer += decoder.decode(chunk, { stream: true });
// Try to parse complete JSON messages from the buffer
while (true) {
const newlineIndex = buffer.indexOf('\n');
if (newlineIndex === -1) break;
const line = buffer.slice(0, newlineIndex);
buffer = buffer.slice(newlineIndex + 1);
if (line.trim()) {
try {
const message = JSON.parse(line);
// Build messages array
const messages: Array<{
role: 'system' | 'user' | 'assistant';
content: string;
}> = [];
if (message.systemMessage) {
messages.push({
role: 'system',
content: message.systemMessage
});
}
messages.push({
role: message.role === 'assistant' ? 'assistant' : 'user',
content: message.content
});
// Use Mistral streaming
const stream = await mistralClient.chat.stream({
model: chatModel,
messages: messages,
});
// Process streaming events
for await (const event of stream) {
const delta = event.data?.choices?.[0]?.delta;
if (delta?.content) {
if (typeof delta.content === 'string') {
controller.enqueue(delta.content);
} else if (Array.isArray(delta.content)) {
for (const chunk of delta.content) {
if (typeof chunk === 'string') {
controller.enqueue(chunk);
} else if (chunk && typeof chunk === 'object' && 'text' in chunk) {
controller.enqueue((chunk as any).text);
}
}
}
}
}
} catch (e) {
console.error('Failed to parse message:', e);
}
}
}
},
flush(controller) {
if (buffer.trim()) {
try {
const message = JSON.parse(buffer);
controller.enqueue(message.content || '');
} catch (e) {
console.error('Failed to parse remaining buffer:', e);
}
}
}
});
return input.pipeThrough(transform);
}
/**
* Audio generation is not supported by Mistral
*/
public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
throw new Error('Audio generation is not supported by Mistral. Please use ElevenLabs or OpenAI provider for audio generation.');
}
/**
* Vision using Mistral's OCR API for image analysis
*/
public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
const base64Image = optionsArg.image.toString('base64');
// Detect image type from buffer header
let mimeType = 'image/jpeg';
if (optionsArg.image[0] === 0x89 && optionsArg.image[1] === 0x50) {
mimeType = 'image/png';
} else if (optionsArg.image[0] === 0x47 && optionsArg.image[1] === 0x49) {
mimeType = 'image/gif';
} else if (optionsArg.image[0] === 0x52 && optionsArg.image[1] === 0x49) {
mimeType = 'image/webp';
}
// Use OCR API with image data URL
const ocrResult = await this.mistralClient.ocr.process({
model: this.options.ocrModel || 'mistral-ocr-latest',
document: {
imageUrl: `data:${mimeType};base64,${base64Image}`,
type: 'image_url',
},
});
// Combine markdown from all pages
const extractedText = ocrResult.pages.map(page => page.markdown).join('\n\n');
// If a prompt is provided, use chat to analyze the extracted text
if (optionsArg.prompt && optionsArg.prompt.trim()) {
const chatResponse = await this.chat({
systemMessage: 'You are an assistant analyzing image content. The following is text extracted from an image using OCR.',
userMessage: `${optionsArg.prompt}\n\nExtracted content:\n${extractedText}`,
messageHistory: [],
});
return chatResponse.message;
}
return extractedText;
}
/**
* Document processing using Mistral's OCR API
* PDFs are uploaded via Files API first, then processed with OCR
*/
public async document(optionsArg: {
systemMessage: string;
userMessage: string;
pdfDocuments: Uint8Array[];
messageHistory: ChatMessage[];
}): Promise<{ message: any }> {
const extractedTexts: string[] = [];
const uploadedFileIds: string[] = [];
try {
// Process each PDF document using Mistral OCR
for (let i = 0; i < optionsArg.pdfDocuments.length; i++) {
const pdfDocument = optionsArg.pdfDocuments[i];
// Upload the PDF to Mistral's Files API first
const uploadResult = await this.mistralClient.files.upload({
file: {
fileName: `document_${i + 1}.pdf`,
content: pdfDocument,
},
purpose: 'ocr',
});
uploadedFileIds.push(uploadResult.id);
// Now use OCR with the uploaded file
const ocrResult = await this.mistralClient.ocr.process({
model: this.options.ocrModel || 'mistral-ocr-latest',
document: {
type: 'file',
fileId: uploadResult.id,
},
tableFormat: this.options.tableFormat || 'markdown',
});
// Combine all page markdown with page separators
const pageTexts = ocrResult.pages.map((page, index) => {
let pageContent = `--- Page ${index + 1} ---\n${page.markdown}`;
// Include tables if present
if (page.tables && page.tables.length > 0) {
pageContent += '\n\n**Tables:**\n' + page.tables.map((t: any) => t.markdown || t.html || '').join('\n');
}
// Include header/footer if present
if (page.header) {
pageContent = `Header: ${page.header}\n${pageContent}`;
}
if (page.footer) {
pageContent += `\nFooter: ${page.footer}`;
}
return pageContent;
}).join('\n\n');
extractedTexts.push(pageTexts);
}
// Combine all document texts
const allDocumentText = extractedTexts.length === 1
? extractedTexts[0]
: extractedTexts.map((text, i) => `=== Document ${i + 1} ===\n${text}`).join('\n\n');
// Use chat API to process the extracted text with the user's query
const chatResponse = await this.chat({
systemMessage: optionsArg.systemMessage || 'You are a helpful assistant analyzing document content.',
userMessage: `${optionsArg.userMessage}\n\n---\nDocument Content:\n${allDocumentText}`,
messageHistory: optionsArg.messageHistory,
});
return {
message: {
role: 'assistant',
content: chatResponse.message
}
};
} finally {
// Clean up uploaded files
for (const fileId of uploadedFileIds) {
try {
await this.mistralClient.files.delete({ fileId });
} catch (cleanupError) {
// Ignore cleanup errors - files may have already been auto-deleted
console.warn(`Failed to delete temporary file ${fileId}:`, cleanupError);
}
}
}
}
/**
* Research is not natively supported by Mistral
*/
public async research(optionsArg: ResearchOptions): Promise<ResearchResponse> {
throw new Error('Research/web search is not supported by Mistral. Please use Perplexity or Anthropic provider for research capabilities.');
}
/**
* Image generation is not supported by Mistral
*/
public async imageGenerate(optionsArg: ImageGenerateOptions): Promise<ImageResponse> {
throw new Error('Image generation is not supported by Mistral. Please use OpenAI provider for image generation.');
}
/**
* Image editing is not supported by Mistral
*/
public async imageEdit(optionsArg: ImageEditOptions): Promise<ImageResponse> {
throw new Error('Image editing is not supported by Mistral. Please use OpenAI provider for image editing.');
}
}
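A short usage sketch for the new provider: `vision()` runs OCR on the image and, because a prompt is given, answers it against the extracted markdown via `chat()`. The image path is an assumption.

```typescript
import { promises as fs } from 'node:fs';
import { MistralProvider } from '@push.rocks/smartai';

const mistral = new MistralProvider({
  mistralToken: process.env.MISTRAL_API_KEY!,
  tableFormat: 'markdown',
});
await mistral.start();

// OCR the image, then answer the prompt against the extracted text
const imageBuffer = await fs.readFile('./invoice.png'); // assumed local file
const answer = await mistral.vision({
  image: imageBuffer,
  prompt: 'What is the invoice total?',
});
console.log(answer);

await mistral.stop();
```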


@@ -9,13 +9,65 @@ import type {
ResearchResponse, ResearchResponse,
ImageGenerateOptions, ImageGenerateOptions,
ImageEditOptions, ImageEditOptions,
ImageResponse ImageResponse,
StreamingChatOptions
} from './abstract.classes.multimodal.js'; } from './abstract.classes.multimodal.js';
/**
* Ollama model runtime options
* @see https://github.com/ollama/ollama/blob/main/docs/modelfile.md
*/
export interface IOllamaModelOptions {
num_ctx?: number; // Context window (default: 2048)
temperature?: number; // 0 = deterministic (default: 0.8)
top_k?: number; // Top-k sampling (default: 40)
top_p?: number; // Nucleus sampling (default: 0.9)
repeat_penalty?: number;// Repeat penalty (default: 1.1)
num_predict?: number; // Max tokens to predict
stop?: string[]; // Stop sequences
seed?: number; // Random seed for reproducibility
}
export interface IOllamaProviderOptions { export interface IOllamaProviderOptions {
baseUrl?: string; baseUrl?: string;
model?: string; model?: string;
visionModel?: string; // Model to use for vision tasks (e.g. 'llava') visionModel?: string; // Model to use for vision tasks (e.g. 'llava')
defaultOptions?: IOllamaModelOptions; // Default model options
defaultTimeout?: number; // Default timeout in ms (default: 120000)
}
/**
* Extended chat options with Ollama-specific settings
*/
export interface IOllamaChatOptions extends ChatOptions {
options?: IOllamaModelOptions; // Per-request model options
timeout?: number; // Per-request timeout in ms
model?: string; // Per-request model override
// images is inherited from ChatOptions
}
/**
* Chunk emitted during streaming
*/
export interface IOllamaStreamChunk {
content: string;
thinking?: string; // For models with extended thinking
done: boolean;
stats?: {
totalDuration?: number;
evalCount?: number;
};
}
/**
* Extended chat response with Ollama-specific fields
*/
export interface IOllamaChatResponse extends ChatResponse {
thinking?: string;
stats?: {
totalDuration?: number;
evalCount?: number;
};
} }
export class OllamaProvider extends MultiModalModel { export class OllamaProvider extends MultiModalModel {
@@ -23,6 +75,8 @@ export class OllamaProvider extends MultiModalModel {
private baseUrl: string; private baseUrl: string;
private model: string; private model: string;
private visionModel: string; private visionModel: string;
private defaultOptions: IOllamaModelOptions;
private defaultTimeout: number;
constructor(optionsArg: IOllamaProviderOptions = {}) { constructor(optionsArg: IOllamaProviderOptions = {}) {
super(); super();
@@ -30,6 +84,8 @@ export class OllamaProvider extends MultiModalModel {
this.baseUrl = optionsArg.baseUrl || 'http://localhost:11434'; this.baseUrl = optionsArg.baseUrl || 'http://localhost:11434';
this.model = optionsArg.model || 'llama2'; this.model = optionsArg.model || 'llama2';
this.visionModel = optionsArg.visionModel || 'llava'; this.visionModel = optionsArg.visionModel || 'llava';
this.defaultOptions = optionsArg.defaultOptions || {};
this.defaultTimeout = optionsArg.defaultTimeout || 120000;
} }
async start() { async start() {
@@ -148,13 +204,36 @@ export class OllamaProvider extends MultiModalModel {
// Implementing the synchronous chat interaction // Implementing the synchronous chat interaction
public async chat(optionsArg: ChatOptions): Promise<ChatResponse> { public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
// Format messages for Ollama // Format messages for Ollama
const historyMessages = optionsArg.messageHistory.map((msg) => {
const formatted: { role: string; content: string; images?: string[]; reasoning?: string } = {
role: msg.role,
content: msg.content,
};
if (msg.images && msg.images.length > 0) {
formatted.images = msg.images;
}
if (msg.reasoning) {
formatted.reasoning = msg.reasoning;
}
return formatted;
});
// Build user message with optional images
const userMessage: { role: string; content: string; images?: string[] } = {
role: 'user',
content: optionsArg.userMessage,
};
if (optionsArg.images && optionsArg.images.length > 0) {
userMessage.images = optionsArg.images;
}
const messages = [ const messages = [
{ role: 'system', content: optionsArg.systemMessage }, { role: 'system', content: optionsArg.systemMessage },
...optionsArg.messageHistory, ...historyMessages,
{ role: 'user', content: optionsArg.userMessage } userMessage,
]; ];
// Make API call to Ollama // Make API call to Ollama with defaultOptions and timeout
const response = await fetch(`${this.baseUrl}/api/chat`, { const response = await fetch(`${this.baseUrl}/api/chat`, {
method: 'POST', method: 'POST',
headers: { headers: {
@@ -163,8 +242,10 @@ export class OllamaProvider extends MultiModalModel {
body: JSON.stringify({ body: JSON.stringify({
model: this.model, model: this.model,
messages: messages, messages: messages,
stream: false stream: false,
options: this.defaultOptions,
}), }),
signal: AbortSignal.timeout(this.defaultTimeout),
}); });
if (!response.ok) { if (!response.ok) {
@@ -176,6 +257,222 @@ export class OllamaProvider extends MultiModalModel {
return { return {
role: 'assistant' as const, role: 'assistant' as const,
message: result.message.content, message: result.message.content,
reasoning: result.message.thinking || result.message.reasoning,
};
}
/**
* Streaming chat with token callback (implements MultiModalModel interface)
* Calls onToken for each token generated during the response
*/
public async chatStreaming(optionsArg: StreamingChatOptions): Promise<ChatResponse> {
const onToken = optionsArg.onToken;
// Use existing collectStreamResponse with callback, including images
const response = await this.collectStreamResponse(
{
systemMessage: optionsArg.systemMessage,
userMessage: optionsArg.userMessage,
messageHistory: optionsArg.messageHistory,
images: optionsArg.images,
},
(chunk) => {
if (onToken) {
if (chunk.thinking) onToken(chunk.thinking);
if (chunk.content) onToken(chunk.content);
}
}
);
return {
role: 'assistant' as const,
message: response.message,
reasoning: response.thinking,
};
}
/**
* Streaming chat with async iteration and options support
*/
public async chatStreamResponse(
optionsArg: IOllamaChatOptions
): Promise<AsyncIterable<IOllamaStreamChunk>> {
const model = optionsArg.model || this.model;
const timeout = optionsArg.timeout || this.defaultTimeout;
const modelOptions = { ...this.defaultOptions, ...optionsArg.options };
// Format history messages with optional images and reasoning
const historyMessages = optionsArg.messageHistory.map((msg) => {
const formatted: { role: string; content: string; images?: string[]; reasoning?: string } = {
role: msg.role,
content: msg.content,
};
if (msg.images && msg.images.length > 0) {
formatted.images = msg.images;
}
if (msg.reasoning) {
formatted.reasoning = msg.reasoning;
}
return formatted;
});
// Build user message with optional images
const userMessage: { role: string; content: string; images?: string[] } = {
role: 'user',
content: optionsArg.userMessage,
};
if (optionsArg.images && optionsArg.images.length > 0) {
userMessage.images = optionsArg.images;
}
const messages = [
{ role: 'system', content: optionsArg.systemMessage },
...historyMessages,
userMessage,
];
const response = await fetch(`${this.baseUrl}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model,
messages,
stream: true,
options: modelOptions,
}),
signal: AbortSignal.timeout(timeout),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.status}`);
}
const reader = response.body!.getReader();
const decoder = new TextDecoder();
return {
[Symbol.asyncIterator]: async function* () {
let buffer = '';
try {
while (true) {
const { done, value } = await reader.read();
if (done) break;
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split('\n');
buffer = lines.pop() || '';
for (const line of lines) {
if (!line.trim()) continue;
try {
const json = JSON.parse(line);
yield {
content: json.message?.content || '',
thinking: json.message?.thinking,
done: json.done || false,
stats: json.done ? {
totalDuration: json.total_duration,
evalCount: json.eval_count,
} : undefined,
} as IOllamaStreamChunk;
} catch { /* skip malformed */ }
}
}
} finally {
reader.releaseLock();
}
}
};
}
/**
* Stream and collect full response with optional progress callback
*/
public async collectStreamResponse(
optionsArg: IOllamaChatOptions,
onChunk?: (chunk: IOllamaStreamChunk) => void
): Promise<IOllamaChatResponse> {
const stream = await this.chatStreamResponse(optionsArg);
let content = '';
let thinking = '';
let stats: IOllamaChatResponse['stats'];
for await (const chunk of stream) {
if (chunk.content) content += chunk.content;
if (chunk.thinking) thinking += chunk.thinking;
if (chunk.stats) stats = chunk.stats;
if (onChunk) onChunk(chunk);
}
return {
role: 'assistant' as const,
message: content,
thinking: thinking || undefined,
stats,
};
}
/**
* Non-streaming chat with full options support
*/
public async chatWithOptions(optionsArg: IOllamaChatOptions): Promise<IOllamaChatResponse> {
const model = optionsArg.model || this.model;
const timeout = optionsArg.timeout || this.defaultTimeout;
const modelOptions = { ...this.defaultOptions, ...optionsArg.options };
// Format history messages with optional images and reasoning
const historyMessages = optionsArg.messageHistory.map((msg) => {
const formatted: { role: string; content: string; images?: string[]; reasoning?: string } = {
role: msg.role,
content: msg.content,
};
if (msg.images && msg.images.length > 0) {
formatted.images = msg.images;
}
if (msg.reasoning) {
formatted.reasoning = msg.reasoning;
}
return formatted;
});
// Build user message with optional images
const userMessage: { role: string; content: string; images?: string[] } = {
role: 'user',
content: optionsArg.userMessage,
};
if (optionsArg.images && optionsArg.images.length > 0) {
userMessage.images = optionsArg.images;
}
const messages = [
{ role: 'system', content: optionsArg.systemMessage },
...historyMessages,
userMessage,
];
const response = await fetch(`${this.baseUrl}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model,
messages,
stream: false,
options: modelOptions,
}),
signal: AbortSignal.timeout(timeout),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.statusText}`);
}
const result = await response.json();
return {
role: 'assistant' as const,
message: result.message.content,
thinking: result.message.thinking,
stats: {
totalDuration: result.total_duration,
evalCount: result.eval_count,
},
}; };
} }
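Closing out the Ollama changes, a sketch of the per-request surface added here: `chatWithOptions` merges request options over `defaultOptions`, and `chatStreamResponse` yields `IOllamaStreamChunk`s for async iteration. Model and prompt strings are illustrative.

```typescript
import { OllamaProvider } from '@push.rocks/smartai';

const ollama = new OllamaProvider({
  model: 'llama2',
  defaultOptions: { num_ctx: 4096, temperature: 0.7 },
  defaultTimeout: 120000,
});
await ollama.start();

// Non-streaming, overriding the defaults for this request only
const result = await ollama.chatWithOptions({
  systemMessage: 'You are terse.',
  userMessage: 'Name three prime numbers.',
  messageHistory: [],
  options: { temperature: 0, seed: 42 },
});
console.log(result.message, result.stats?.evalCount);

// Streaming via async iteration over IOllamaStreamChunk
const stream = await ollama.chatStreamResponse({
  systemMessage: 'You are terse.',
  userMessage: 'Count to five.',
  messageHistory: [],
});
for await (const chunk of stream) {
  if (chunk.thinking) process.stdout.write(chunk.thinking);
  process.stdout.write(chunk.content);
  if (chunk.done) console.log('\ntotal duration:', chunk.stats?.totalDuration);
}
```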