Compare commits

...

73 Commits

Author SHA1 Message Date
a51b002141 v2.0.0
Some checks failed
Default (tags) / security (push) Failing after 0s
Default (tags) / test (push) Failing after 0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-05 19:37:29 +00:00
c24010c9bc BREAKING CHANGE(vercel-ai-sdk): migrate to Vercel AI SDK v6 and introduce provider registry (getModel) returning LanguageModelV3 2026-03-05 19:37:29 +00:00
27cef60900 v0.13.3
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-01-20 03:55:09 +00:00
2b00e36b02 fix(): no changes detected 2026-01-20 03:55:09 +00:00
8eb3111e7e fix(ollama): preserve tool_calls in message history for native tool calling
When using native tool calling, the assistant's tool_calls must be saved in
message history. Without this, the model doesn't know it already called a
tool and may loop indefinitely calling the same tool.

This fix adds tool_calls forwarding in chatStreamResponse and chatWithOptions
history formatting.
2026-01-20 03:54:51 +00:00
d296a1b676 v0.13.2
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-01-20 02:50:46 +00:00
f74d1cf2ba fix(repo): no changes detected in diff; nothing to commit 2026-01-20 02:50:46 +00:00
b29d7f5df3 fix(classes.smartai): use IOllamaModelOptions type for defaultOptions instead of inline type 2026-01-20 02:50:32 +00:00
00b8312fa7 v0.13.1
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-01-20 02:40:29 +00:00
4be91d678a fix(): no changes detected; no release required 2026-01-20 02:40:29 +00:00
1156320546 feat(provider.ollama): add native tool calling support for Ollama API
- Add IOllamaTool and IOllamaToolCall types for native function calling
- Add think parameter to IOllamaModelOptions for reasoning models (GPT-OSS, QwQ)
- Add tools parameter to IOllamaChatOptions
- Add toolCalls to response interfaces (IOllamaStreamChunk, IOllamaChatResponse)
- Update chat(), chatStreamResponse(), collectStreamResponse(), chatWithOptions() to support native tools
- Parse tool_calls from Ollama API responses
- Add support for tool message role in conversation history
2026-01-20 02:39:28 +00:00
7cb9bc24dc v0.13.0
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-01-20 02:03:20 +00:00
9ad039f77b feat(provider.ollama): add chain-of-thought reasoning support to chat messages and Ollama provider 2026-01-20 02:03:20 +00:00
6c6652d75d v0.12.1
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-01-20 01:27:52 +00:00
2040b3c629 fix(docs): update documentation: clarify provider capabilities, add provider capabilities summary, polish examples and formatting, and remove Serena project config 2026-01-20 01:27:52 +00:00
ae8d3ccf33 v0.12.0
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-01-20 01:10:27 +00:00
3b900d0ba9 feat(ollama): add support for base64-encoded images in chat messages and forward them to the Ollama provider 2026-01-20 01:10:27 +00:00
d49152390f v0.11.1 2026-01-20 00:37:59 +00:00
d615ec9227 feat(streaming): add chatStreaming method with token callback for real-time generation progress
- Add StreamingChatOptions interface with onToken callback
- Add optional chatStreaming method to MultiModalModel abstract class
- Implement chatStreaming in OllamaProvider using collectStreamResponse
2026-01-20 00:37:49 +00:00
dfa863ee7d v0.11.0
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-01-20 00:12:21 +00:00
c84ede1f1d feat(ollama): support defaultOptions and defaultTimeout for ollama provider 2026-01-20 00:12:21 +00:00
4937dbf6ab v0.10.1
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-01-20 00:03:06 +00:00
8cb052449e fix(): no changes detected — no release necessary 2026-01-20 00:03:06 +00:00
126e9b239b feat(OllamaProvider): add model options, streaming support, and thinking tokens
- Add IOllamaModelOptions interface for runtime options (num_ctx, temperature, etc.)
- Extend IOllamaProviderOptions with defaultOptions and defaultTimeout
- Add IOllamaChatOptions for per-request overrides
- Add IOllamaStreamChunk and IOllamaChatResponse interfaces
- Add chatStreamResponse() for async iteration with options
- Add collectStreamResponse() for streaming with progress callback
- Add chatWithOptions() for non-streaming with full options
- Update chat() to use defaultOptions and defaultTimeout
2026-01-20 00:02:45 +00:00
a556053510 v0.10.0
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-01-18 22:11:52 +00:00
e4dc81edc9 feat(mistral): add Mistral provider with native PDF OCR and chat integration 2026-01-18 22:11:52 +00:00
6f79dc3535 v0.9.0
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-01-18 16:26:16 +00:00
b4ced080f2 feat(providers): Add Anthropic extended thinking and adapt providers to new streaming/file APIs; bump dependencies and update docs, tests and configuration 2026-01-18 16:26:16 +00:00
e8a2a3ff1b 0.8.0
Some checks failed
Default (tags) / security (push) Failing after 24s
Default (tags) / test (push) Failing after 14s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-10-30 12:11:18 +00:00
cbc9d8d45b feat(provider.anthropic): Add extended thinking modes to AnthropicProvider and apply thinking budgets to API calls 2025-10-30 12:11:18 +00:00
d52e6ae67d 0.7.7
Some checks failed
Default (tags) / security (push) Failing after 23s
Default (tags) / test (push) Failing after 14s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-10-10 07:32:21 +00:00
b9745a1869 fix(MultiModalModel): Lazy-load SmartPdf and guard document processing across providers; ensure SmartPdf is initialized only when needed 2025-10-10 07:32:21 +00:00
af3b61cf74 0.7.6
Some checks failed
Default (tags) / security (push) Failing after 23s
Default (tags) / test (push) Failing after 14s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-10-09 07:00:15 +00:00
8666876879 fix(provider.elevenlabs): Provide default ElevenLabs TTS voice fallback and add local tool/project configs 2025-10-09 07:00:15 +00:00
b78168307b 0.7.5
Some checks failed
Default (tags) / security (push) Failing after 24s
Default (tags) / test (push) Failing after 15s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-10-08 22:56:53 +00:00
bbd8770205 fix(provider.elevenlabs): Update ElevenLabs default TTS model to eleven_v3 and add local Claude permissions file 2025-10-08 22:56:53 +00:00
28bb13dc0c update 2025-10-08 22:49:08 +00:00
3a24c2c4bd 0.7.4
Some checks failed
Default (tags) / security (push) Failing after 21s
Default (tags) / test (push) Failing after 14s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-10-03 15:47:15 +00:00
8244ac6eb0 fix(provider.anthropic): Use image/png for embedded PDF images in Anthropic provider and add local Claude settings for development permissions 2025-10-03 15:47:15 +00:00
2791d738d6 0.7.3
Some checks failed
Default (tags) / security (push) Failing after 22s
Default (tags) / test (push) Failing after 14s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-10-03 14:21:25 +00:00
3fbd054985 fix(tests): Add extensive provider/feature tests and local Claude CI permissions 2025-10-03 14:21:25 +00:00
8e8830ef92 0.7.2
Some checks failed
Default (tags) / security (push) Failing after 14s
Default (tags) / test (push) Failing after 14s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-10-03 13:51:49 +00:00
34931875ad fix(anthropic): Update Anthropic provider branding to Claude Sonnet 4.5 and add local Claude permissions 2025-10-03 13:51:49 +00:00
2672509d3f 0.7.1
Some checks failed
Default (tags) / security (push) Failing after 23s
Default (tags) / test (push) Failing after 13s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-10-03 13:49:46 +00:00
ee3a635852 fix(docs): Add README image generation docs and .claude local settings 2025-10-03 13:49:46 +00:00
a222b1c2fa 0.7.0
Some checks failed
Default (tags) / security (push) Failing after 24s
Default (tags) / test (push) Failing after 15s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-10-03 13:43:29 +00:00
f0556e89f3 feat(providers): Add research API and image generation/editing support; extend providers and tests 2025-10-03 13:43:29 +00:00
fe8540c8ba feat(research): Implement research APIs. 2025-10-03 12:50:42 +00:00
e34bf19698 0.6.1
Some checks failed
Default (tags) / security (push) Failing after 21s
Default (tags) / test (push) Failing after 13s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-09-28 15:51:50 +00:00
f70353e6ca fix(provider.anthropic): Fix Anthropic research tool identifier and add tests + local Claude permissions 2025-09-28 15:51:50 +00:00
0403443634 0.6.0
Some checks failed
Default (tags) / security (push) Failing after 23s
Default (tags) / test (push) Failing after 13s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-09-28 15:06:07 +00:00
e2ed429aac feat(research): Introduce research API with provider implementations, docs and tests 2025-09-28 15:06:07 +00:00
5c856ec3ed 0.5.11
Some checks failed
Default (tags) / security (push) Failing after 21s
Default (tags) / test (push) Failing after 12s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-08-12 13:15:37 +00:00
052f37294d fix(openaiProvider): Update default chat model to gpt-5-mini and bump dependency versions 2025-08-12 13:15:36 +00:00
93bb375059 fix(dependencies): Update SmartPdf to v4.1.1 for enhanced PDF processing capabilities
Some checks failed
Default (tags) / security (push) Failing after 19s
Default (tags) / test (push) Failing after 18s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-08-03 08:17:24 +00:00
574f7a594c fix(documentation): remove contribution section from readme
Some checks failed
Default (tags) / security (push) Failing after 23s
Default (tags) / test (push) Failing after 12s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-08-01 18:37:26 +00:00
0b2a058550 fix(core): improve SmartPdf lifecycle management and update dependencies
Some checks failed
Default (tags) / security (push) Failing after 19s
Default (tags) / test (push) Failing after 16s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-08-01 18:25:46 +00:00
88d15c89e5 0.5.6
Some checks failed
Default (tags) / security (push) Failing after 24s
Default (tags) / test (push) Failing after 13s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-07-26 16:17:11 +00:00
4bf7113334 feat(documentation): comprehensive documentation enhancement and test improvements
Some checks failed
Default (tags) / security (push) Failing after 25s
Default (tags) / test (push) Failing after 12s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-07-25 18:00:23 +00:00
6bdbeae144 0.5.4 2025-05-13 18:39:58 +00:00
09c27379cb fix(provider.openai): Update dependency versions, clean test imports, and adjust default OpenAI model configurations 2025-05-13 18:39:57 +00:00
2bc6f7ee5e 0.5.3 2025-04-03 21:46:40 +00:00
0ac50d647d fix(package.json): Add explicit packageManager field to package.json 2025-04-03 21:46:40 +00:00
5f9ffc7356 0.5.2 2025-04-03 21:46:15 +00:00
502b665224 fix(readme): Remove redundant conclusion section from README to streamline documentation. 2025-04-03 21:46:14 +00:00
bda0d7ed7e 0.5.1 2025-02-25 19:15:32 +00:00
de2a60d12f fix(OpenAiProvider): Corrected audio model ID in OpenAiProvider 2025-02-25 19:15:32 +00:00
5b3a93a43a 0.5.0 2025-02-25 19:04:40 +00:00
6b241f8889 feat(documentation and configuration): Enhanced package and README documentation 2025-02-25 19:04:40 +00:00
0a80ac0a8a 0.4.2 2025-02-25 18:23:28 +00:00
6ce442354e fix(core): Fix OpenAI chat streaming and PDF document processing logic. 2025-02-25 18:23:28 +00:00
9b38a3c06e 0.4.1 2025-02-25 13:01:23 +00:00
5dead05324 fix(provider): Fix provider modules for consistency 2025-02-25 13:01:23 +00:00
52 changed files with 11129 additions and 5317 deletions

7
.gitignore vendored
View File

@@ -3,7 +3,6 @@
# artifacts
coverage/
public/
pages/
# installs
node_modules/
@@ -17,4 +16,8 @@ node_modules/
dist/
dist_*/
# custom
# AI
.claude/
.serena/
#------# custom

View File

@@ -1,6 +1,302 @@
# Changelog
## 2026-03-05 - 2.0.0 - BREAKING CHANGE(vercel-ai-sdk)
migrate to Vercel AI SDK v6 and introduce provider registry (getModel) returning LanguageModelV3
- Major API rewrite and module reorganization; bump package version to 2.0.0
- Replace many legacy provider implementations with @ai-sdk/* providers and a new Ollama adapter (LanguageModelV3-based)
- Add subpath exports for capability packages: ./vision, ./audio, ./image, ./document, ./research
- Introduce Anthropic prompt-caching middleware and provider-level promptCaching option
- Split functionality into focused ts_* packages (ts_audio, ts_image, ts_document, ts_vision, ts_research) and adapt tests accordingly
- Update dependencies and devDependencies to use ai SDK providers and newer package versions
## 2026-01-20 - 0.13.3 - fix()
no changes detected
- No files changed in the provided diff.
- No version bump required.
## 2026-01-20 - 0.13.2 - fix(repo)
no changes detected in diff; nothing to commit
- Git diff reported no changes — no files modified
- No code or dependency updates detected, so no version bump required
## 2026-01-20 - 0.13.1 - fix()
no changes detected; no release required
- No changes found in the provided git diff
- Current package version is 0.13.0
## 2026-01-20 - 0.13.0 - feat(provider.ollama)
add chain-of-thought reasoning support to chat messages and Ollama provider
- Added optional reasoning?: string to chat message and chat response interfaces to surface chain-of-thought data.
- Propagates reasoning from message history into formatted requests sent to Ollama.
- Maps Ollama response fields (thinking or reasoning) into ChatResponse.reasoning so downstream code can access model reasoning output.
## 2026-01-20 - 0.12.1 - fix(docs)
update documentation: clarify provider capabilities, add provider capabilities summary, polish examples and formatting, and remove Serena project config
- Removed .serena/project.yml and cleaned up .serena/.gitignore
- Added Provider Capabilities Summary and expanded/clarified provider tables in readme.md and readme.hints.md
- Clarified Anthropic extended thinking details and Mistral native PDF OCR notes
- Polished example code snippets and fixed minor typos/formatting (GPT-5 mention, ElevenLabs model note, consistent punctuation)
- Updated test command references and other README usage instructions
## 2026-01-20 - 0.12.0 - feat(ollama)
add support for base64-encoded images in chat messages and forward them to the Ollama provider
- Add optional images?: string[] to ChatMessage and ChatOptions interfaces (multimodal/vision support)
- Propagate images from messageHistory and ChatOptions to the Ollama API payload in chat, chatStreaming, and streaming handlers
- Changes are non-breaking: images are optional and existing behavior is preserved when absent
## 2026-01-20 - 0.11.0 - feat(ollama)
support defaultOptions and defaultTimeout for ollama provider
- Added ollama.defaultOptions object with fields: num_ctx, temperature, top_k, top_p, repeat_penalty, num_predict, stop, seed
- Added ollama.defaultTimeout option
- Pass defaultOptions and defaultTimeout into OllamaProvider constructor when initializing the provider
- Non-breaking change: existing behavior preserved if new fields are undefined
## 2026-01-20 - 0.10.1 - fix()
no changes detected — no release necessary
- No files changed in the provided diff; there are no code, documentation, or configuration modifications to release.
## 2026-01-18 - 0.10.0 - feat(mistral)
add Mistral provider with native PDF OCR and chat integration
- Adds dependency @mistralai/mistralai
- Implements ts/provider.mistral.ts providing chat() and document() (OCR) functionality
- Registers and exposes MistralProvider in SmartAi (options, lifecycle, conversation routing)
- Adds unit/integration tests: test.chat.mistral.ts and test.document.mistral.ts
- Updates readme.hints.md with Mistral usage, configuration and notes
## 2026-01-18 - 0.9.0 - feat(providers)
Add Anthropic extended thinking and adapt providers to new streaming/file APIs; bump dependencies and update docs, tests and configuration
- Add IAnthropicProviderOptions.extendedThinking with thinking modes (quick/normal/deep/off) and getThinkingConfig mapping budgets; apply thinking to Anthropic requests and omit temperature when thinking is enabled.
- Update Anthropic research flow to include thinking configuration and conditionally set temperature.
- OpenAI image editing: use openai.toFile to convert image/mask Buffers to uploadable files (image/png) before sending.
- ElevenLabs streaming: switch from response.streamNode() to response.stream() and convert web stream to Node stream using Readable.fromWeb().
- Upgrade dependencies and dev tools: @anthropic-ai/sdk ^0.71.2, @push.rocks/smartrequest ^5.0.1, @git.zone/tsbuild and related @git.zone packages, and other bumps in package.json.
- Tests and test imports updated to use @git.zone/tstest/tapbundle; many test files adjusted accordingly.
- Docs and hints updated: README and readme.hints.md include extended thinking docs, examples, formatting fixes, security/issue reporting guidance, and trademark/license clarifications.
- Project config tweaks: package build script changed, tsconfig baseUrl/paths added, npmextra.json reorganized (release registries added), .gitignore updated to ignore .claude/.serena local tooling files.
## 2025-10-30 - 0.8.0 - feat(provider.anthropic)
Add extended thinking modes to AnthropicProvider and apply thinking budgets to API calls
- Introduce IAnthropicProviderOptions.extendedThinking to configure thinking modes: 'quick' | 'normal' | 'deep' | 'off'.
- Add getThinkingConfig() helper mapping modes to token budgets (quick=2048, normal=8000, deep=16000, off=0).
- Apply thinking configuration to Anthropic API calls (chat, chatStream, vision, document, research) and increase max_tokens where appropriate (up to 20000).
- Add comprehensive tests (test/test.thinking.anthropic.ts) and update readme.hints.md with usage examples and recommendations.
- Add .claude/settings.local.json for local assistant permissions used in development/testing.
## 2025-10-10 - 0.7.7 - fix(MultiModalModel)
Lazy-load SmartPdf and guard document processing across providers; ensure SmartPdf is initialized only when needed
- Make SmartPdf lazy-loaded: smartpdfInstance is now nullable and no longer started automatically in start()
- Add ensureSmartpdfReady() to initialize and start SmartPdf on demand before document processing
- Providers updated (OpenAI, Anthropic, Ollama, xAI) to call ensureSmartpdfReady() and use the smartpdfInstance for PDF -> image conversion
- stop() now cleans up and nullifies smartpdfInstance to release resources
- Avoids starting a browser/process unless document() is actually used (reduces unnecessary resource usage)
- Add local Claude permissions file (.claude/settings.local.json) for tooling/configuration
## 2025-10-09 - 0.7.6 - fix(provider.elevenlabs)
Provide default ElevenLabs TTS voice fallback and add local tool/project configs
- ElevenLabsProvider: fallback to Samara voice id ('19STyYD15bswVz51nqLf') when no voiceId or defaultVoiceId is provided — avoids throwing an error on TTS calls.
- ElevenLabsProvider: continue to use 'eleven_v3' as the default model for TTS.
- Add .claude/settings.local.json with expanded allowed permissions for local tooling and web search.
- Add .serena/project.yml and .serena/.gitignore to include Serena project configuration and ignore cache.
## 2025-10-08 - 0.7.5 - fix(provider.elevenlabs)
Update ElevenLabs default TTS model to eleven_v3 and add local Claude permissions file
- Changed default ElevenLabs modelId from 'eleven_multilingual_v2' to 'eleven_v3' in ts/provider.elevenlabs.ts to use the newer/default TTS model.
- Added .claude/settings.local.json with a permissions allow-list for local Claude tooling and CI tasks.
## 2025-10-03 - 0.7.4 - fix(provider.anthropic)
Use image/png for embedded PDF images in Anthropic provider and add local Claude settings for development permissions
- AnthropicProvider: change media_type from 'image/jpeg' to 'image/png' when embedding images extracted from PDFs to ensure correct format in Anthropic requests.
- Add .claude/settings.local.json with development/testing permissions for local Claude usage (shell commands, webfetch, websearch, test/run tasks).
## 2025-10-03 - 0.7.3 - fix(tests)
Add extensive provider/feature tests and local Claude CI permissions
- Add many focused test files covering providers and features: OpenAI, Anthropic, Perplexity, Groq, Ollama, Exo, XAI (chat, audio, vision, document, research, image generation, stubs, interfaces, basic)
- Introduce .claude/settings.local.json to declare allowed permissions for local Claude/CI actions
- Replace older aggregated test files with modular per-feature tests (removed legacy combined tests and split into smaller suites)
- No changes to library runtime code — this change adds tests and CI/local agent configuration only
## 2025-10-03 - 0.7.2 - fix(anthropic)
Update Anthropic provider branding to Claude Sonnet 4.5 and add local Claude permissions
- Docs: Replace 'Claude 3 Opus' with 'Claude Sonnet 4.5' in README provider capabilities matrix.
- Config: Add .claude/settings.local.json to define local Claude permissions for tests and development commands.
## 2025-10-03 - 0.7.1 - fix(docs)
Add README image generation docs and .claude local settings
- Add .claude/settings.local.json with permission allow-list for local assistant tooling and web search
- Update README provider capabilities table to include an Images column and reference gpt-image-1
- Add Image Generation & Editing section with examples, options, and gpt-image-1 advantages
- Mark image generation support as implemented in the roadmap and remove duplicate entry
## 2025-10-03 - 0.7.0 - feat(providers)
Add research API and image generation/editing support; extend providers and tests
- Introduce ResearchOptions and ResearchResponse to the MultiModalModel interface and implement research() where supported
- OpenAiProvider: implement research(), add imageGenerate() and imageEdit() methods (gpt-image-1 / DALL·E support), and expose imageModel option
- AnthropicProvider: implement research() and vision handling; explicitly throw for unsupported image generation/editing
- PerplexityProvider: implement research() (sonar / sonar-pro support) and expose citation parsing
- Add image/document-related interfaces (ImageGenerateOptions, ImageEditOptions, ImageResponse) to abstract API
- Add image generation/editing/no-op stubs for other providers (Exo, Groq, Ollama, XAI) that throw informative errors to preserve API compatibility
- Add comprehensive OpenAI image generation tests and helper to save test outputs (test/test.image.openai.ts)
- Update README with Research & Web Search documentation, capability matrix, and roadmap entry for Research & Web Search API
- Add local Claude agent permissions file (.claude/settings.local.json) and various provider type/import updates
## 2025-09-28 - 0.6.1 - fix(provider.anthropic)
Fix Anthropic research tool identifier and add tests + local Claude permissions
- Replace Anthropic research tool type from 'computer_20241022' to 'web_search_20250305' to match the expected web-search tool schema.
- Add comprehensive test suites and fixtures for providers and research features (new/updated tests under test/ including anthropic, openai, research.\* and stubs).
- Fix test usage of XAI provider class name (use XAIProvider) and adjust basic provider test expectations (provider instantiation moved to start()).
- Add .claude/settings.local.json with local Claude permissions to allow common CI/dev commands and web search during testing.
## 2025-09-28 - 0.6.0 - feat(research)
Introduce research API with provider implementations, docs and tests
- Add ResearchOptions and ResearchResponse interfaces and a new abstract research() method to MultiModalModel
- Implement research() for OpenAiProvider (deep research model selection, optional web search/tools, background flag, source extraction)
- Implement research() for AnthropicProvider (web search tool support, domain filters, citation extraction)
- Implement research() for PerplexityProvider (sonar / sonar-pro model usage and citation parsing)
- Add research() stubs to Exo, Groq, Ollama and XAI providers that throw a clear 'not yet supported' error to preserve interface compatibility
- Add tests for research interfaces and provider research methods (test files updated/added)
- Add documentation: readme.research.md describing the research API, usage and configuration
- Export additional providers from ts/index.ts and update provider typings/imports across files
- Add a 'typecheck' script to package.json
- Add .claude/settings.local.json (local agent permissions for CI/dev tasks)
## 2025-08-12 - 0.5.11 - fix(openaiProvider)
Update default chat model to gpt-5-mini and bump dependency versions
- Changed default chat model in OpenAiProvider from 'o3-mini' and 'o4-mini' to 'gpt-5-mini'
- Upgraded @anthropic-ai/sdk from ^0.57.0 to ^0.59.0
- Upgraded openai from ^5.11.0 to ^5.12.2
- Added new local Claude settings configuration (.claude/settings.local.json)
## 2025-08-03 - 0.5.10 - fix(dependencies)
Update SmartPdf to v4.1.1 for enhanced PDF processing capabilities
- Updated @push.rocks/smartpdf from ^3.3.0 to ^4.1.1
- Enhanced PDF conversion with improved scale options and quality controls
- Dependency updates for better performance and compatibility
## 2025-08-01 - 0.5.9 - fix(documentation)
Remove contribution section from readme
- Removed the contribution section from readme.md as requested
- Kept the roadmap section for future development plans
## 2025-08-01 - 0.5.8 - fix(core)
Fix SmartPdf lifecycle management and update dependencies
- Moved SmartPdf instance management to the MultiModalModel base class for better resource sharing
- Fixed memory leaks by properly implementing cleanup in the base class stop() method
- Updated SmartAi class to properly stop all providers on shutdown
- Updated @push.rocks/smartrequest from v2.1.0 to v4.2.1 with migration to new API
- Enhanced readme with professional documentation and feature matrix
## 2025-07-26 - 0.5.7 - fix(provider.openai)
Fix stream type mismatch in audio method
- Fixed type error where OpenAI SDK returns a web ReadableStream but the audio method needs to return a Node.js ReadableStream
- Added conversion using Node.js's built-in Readable.fromWeb() method
## 2025-07-25 - 0.5.5 - feat(documentation)
Comprehensive documentation enhancement and test improvements
- Completely rewrote readme.md with detailed provider comparisons, advanced usage examples, and performance tips
- Added comprehensive examples for all supported providers (OpenAI, Anthropic, Perplexity, Groq, XAI, Ollama, Exo)
- Included detailed sections on chat interactions, streaming, TTS, vision processing, and document analysis
- Added verbose flag to test script for better debugging
## 2025-05-13 - 0.5.4 - fix(provider.openai)
Update dependency versions, clean test imports, and adjust default OpenAI model configurations
- Bump dependency versions in package.json (@git.zone/tsbuild, @push.rocks/tapbundle, openai, etc.)
- Change default chatModel from 'gpt-4o' to 'o4-mini' and visionModel from 'gpt-4o' to 'o4-mini' in provider.openai.ts
- Remove unused 'expectAsync' import from test file
## 2025-04-03 - 0.5.3 - fix(package.json)
Add explicit packageManager field to package.json
- Include the packageManager property to specify the pnpm version and checksum.
- Align package metadata with current standards.
## 2025-04-03 - 0.5.2 - fix(readme)
Remove redundant conclusion section from README to streamline documentation.
- Eliminated the conclusion block describing SmartAi's capabilities and documentation pointers.
## 2025-02-25 - 0.5.1 - fix(OpenAiProvider)
Corrected audio model ID in OpenAiProvider
- Fixed audio model identifier from 'o3-mini' to 'tts-1-hd' in the OpenAiProvider's audio method.
- Addressed minor code formatting issues in test suite for better readability.
- Corrected spelling errors in test documentation and comments.
## 2025-02-25 - 0.5.0 - feat(documentation and configuration)
Enhanced package and README documentation
- Expanded the package description to better reflect the library's capabilities.
- Improved README with detailed usage examples for initialization, chat interactions, streaming chat, audio generation, document analysis, and vision processing.
- Provided error handling strategies and advanced streaming customization examples.
## 2025-02-25 - 0.4.2 - fix(core)
Fix OpenAI chat streaming and PDF document processing logic.
- Updated OpenAI chat streaming to handle new async iterable format.
- Improved PDF document processing by filtering out empty image buffers.
- Removed unsupported temperature options from OpenAI requests.
## 2025-02-25 - 0.4.1 - fix(provider)
Fix provider modules for consistency
- Updated TypeScript interfaces and options in provider modules for better type safety.
- Modified transform stream handlers in Exo, Groq, and Ollama providers for consistency.
- Added optional model options to OpenAI provider for custom model usage.
## 2025-02-08 - 0.4.0 - feat(core)
Added support for Exo AI provider
- Introduced ExoProvider with chat functionalities.
@@ -8,18 +304,21 @@ Added support for Exo AI provider
- Extended Conversation class to support ExoProvider.
## 2025-02-05 - 0.3.3 - fix(documentation)
Update readme with detailed license and legal information.
- Added explicit section on License and Legal Information in the README.
- Clarified the use of trademarks and company information.
## 2025-02-05 - 0.3.2 - fix(documentation)
Remove redundant badges from readme
- Removed Build Status badge from the readme file.
- Removed License badge from the readme file.
## 2025-02-05 - 0.3.1 - fix(documentation)
Updated README structure and added detailed usage examples
- Introduced a Table of Contents
@@ -28,6 +327,7 @@ Updated README structure and added detailed usage examples
- Clarified the development setup with instructions for running tests and building the project
## 2025-02-05 - 0.3.0 - feat(integration-xai)
Add support for X.AI provider with chat and document processing capabilities.
- Introduced XAIProvider class for integrating X.AI features.
@@ -35,6 +335,7 @@ Add support for X.AI provider with chat and document processing capabilities.
- Enabled document processing capabilities with PDF conversion in X.AI.
## 2025-02-03 - 0.2.0 - feat(provider.anthropic)
Add support for vision and document processing in Anthropic provider
- Implemented vision tasks for Anthropic provider using Claude-3-opus-20240229 model.
@@ -42,6 +343,7 @@ Add support for vision and document processing in Anthropic provider
- Updated documentation to reflect the new capabilities of the Anthropic provider.
## 2025-02-03 - 0.1.0 - feat(providers)
Add vision and document processing capabilities to providers
- OpenAI and Ollama providers now support vision tasks using GPT-4 Vision and Llava models respectively.
@@ -50,6 +352,7 @@ Add vision and document processing capabilities to providers
- Updated the readme file with examples for vision and document processing.
## 2025-02-03 - 0.0.19 - fix(core)
Enhanced chat streaming and error handling across providers
- Refactored chatStream method to properly handle input streams and processes in Perplexity, OpenAI, Ollama, and Anthropic providers.
@@ -58,6 +361,7 @@ Enhanced chat streaming and error handling across providers
- Adjusted the test logic in test/test.ts for the new classification response requirement.
## 2024-09-19 - 0.0.18 - fix(dependencies)
Update dependencies to the latest versions.
- Updated @git.zone/tsbuild from ^2.1.76 to ^2.1.84
@@ -71,46 +375,53 @@ Update dependencies to the latest versions.
- Updated openai from ^4.47.1 to ^4.62.1
## 2024-05-29 - 0.0.17 - Documentation
Updated project description.
- Improved project description for clarity and details.
## 2024-05-17 - 0.0.16 to 0.0.15 - Core
Fixes and updates.
- Various core updates and fixes for stability improvements.
## 2024-04-29 - 0.0.14 to 0.0.13 - Core
Fixes and updates.
- Multiple core updates and fixes for enhanced functionality.
## 2024-04-29 - 0.0.12 - Core
Fixes and updates.
- Core update and bug fixes.
## 2024-04-29 - 0.0.11 - Provider
Fix integration for anthropic provider.
- Correction in the integration process with anthropic provider for better compatibility.
## 2024-04-27 - 0.0.10 to 0.0.9 - Core
Fixes and updates.
- Updates and fixes to core components.
- Updated tsconfig for improved TypeScript configuration.
## 2024-04-01 - 0.0.8 to 0.0.7 - Core and npmextra
Core updates and npmextra configuration.
- Core fixes and updates.
- Updates to npmextra.json for githost configuration.
## 2024-03-31 - 0.0.6 to 0.0.2 - Core
Initial core updates and fixes.
- Multiple updates and fixes to core following initial versions.
This summarizes the relevant updates and changes based on the provided commit messages. The changelog excludes commits that are version tags without meaningful content or repeated entries.

View File

@@ -1,32 +1,51 @@
{
"gitzone": {
"@git.zone/cli": {
"projectType": "npm",
"module": {
"githost": "code.foss.global",
"gitscope": "push.rocks",
"gitrepo": "smartai",
"description": "A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.",
"description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
"npmPackagename": "@push.rocks/smartai",
"license": "MIT",
"projectDomain": "push.rocks",
"keywords": [
"AI integration",
"chatbot",
"TypeScript",
"chatbot",
"OpenAI",
"Anthropic",
"multi-model support",
"audio responses",
"multi-model",
"audio generation",
"text-to-speech",
"streaming chat"
"document processing",
"vision processing",
"streaming chat",
"API",
"multiple providers",
"AI models",
"synchronous chat",
"asynchronous chat",
"real-time interaction",
"content analysis",
"image description",
"document classification",
"AI toolkit",
"provider switching"
]
},
"release": {
"accessLevel": "public",
"registries": [
"https://verdaccio.lossless.digital",
"https://registry.npmjs.org"
]
}
},
"npmci": {
"npmGlobalTools": [],
"npmAccessLevel": "public"
},
"tsdoc": {
"@git.zone/tsdoc": {
"legal": "\n## License and Legal Information\n\nThis repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository. \n\n**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.\n\n### Trademarks\n\nThis project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.\n\n### Company Information\n\nTask Venture Capital GmbH \nRegistered at District court Bremen HRB 35230 HB, Germany\n\nFor any legal inquiries or if you require further information, please contact us via email at hello@task.vc.\n\nBy using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.\n"
},
"@ship.zone/szci": {
"npmGlobalTools": []
}
}

View File

@@ -1,37 +1,67 @@
{
"name": "@push.rocks/smartai",
"version": "0.4.0",
"version": "2.0.0",
"private": false,
"description": "A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.",
"description": "Provider registry and capability utilities for ai-sdk (Vercel AI SDK). Core export returns LanguageModel; subpath exports provide vision, audio, image, document and research capabilities.",
"main": "dist_ts/index.js",
"typings": "dist_ts/index.d.ts",
"type": "module",
"exports": {
".": {
"import": "./dist_ts/index.js",
"types": "./dist_ts/index.d.ts"
},
"./vision": {
"import": "./dist_ts_vision/index.js",
"types": "./dist_ts_vision/index.d.ts"
},
"./audio": {
"import": "./dist_ts_audio/index.js",
"types": "./dist_ts_audio/index.d.ts"
},
"./image": {
"import": "./dist_ts_image/index.js",
"types": "./dist_ts_image/index.d.ts"
},
"./document": {
"import": "./dist_ts_document/index.js",
"types": "./dist_ts_document/index.d.ts"
},
"./research": {
"import": "./dist_ts_research/index.js",
"types": "./dist_ts_research/index.d.ts"
}
},
"author": "Task Venture Capital GmbH",
"license": "MIT",
"scripts": {
"test": "(tstest test/ --web)",
"build": "(tsbuild --web --allowimplicitany)",
"test": "(tstest test/ --verbose --logfile)",
"typecheck": "tsbuild check",
"build": "(tsbuild tsfolders --allowimplicitany)",
"buildDocs": "(tsdoc)"
},
"devDependencies": {
"@git.zone/tsbuild": "^2.1.84",
"@git.zone/tsbundle": "^2.0.5",
"@git.zone/tsrun": "^1.2.49",
"@git.zone/tstest": "^1.0.90",
"@push.rocks/qenv": "^6.0.5",
"@push.rocks/tapbundle": "^5.3.0",
"@types/node": "^22.5.5"
"@git.zone/tsbuild": "^4.2.6",
"@git.zone/tsbundle": "^2.9.1",
"@git.zone/tsrun": "^2.0.1",
"@git.zone/tstest": "^3.2.0",
"@push.rocks/qenv": "^6.1.3",
"@types/node": "^25.3.3",
"typescript": "^5.9.3"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.27.3",
"@push.rocks/smartarray": "^1.0.8",
"@push.rocks/smartfile": "^11.0.21",
"@push.rocks/smartpath": "^5.0.18",
"@push.rocks/smartpdf": "^3.1.6",
"@push.rocks/smartpromise": "^4.0.4",
"@push.rocks/smartrequest": "^2.0.22",
"@push.rocks/webstream": "^1.0.10",
"openai": "^4.62.1"
"@ai-sdk/anthropic": "^3.0.58",
"@ai-sdk/google": "^3.0.43",
"@ai-sdk/groq": "^3.0.29",
"@ai-sdk/mistral": "^3.0.24",
"@ai-sdk/openai": "^3.0.41",
"@ai-sdk/perplexity": "^3.0.23",
"@ai-sdk/provider": "^3.0.8",
"@ai-sdk/xai": "^3.0.67",
"@anthropic-ai/sdk": "^0.78.0",
"@push.rocks/smartpdf": "^4.1.3",
"ai": "^6.0.116",
"openai": "^6.26.0"
},
"repository": {
"type": "git",
@@ -46,25 +76,46 @@
],
"files": [
"ts/**/*",
"ts_web/**/*",
"dist/**/*",
"ts_vision/**/*",
"ts_audio/**/*",
"ts_image/**/*",
"ts_document/**/*",
"ts_research/**/*",
"dist_*/**/*",
"dist_ts/**/*",
"dist_ts_web/**/*",
"assets/**/*",
"cli.js",
"npmextra.json",
"readme.md"
],
"keywords": [
"AI integration",
"chatbot",
"TypeScript",
"chatbot",
"OpenAI",
"Anthropic",
"multi-model support",
"audio responses",
"multi-model",
"audio generation",
"text-to-speech",
"streaming chat"
]
"document processing",
"vision processing",
"streaming chat",
"API",
"multiple providers",
"AI models",
"synchronous chat",
"asynchronous chat",
"real-time interaction",
"content analysis",
"image description",
"document classification",
"AI toolkit",
"provider switching"
],
"pnpm": {
"onlyBuiltDependencies": [
"esbuild",
"puppeteer"
],
"overrides": {}
},
"packageManager": "pnpm@10.7.0+sha512.6b865ad4b62a1d9842b61d674a393903b871d9244954f652b8842c2b553c72176b278f64c463e52d40fff8aba385c235c8c9ecf5cc7de4fd78b8bb6d49633ab6"
}

11534
pnpm-lock.yaml generated

File diff suppressed because it is too large Load Diff

View File

@@ -1 +1,50 @@
# SmartAI Project Hints
## Architecture (v1.0.0 - Vercel AI SDK rewrite)
The package is a **provider registry** built on the Vercel AI SDK (`ai` v6). The core export returns a `LanguageModelV3` from `@ai-sdk/provider`. Specialized capabilities are in subpath exports.
### Core Entry (`ts/`)
- `getModel(options)` → returns `LanguageModelV3` for any supported provider
- Providers: anthropic, openai, google, groq, mistral, xai, perplexity, ollama
- Anthropic prompt caching via `wrapLanguageModel` middleware (enabled by default)
- Custom Ollama provider implementing `LanguageModelV3` directly (for think, num_ctx support)
### Subpath Exports
- `@push.rocks/smartai/vision` — `analyzeImage()` using `generateText` with image content
- `@push.rocks/smartai/audio` — `textToSpeech()` using OpenAI SDK directly
- `@push.rocks/smartai/image` — `generateImage()`, `editImage()` using OpenAI SDK directly
- `@push.rocks/smartai/document` — `analyzeDocuments()` using SmartPdf + `generateText`
- `@push.rocks/smartai/research` — `research()` using `@anthropic-ai/sdk` web_search tool
## Dependencies
- `ai` ^6.0.116 — Vercel AI SDK core
- `@ai-sdk/*` — Provider packages (anthropic, openai, google, groq, mistral, xai, perplexity)
- `@ai-sdk/provider` ^3.0.8 — LanguageModelV3 types
- `@anthropic-ai/sdk` ^0.78.0 — Direct SDK for research (web search tool)
- `openai` ^6.26.0 — Direct SDK for audio TTS and image generation/editing
- `@push.rocks/smartpdf` ^4.1.3 — PDF to PNG conversion for document analysis
## Build
- `pnpm build` — `tsbuild tsfolders --allowimplicitany`
- Compiles: ts/, ts_vision/, ts_audio/, ts_image/, ts_document/, ts_research/
## Important Notes
- LanguageModelV3 uses `unified`/`raw` in FinishReason (not `type`/`rawType`)
- LanguageModelV3 system messages have `content: string` (not array)
- LanguageModelV3 file parts use `mediaType` (not `mimeType`)
- LanguageModelV3FunctionTool uses `inputSchema` (not `parameters`)
- Ollama `think` param goes at request body top level, not inside `options`
- Qwen models get default temperature 0.55 in the custom Ollama provider
- `qenv.getEnvVarOnDemand()` returns a Promise — must be awaited in tests
## Testing
```bash
pnpm test # all tests
tstest test/test.smartai.ts --verbose # core tests
tstest test/test.ollama.ts --verbose # ollama provider tests (mocked, no API needed)
```

620
readme.md
View File

@@ -1,345 +1,447 @@
# @push.rocks/smartai
[![npm version](https://badge.fury.io/js/%40push.rocks%2Fsmartai.svg)](https://www.npmjs.com/package/@push.rocks/smartai)
**A unified provider registry for the Vercel AI SDK** 🧠⚡
SmartAi is a comprehensive TypeScript library that provides a standardized interface for integrating and interacting with multiple AI models. It supports a range of operations from synchronous and streaming chat to audio generation, document processing, and vision tasks.
[![npm version](https://img.shields.io/npm/v/@push.rocks/smartai.svg)](https://www.npmjs.com/package/@push.rocks/smartai)
[![TypeScript](https://img.shields.io/badge/TypeScript-5.x-blue.svg)](https://www.typescriptlang.org/)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
## Table of Contents
SmartAI gives you a single `getModel()` function that returns a standard `LanguageModelV3` for **any** supported provider — Anthropic, OpenAI, Google, Groq, Mistral, XAI, Perplexity, or Ollama. Use the returned model with the Vercel AI SDK's `generateText()`, `streamText()`, and tool ecosystem. Specialized capabilities like vision, audio, image generation, document analysis, and web research are available as dedicated subpath imports.
- [Features](#features)
- [Installation](#installation)
- [Supported AI Providers](#supported-ai-providers)
- [Quick Start](#quick-start)
- [Usage Examples](#usage-examples)
- [Chat Interactions](#chat-interactions)
- [Streaming Chat](#streaming-chat)
- [Audio Generation](#audio-generation)
- [Document Processing](#document-processing)
- [Vision Processing](#vision-processing)
- [Error Handling](#error-handling)
- [Development](#development)
- [Running Tests](#running-tests)
- [Building the Project](#building-the-project)
- [Contributing](#contributing)
- [License](#license)
- [Legal Information](#legal-information)
## Issue Reporting and Security
## Features
For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.
- **Unified API:** Seamlessly integrate multiple AI providers with a consistent interface.
- **Chat & Streaming:** Support for both synchronous and real-time streaming chat interactions.
- **Audio & Vision:** Generate audio responses and perform detailed image analysis.
- **Document Processing:** Analyze PDFs and other documents using vision models.
- **Extensible:** Easily extend the library to support additional AI providers.
## 🎯 Why SmartAI?
## Installation
- **🔌 One function, eight providers** — `getModel()` returns a standard `LanguageModelV3`. Switch providers by changing a string.
- **🧱 Built on Vercel AI SDK** — Uses `ai` v6 under the hood. Your model works with `generateText()`, `streamText()`, tool calling, structured output, and everything else in the AI SDK ecosystem.
- **🏠 Custom Ollama provider** — A full `LanguageModelV3` implementation for Ollama with support for `think` mode, `num_ctx`, auto-tuned temperature for Qwen models, and native tool calling.
- **💰 Anthropic prompt caching** — Automatic `cacheControl` middleware reduces cost and latency on repeated calls. Enabled by default, opt out with `promptCaching: false`.
- **📦 Modular subpath exports** — Vision, audio, image, document, and research capabilities ship as separate imports. Only import what you need.
- **⚡ Zero lock-in** — Your code uses standard AI SDK types. Swap providers without touching application logic.
To install SmartAi, run the following command:
## 📦 Installation
```bash
npm install @push.rocks/smartai
pnpm install @push.rocks/smartai
```
This will add the package to your project's dependencies.
## Supported AI Providers
SmartAi supports multiple AI providers. Configure each provider with its corresponding token or settings:
### OpenAI
- **Models:** GPT-4, GPT-3.5-turbo, GPT-4-vision-preview
- **Features:** Chat, Streaming, Audio Generation, Vision, Document Processing
- **Configuration Example:**
```typescript
openaiToken: 'your-openai-token'
```
### X.AI
- **Models:** Grok-2-latest
- **Features:** Chat, Streaming, Document Processing
- **Configuration Example:**
```typescript
xaiToken: 'your-xai-token'
```
### Anthropic
- **Models:** Claude-3-opus-20240229
- **Features:** Chat, Streaming, Vision, Document Processing
- **Configuration Example:**
```typescript
anthropicToken: 'your-anthropic-token'
```
### Perplexity
- **Models:** Mixtral-8x7b-instruct
- **Features:** Chat, Streaming
- **Configuration Example:**
```typescript
perplexityToken: 'your-perplexity-token'
```
### Groq
- **Models:** Llama-3.3-70b-versatile
- **Features:** Chat, Streaming
- **Configuration Example:**
```typescript
groqToken: 'your-groq-token'
```
### Ollama
- **Models:** Configurable (default: llama2; use llava for vision/document tasks)
- **Features:** Chat, Streaming, Vision, Document Processing
- **Configuration Example:**
```typescript
ollama: {
baseUrl: 'http://localhost:11434', // Optional
model: 'llama2', // Optional
visionModel: 'llava' // Optional for vision and document tasks
}
```
### Exo
- **Models:** Configurable (supports LLaMA, Mistral, LlaVA, Qwen, and Deepseek)
- **Features:** Chat, Streaming
- **Configuration Example:**
```typescript
exo: {
baseUrl: 'http://localhost:8080/v1', // Optional
apiKey: 'your-api-key' // Optional for local deployments
}
```
## Quick Start
Initialize SmartAi with the provider configurations you plan to use:
## 🚀 Quick Start
```typescript
import { SmartAi } from '@push.rocks/smartai';
import { getModel, generateText, streamText } from '@push.rocks/smartai';
const smartAi = new SmartAi({
openaiToken: 'your-openai-token',
xaiToken: 'your-xai-token',
anthropicToken: 'your-anthropic-token',
perplexityToken: 'your-perplexity-token',
groqToken: 'your-groq-token',
ollama: {
baseUrl: 'http://localhost:11434',
model: 'llama2'
},
exo: {
baseUrl: 'http://localhost:8080/v1',
apiKey: 'your-api-key'
}
// Get a model for any provider
const model = getModel({
provider: 'anthropic',
model: 'claude-sonnet-4-5-20250929',
apiKey: process.env.ANTHROPIC_TOKEN,
});
await smartAi.start();
```
## Usage Examples
### Chat Interactions
**Synchronous Chat:**
```typescript
const response = await smartAi.openaiProvider.chat({
systemMessage: 'You are a helpful assistant.',
userMessage: 'What is the capital of France?',
messageHistory: [] // Include previous conversation messages if applicable
// Use it with the standard AI SDK functions
const result = await generateText({
model,
prompt: 'Explain quantum computing in simple terms.',
});
console.log(response.message);
console.log(result.text);
```
### Streaming Chat
That's it. Change `provider` to `'openai'` and `model` to `'gpt-4o'` and the rest of your code stays exactly the same.
**Real-Time Streaming:**
## 🔧 Core API
### `getModel(options): LanguageModelV3`
The primary export. Returns a standard `LanguageModelV3` you can use with any AI SDK function.
```typescript
const textEncoder = new TextEncoder();
const textDecoder = new TextDecoder();
import { getModel } from '@push.rocks/smartai';
import type { ISmartAiOptions } from '@push.rocks/smartai';
// Create a transform stream for sending and receiving data
const { writable, readable } = new TransformStream();
const writer = writable.getWriter();
const message = {
role: 'user',
content: 'Tell me a story about a brave knight'
const options: ISmartAiOptions = {
provider: 'anthropic', // 'anthropic' | 'openai' | 'google' | 'groq' | 'mistral' | 'xai' | 'perplexity' | 'ollama'
model: 'claude-sonnet-4-5-20250929',
apiKey: 'sk-ant-...',
// Anthropic-only: prompt caching (default: true)
promptCaching: true,
// Ollama-only: base URL (default: http://localhost:11434)
baseUrl: 'http://localhost:11434',
// Ollama-only: model runtime options
ollamaOptions: { think: true, num_ctx: 4096 },
};
writer.write(textEncoder.encode(JSON.stringify(message) + '\n'));
const model = getModel(options);
```
// Start streaming the response
const stream = await smartAi.openaiProvider.chatStream(readable);
const reader = stream.getReader();
### Re-exported AI SDK Functions
while (true) {
const { done, value } = await reader.read();
if (done) break;
console.log('AI:', value);
SmartAI re-exports the most commonly used functions from `ai` for convenience:
```typescript
import {
getModel,
generateText,
streamText,
tool,
jsonSchema,
} from '@push.rocks/smartai';
import type {
ModelMessage,
ToolSet,
StreamTextResult,
LanguageModelV3,
} from '@push.rocks/smartai';
```
## 🤖 Supported Providers
| Provider | Package | Example Models |
|----------|---------|----------------|
| **Anthropic** | `@ai-sdk/anthropic` | `claude-sonnet-4-5-20250929`, `claude-opus-4-5-20250929` |
| **OpenAI** | `@ai-sdk/openai` | `gpt-4o`, `gpt-4o-mini`, `o3-mini` |
| **Google** | `@ai-sdk/google` | `gemini-2.0-flash`, `gemini-2.5-pro` |
| **Groq** | `@ai-sdk/groq` | `llama-3.3-70b-versatile`, `mixtral-8x7b-32768` |
| **Mistral** | `@ai-sdk/mistral` | `mistral-large-latest`, `mistral-small-latest` |
| **XAI** | `@ai-sdk/xai` | `grok-3`, `grok-3-mini` |
| **Perplexity** | `@ai-sdk/perplexity` | `sonar-pro`, `sonar` |
| **Ollama** | Custom `LanguageModelV3` | `qwen3:8b`, `llama3:8b`, `deepseek-r1` |
## 💬 Text Generation
### Generate Text
```typescript
import { getModel, generateText } from '@push.rocks/smartai';
const model = getModel({
provider: 'openai',
model: 'gpt-4o',
apiKey: process.env.OPENAI_TOKEN,
});
const result = await generateText({
model,
system: 'You are a helpful assistant.',
prompt: 'What is 2 + 2?',
});
console.log(result.text); // "4"
```
### Stream Text
```typescript
import { getModel, streamText } from '@push.rocks/smartai';
const model = getModel({
provider: 'anthropic',
model: 'claude-sonnet-4-5-20250929',
apiKey: process.env.ANTHROPIC_TOKEN,
});
const result = await streamText({
model,
prompt: 'Count from 1 to 10.',
});
for await (const chunk of result.textStream) {
process.stdout.write(chunk);
}
```
### Audio Generation
Generate audio (supported by providers like OpenAI):
### Tool Calling
```typescript
const audioStream = await smartAi.openaiProvider.audio({
message: 'Hello, this is a test of text-to-speech'
import { getModel, generateText, tool, jsonSchema } from '@push.rocks/smartai';
const model = getModel({
provider: 'anthropic',
model: 'claude-sonnet-4-5-20250929',
apiKey: process.env.ANTHROPIC_TOKEN,
});
// Process the audio stream, for example, play it or save to a file.
```
### Document Processing
Analyze and extract key information from documents:
```typescript
// Example using OpenAI
const documentResult = await smartAi.openaiProvider.document({
systemMessage: 'Classify the document type',
userMessage: 'What type of document is this?',
messageHistory: [],
pdfDocuments: [pdfBuffer] // Uint8Array containing the PDF content
const result = await generateText({
model,
prompt: 'What is the weather in London?',
tools: {
getWeather: tool({
description: 'Get weather for a location',
inputSchema: jsonSchema({
type: 'object',
properties: {
location: { type: 'string' },
},
required: ['location'],
}),
execute: async ({ location }) => {
return { temperature: 18, condition: 'cloudy' };
},
}),
},
});
```
Other providers (e.g., Ollama and Anthropic) follow a similar pattern:
## 🏠 Ollama (Local Models)
The custom Ollama provider implements `LanguageModelV3` directly, calling Ollama's native `/api/chat` endpoint. This gives you features that generic OpenAI-compatible wrappers miss:
```typescript
// Using Ollama for document processing
const ollamaResult = await smartAi.ollamaProvider.document({
systemMessage: 'You are a document analysis assistant',
userMessage: 'Extract key information from this document',
messageHistory: [],
pdfDocuments: [pdfBuffer]
import { getModel, generateText } from '@push.rocks/smartai';
const model = getModel({
provider: 'ollama',
model: 'qwen3:8b',
baseUrl: 'http://localhost:11434', // default
ollamaOptions: {
think: true, // Enable thinking/reasoning mode
num_ctx: 8192, // Context window size
temperature: 0.7, // Override default (Qwen models auto-default to 0.55)
},
});
const result = await generateText({
model,
prompt: 'Solve this step by step: what is 15% of 340?',
});
console.log(result.text);
```
### Ollama Features
- **`think` mode** — Enables reasoning for models that support it (Qwen3, QwQ, DeepSeek-R1). The `think` parameter is sent at the top level of the request body as required by the Ollama API.
- **Auto-tuned temperature** — Qwen models automatically get `temperature: 0.55` when no explicit temperature is set, matching the recommended inference setting.
- **Native tool calling** — Full tool call support via Ollama's native format (not shimmed through OpenAI-compatible endpoints).
- **Streaming with reasoning** — `doStream()` emits proper `reasoning-start`, `reasoning-delta`, `reasoning-end` parts alongside text.
- **All Ollama options** — `num_ctx`, `top_k`, `top_p`, `repeat_penalty`, `num_predict`, `stop`, `seed`.
## 💰 Anthropic Prompt Caching
When using the Anthropic provider, SmartAI automatically wraps the model with caching middleware that adds `cacheControl: { type: 'ephemeral' }` to the last system message and last user message. This can significantly reduce cost and latency for repeated calls with the same system prompt.
```typescript
// Caching enabled by default
const model = getModel({
provider: 'anthropic',
model: 'claude-sonnet-4-5-20250929',
apiKey: process.env.ANTHROPIC_TOKEN,
});
// Opt out of caching
const modelNoCaching = getModel({
provider: 'anthropic',
model: 'claude-sonnet-4-5-20250929',
apiKey: process.env.ANTHROPIC_TOKEN,
promptCaching: false,
});
```
You can also use the middleware directly:
```typescript
// Using Anthropic for document processing
const anthropicResult = await smartAi.anthropicProvider.document({
systemMessage: 'Analyze the document',
userMessage: 'Please extract the main points',
messageHistory: [],
pdfDocuments: [pdfBuffer]
});
import { createAnthropicCachingMiddleware } from '@push.rocks/smartai';
import { wrapLanguageModel } from 'ai';
const middleware = createAnthropicCachingMiddleware();
const cachedModel = wrapLanguageModel({ model: baseModel, middleware });
```
### Vision Processing
## 📦 Subpath Exports
Analyze images with vision capabilities:
SmartAI provides specialized capabilities as separate subpath imports. Each one is a focused utility that takes a model (or API key) and does one thing well.
### 👁️ Vision — `@push.rocks/smartai/vision`
Analyze images using any vision-capable model.
```typescript
// Using OpenAI GPT-4 Vision
const imageDescription = await smartAi.openaiProvider.vision({
image: imageBuffer, // Uint8Array containing image data
prompt: 'What do you see in this image?'
import { analyzeImage } from '@push.rocks/smartai/vision';
import { getModel } from '@push.rocks/smartai';
import * as fs from 'fs';
const model = getModel({
provider: 'anthropic',
model: 'claude-sonnet-4-5-20250929',
apiKey: process.env.ANTHROPIC_TOKEN,
});
// Using Ollama for vision tasks
const ollamaImageAnalysis = await smartAi.ollamaProvider.vision({
const description = await analyzeImage({
model,
image: fs.readFileSync('photo.jpg'),
prompt: 'Describe this image in detail.',
mediaType: 'image/jpeg', // optional, defaults to 'image/jpeg'
});
console.log(description);
```
**`analyzeImage(options)`** accepts:
- `model` — Any `LanguageModelV3` with vision support
- `image` — `Buffer` or `Uint8Array`
- `prompt` — What to ask about the image
- `mediaType` — `'image/jpeg'` | `'image/png'` | `'image/webp'` | `'image/gif'`
### 🎙️ Audio — `@push.rocks/smartai/audio`
Text-to-speech using OpenAI's TTS models.
```typescript
import { textToSpeech } from '@push.rocks/smartai/audio';
import * as fs from 'fs';
const stream = await textToSpeech({
apiKey: process.env.OPENAI_TOKEN,
text: 'Welcome to the future of AI development!',
voice: 'nova', // 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer'
model: 'tts-1-hd', // 'tts-1' | 'tts-1-hd'
responseFormat: 'mp3', // 'mp3' | 'opus' | 'aac' | 'flac'
speed: 1.0, // 0.25 to 4.0
});
stream.pipe(fs.createWriteStream('welcome.mp3'));
```
### 🎨 Image — `@push.rocks/smartai/image`
Generate and edit images using OpenAI's image models.
```typescript
import { generateImage, editImage } from '@push.rocks/smartai/image';
// Generate an image
const result = await generateImage({
apiKey: process.env.OPENAI_TOKEN,
prompt: 'A futuristic cityscape at sunset, digital art',
model: 'gpt-image-1', // 'gpt-image-1' | 'dall-e-3' | 'dall-e-2'
quality: 'high', // 'low' | 'medium' | 'high' | 'auto'
size: '1024x1024',
background: 'transparent', // gpt-image-1 only
outputFormat: 'png', // 'png' | 'jpeg' | 'webp'
n: 1,
});
// result.images[0].b64_json — base64-encoded image data
const imageBuffer = Buffer.from(result.images[0].b64_json!, 'base64');
// Edit an existing image
const edited = await editImage({
apiKey: process.env.OPENAI_TOKEN,
image: imageBuffer,
prompt: 'Analyze this image in detail'
});
// Using Anthropic for vision analysis
const anthropicImageAnalysis = await smartAi.anthropicProvider.vision({
image: imageBuffer,
prompt: 'Describe the contents of this image'
prompt: 'Add a rainbow in the sky',
model: 'gpt-image-1',
});
```
## Error Handling
### 📄 Document — `@push.rocks/smartai/document`
Always wrap API calls in try-catch blocks to manage errors effectively:
Analyze PDF documents by converting them to images and using a vision model. Uses `@push.rocks/smartpdf` for PDF-to-PNG conversion (requires Chromium/Puppeteer).
```typescript
try {
const response = await smartAi.openaiProvider.chat({
systemMessage: 'You are a helpful assistant.',
userMessage: 'Hello!',
messageHistory: []
});
console.log(response.message);
} catch (error: any) {
console.error('AI provider error:', error.message);
}
import { analyzeDocuments, stopSmartpdf } from '@push.rocks/smartai/document';
import { getModel } from '@push.rocks/smartai';
import * as fs from 'fs';
const model = getModel({
provider: 'anthropic',
model: 'claude-sonnet-4-5-20250929',
apiKey: process.env.ANTHROPIC_TOKEN,
});
const analysis = await analyzeDocuments({
model,
systemMessage: 'You are a legal document analyst.',
userMessage: 'Summarize the key terms and conditions.',
pdfDocuments: [fs.readFileSync('contract.pdf')],
messageHistory: [], // optional: prior conversation context
});
console.log(analysis);
// Clean up the SmartPdf instance when done
await stopSmartpdf();
```
## Development
### 🔬 Research — `@push.rocks/smartai/research`
### Running Tests
Perform web-search-powered research using Anthropic's `web_search_20250305` tool.
To run the test suite, use the following command:
```typescript
import { research } from '@push.rocks/smartai/research';
const result = await research({
apiKey: process.env.ANTHROPIC_TOKEN,
query: 'What are the latest developments in quantum computing?',
searchDepth: 'basic', // 'basic' | 'advanced' | 'deep'
maxSources: 10, // optional: limit number of search results
allowedDomains: ['nature.com', 'arxiv.org'], // optional: restrict to domains
blockedDomains: ['reddit.com'], // optional: exclude domains
});
console.log(result.answer);
console.log('Sources:', result.sources); // Array<{ url, title, snippet }>
console.log('Queries:', result.searchQueries); // search queries the model used
```
## 🧪 Testing
```bash
# All tests
npm run test
# or, equivalently
pnpm test
# Individual test files
tstest test/test.smartai.ts --verbose # Core getModel + generateText + streamText
tstest test/test.ollama.ts --verbose # Ollama provider (mocked, no API needed)
tstest test/test.vision.ts --verbose # Vision analysis
tstest test/test.image.ts --verbose # Image generation
tstest test/test.research.ts --verbose # Web research
tstest test/test.audio.ts --verbose # Text-to-speech
tstest test/test.document.ts --verbose # Document analysis (needs Chromium)
```
Ensure your environment is configured with the appropriate tokens and settings for the providers you are testing.
Most tests skip gracefully when API keys are not set. The Ollama tests are fully mocked and require no external services.
### Building the Project
## 📐 Architecture
Compile the TypeScript code and build the package using:
```bash
npm run build
```
```
@push.rocks/smartai
├── ts/ # Core package
│ ├── index.ts # Re-exports getModel, AI SDK functions, types
│ ├── smartai.classes.smartai.ts # getModel() — provider switch
│ ├── smartai.interfaces.ts # ISmartAiOptions, TProvider, IOllamaModelOptions
│ ├── smartai.provider.ollama.ts # Custom LanguageModelV3 for Ollama
│ ├── smartai.middleware.anthropic.ts # Prompt caching middleware
│ └── plugins.ts # AI SDK provider factories
├── ts_vision/ # @push.rocks/smartai/vision
├── ts_audio/ # @push.rocks/smartai/audio
├── ts_image/ # @push.rocks/smartai/image
├── ts_document/ # @push.rocks/smartai/document
└── ts_research/ # @push.rocks/smartai/research
```
This command prepares the library for distribution.
The core package is a thin registry. `getModel()` creates the appropriate `@ai-sdk/*` provider, calls it with the model ID, and returns the resulting `LanguageModelV3`. For Anthropic, it optionally wraps the model with prompt caching middleware. For Ollama, it returns a custom `LanguageModelV3` implementation that talks directly to Ollama's `/api/chat` endpoint.
## Contributing
Contributions are welcome! Please follow these steps:
1. Fork the repository.
2. Create a feature branch:
```bash
git checkout -b feature/my-feature
```
3. Commit your changes with clear messages:
```bash
git commit -m 'Add new feature'
```
4. Push your branch to your fork:
```bash
git push origin feature/my-feature
```
5. Open a Pull Request with a detailed description of your changes.
Subpath modules are independent — they import `ai` and provider SDKs directly, not through the core package. This keeps the dependency graph clean and allows tree-shaking.
## License and Legal Information
This repository contains open-source code licensed under the MIT License. A copy of the license can be found in the [LICENSE](./LICENSE) file.
**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
### Trademarks
This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH or third parties, and are not included within the scope of the MIT license granted herein.
Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines or the guidelines of the respective third-party owners, and any usage must be approved in writing. Third-party trademarks used herein are the property of their respective owners and used only in a descriptive manner, e.g. for an implementation of an API or similar.
### Company Information
Task Venture Capital GmbH
Registered at District Court Bremen HRB 35230 HB, Germany
For any legal inquiries or further information, please contact us via email at hello@task.vc.
By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.

36
test/test.audio.ts Normal file
View File

@@ -0,0 +1,36 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as qenv from '@push.rocks/qenv';
import { textToSpeech } from '../ts_audio/index.js';

const testQenv = new qenv.Qenv('./', './.nogit/');

// Exercises the OpenAI text-to-speech wrapper end-to-end; skips when no key is set.
tap.test('textToSpeech should return a readable stream', async () => {
  const openaiKey = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
  if (!openaiKey) {
    console.log('OPENAI_TOKEN not set, skipping test');
    return;
  }
  const audioStream = await textToSpeech({
    apiKey: openaiKey,
    text: 'Hello, this is a test of the text to speech system.',
    voice: 'alloy',
    model: 'tts-1',
  });
  expect(audioStream).toBeTruthy();
  expect(audioStream.readable).toBeTrue();
  // Pull a few chunks to confirm real audio bytes are actually flowing.
  const collected: Buffer[] = [];
  for await (const piece of audioStream) {
    collected.push(Buffer.from(piece));
    if (collected.length > 2) break; // Just read a few chunks to verify
  }
  let totalBytes = 0;
  for (const part of collected) {
    totalBytes += part.length;
  }
  console.log(`Audio stream produced ${totalBytes} bytes in ${collected.length} chunks`);
  expect(totalBytes).toBeGreaterThan(0);
});

export default tap.start();

50
test/test.document.ts Normal file
View File

@@ -0,0 +1,50 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as qenv from '@push.rocks/qenv';
import { getModel } from '../ts/index.js';
import { analyzeDocuments, stopSmartpdf } from '../ts_document/index.js';

const testQenv = new qenv.Qenv('./', './.nogit/');

// Verifies PDF analysis via the document subpath module; skips without credentials.
tap.test('analyzeDocuments should analyze a PDF', async () => {
  const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
  if (!apiKey) {
    console.log('ANTHROPIC_TOKEN not set, skipping test');
    return;
  }
  // Create a minimal test PDF (this is a valid minimal PDF)
  const minimalPdf = Buffer.from(
    '%PDF-1.0\n1 0 obj<</Type/Catalog/Pages 2 0 R>>endobj\n' +
    '2 0 obj<</Type/Pages/Kids[3 0 R]/Count 1>>endobj\n' +
    '3 0 obj<</Type/Page/MediaBox[0 0 612 792]/Parent 2 0 R/Contents 4 0 R/Resources<</Font<</F1 5 0 R>>>>>>endobj\n' +
    '4 0 obj<</Length 44>>stream\nBT /F1 12 Tf 100 700 Td (Hello World) Tj ET\nendstream\nendobj\n' +
    '5 0 obj<</Type/Font/Subtype/Type1/BaseFont/Helvetica>>endobj\n' +
    'xref\n0 6\n0000000000 65535 f \n0000000009 00000 n \n0000000058 00000 n \n0000000115 00000 n \n0000000266 00000 n \n0000000360 00000 n \n' +
    'trailer<</Size 6/Root 1 0 R>>\nstartxref\n434\n%%EOF'
  );
  const model = getModel({
    provider: 'anthropic',
    model: 'claude-sonnet-4-5-20250929',
    apiKey,
    promptCaching: false,
  });
  try {
    const result = await analyzeDocuments({
      model,
      systemMessage: 'You are a document analysis assistant.',
      userMessage: 'What text is visible in this document?',
      pdfDocuments: [minimalPdf],
    });
    console.log('Document analysis result:', result);
    expect(result).toBeTruthy();
  } catch (error: unknown) {
    // Fix: catch variables are `unknown` under strict TS; narrow before reading
    // .message so a non-Error throw (string, object) cannot crash the logger.
    const message = error instanceof Error ? error.message : String(error);
    console.log('Document test failed (may need puppeteer):', message);
  } finally {
    // Always tear down the shared SmartPdf/Chromium instance, even on failure.
    await stopSmartpdf();
  }
});

export default tap.start();

35
test/test.image.ts Normal file
View File

@@ -0,0 +1,35 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as qenv from '@push.rocks/qenv';
import { generateImage } from '../ts_image/index.js';

const testQenv = new qenv.Qenv('./', './.nogit/');

// Smoke test for image generation; a single low-quality render keeps it cheap.
tap.test('generateImage should return an image response', async () => {
  const openaiKey = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
  if (!openaiKey) {
    console.log('OPENAI_TOKEN not set, skipping test');
    return;
  }
  const response = await generateImage({
    apiKey: openaiKey,
    prompt: 'A simple red circle on a white background',
    model: 'gpt-image-1',
    size: '1024x1024',
    quality: 'low',
    n: 1,
  });
  console.log('Image generation result: images count =', response.images.length);
  expect(response.images).toBeArray();
  expect(response.images.length).toBeGreaterThan(0);
  const [firstImage] = response.images;
  // gpt-image-1 returns b64_json by default
  expect(firstImage.b64_json || firstImage.url).toBeTruthy();
  expect(response.metadata).toBeTruthy();
  expect(response.metadata!.model).toEqual('gpt-image-1');
});

export default tap.start();

390
test/test.ollama.ts Normal file
View File

@@ -0,0 +1,390 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import { createOllamaModel } from '../ts/smartai.provider.ollama.js';
// NOTE(review): ISmartAiOptions appears unused in this file — confirm and drop if so.
import type { ISmartAiOptions } from '../ts/smartai.interfaces.js';
// These tests are fully offline: each one swaps globalThis.fetch for a stub that
// captures the request body and/or returns a canned Ollama response, and restores
// the real fetch in a finally block.
// Shape sanity check on the custom Ollama LanguageModelV3 implementation.
tap.test('createOllamaModel returns valid LanguageModelV3', async () => {
const model = createOllamaModel({
provider: 'ollama',
model: 'qwen3:8b',
ollamaOptions: { think: true, num_ctx: 4096 },
});
expect(model.specificationVersion).toEqual('v3');
expect(model.provider).toEqual('ollama');
expect(model.modelId).toEqual('qwen3:8b');
expect(model).toHaveProperty('doGenerate');
expect(model).toHaveProperty('doStream');
});
// Qwen-family models should receive an implicit temperature of 0.55 in options.
tap.test('Qwen models get default temperature 0.55', async () => {
// Mock fetch to capture the request body
const originalFetch = globalThis.fetch;
let capturedBody: Record<string, unknown> | undefined;
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
capturedBody = JSON.parse(init?.body as string);
return new Response(JSON.stringify({
message: { content: 'test response', role: 'assistant' },
done: true,
prompt_eval_count: 10,
eval_count: 5,
}), { status: 200 });
};
try {
const model = createOllamaModel({
provider: 'ollama',
model: 'qwen3:8b',
});
await model.doGenerate({
prompt: [{ role: 'user', content: [{ type: 'text', text: 'hello' }] }],
inputFormat: 'prompt',
} as any);
expect(capturedBody).toBeTruthy();
// Temperature 0.55 should be in the options
expect((capturedBody!.options as Record<string, unknown>).temperature).toEqual(0.55);
} finally {
// Restore the real fetch even if an assertion above throws.
globalThis.fetch = originalFetch;
}
});
// Ollama expects `think` at the top level of the request, while sampling
// parameters like num_ctx live inside `options`.
tap.test('think option is passed at top level of request body', async () => {
const originalFetch = globalThis.fetch;
let capturedBody: Record<string, unknown> | undefined;
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
capturedBody = JSON.parse(init?.body as string);
return new Response(JSON.stringify({
message: { content: 'test', role: 'assistant', thinking: 'let me think...' },
done: true,
prompt_eval_count: 10,
eval_count: 5,
}), { status: 200 });
};
try {
const model = createOllamaModel({
provider: 'ollama',
model: 'qwen3:8b',
ollamaOptions: { think: true, num_ctx: 4096 },
});
await model.doGenerate({
prompt: [{ role: 'user', content: [{ type: 'text', text: 'hello' }] }],
inputFormat: 'prompt',
} as any);
expect(capturedBody).toBeTruthy();
// think should be at top level, not inside options
expect(capturedBody!.think).toEqual(true);
// num_ctx should be in options
expect((capturedBody!.options as Record<string, unknown>).num_ctx).toEqual(4096);
} finally {
globalThis.fetch = originalFetch;
}
});
// The 0.55 default must be Qwen-specific: llama models get no temperature.
tap.test('Non-qwen models do not get default temperature', async () => {
const originalFetch = globalThis.fetch;
let capturedBody: Record<string, unknown> | undefined;
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
capturedBody = JSON.parse(init?.body as string);
return new Response(JSON.stringify({
message: { content: 'test', role: 'assistant' },
done: true,
}), { status: 200 });
};
try {
const model = createOllamaModel({
provider: 'ollama',
model: 'llama3:8b',
});
await model.doGenerate({
prompt: [{ role: 'user', content: [{ type: 'text', text: 'hello' }] }],
inputFormat: 'prompt',
} as any);
expect(capturedBody).toBeTruthy();
// No temperature should be set
expect((capturedBody!.options as Record<string, unknown>).temperature).toBeUndefined();
} finally {
globalThis.fetch = originalFetch;
}
});
// A `thinking` field in the Ollama response must surface as a separate
// 'reasoning' content part alongside the 'text' part.
tap.test('doGenerate parses reasoning/thinking from response', async () => {
const originalFetch = globalThis.fetch;
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
return new Response(JSON.stringify({
message: {
content: 'The answer is 42.',
role: 'assistant',
thinking: 'Let me reason about this carefully...',
},
done: true,
prompt_eval_count: 20,
eval_count: 15,
}), { status: 200 });
};
try {
const model = createOllamaModel({
provider: 'ollama',
model: 'qwen3:8b',
ollamaOptions: { think: true },
});
const result = await model.doGenerate({
prompt: [{ role: 'user', content: [{ type: 'text', text: 'What is the meaning of life?' }] }],
} as any);
// Should have both reasoning and text content
const reasoningParts = result.content.filter(c => c.type === 'reasoning');
const textParts = result.content.filter(c => c.type === 'text');
expect(reasoningParts.length).toEqual(1);
expect((reasoningParts[0] as any).text).toEqual('Let me reason about this carefully...');
expect(textParts.length).toEqual(1);
expect((textParts[0] as any).text).toEqual('The answer is 42.');
expect(result.finishReason.unified).toEqual('stop');
} finally {
globalThis.fetch = originalFetch;
}
});
// Native tool calls: Ollama's function-call objects must map to AI SDK
// 'tool-call' parts with JSON-string input, and the finish reason must flip
// to 'tool-calls'.
tap.test('doGenerate parses tool calls from response', async () => {
const originalFetch = globalThis.fetch;
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
return new Response(JSON.stringify({
message: {
content: '',
role: 'assistant',
tool_calls: [
{
function: {
name: 'get_weather',
arguments: { location: 'London', unit: 'celsius' },
},
},
],
},
done: true,
prompt_eval_count: 30,
eval_count: 10,
}), { status: 200 });
};
try {
const model = createOllamaModel({
provider: 'ollama',
model: 'qwen3:8b',
});
const result = await model.doGenerate({
prompt: [{ role: 'user', content: [{ type: 'text', text: 'What is the weather in London?' }] }],
tools: [{
type: 'function' as const,
name: 'get_weather',
description: 'Get weather for a location',
inputSchema: {
type: 'object',
properties: {
location: { type: 'string' },
unit: { type: 'string' },
},
},
}],
} as any);
const toolCalls = result.content.filter(c => c.type === 'tool-call');
expect(toolCalls.length).toEqual(1);
expect((toolCalls[0] as any).toolName).toEqual('get_weather');
expect(JSON.parse((toolCalls[0] as any).input)).toEqual({ location: 'London', unit: 'celsius' });
expect(result.finishReason.unified).toEqual('tool-calls');
} finally {
globalThis.fetch = originalFetch;
}
});
// Streaming: Ollama emits newline-delimited JSON; the model must translate it
// into text-start / text-delta* / text-end / finish stream parts.
tap.test('doStream produces correct stream parts', async () => {
const originalFetch = globalThis.fetch;
// Simulate Ollama's newline-delimited JSON streaming
const chunks = [
JSON.stringify({ message: { content: 'Hello', role: 'assistant' }, done: false }) + '\n',
JSON.stringify({ message: { content: ' world', role: 'assistant' }, done: false }) + '\n',
JSON.stringify({ message: { content: '!', role: 'assistant' }, done: true, prompt_eval_count: 5, eval_count: 3 }) + '\n',
];
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
const encoder = new TextEncoder();
const stream = new ReadableStream({
start(controller) {
for (const chunk of chunks) {
controller.enqueue(encoder.encode(chunk));
}
controller.close();
},
});
return new Response(stream, { status: 200 });
};
try {
const model = createOllamaModel({
provider: 'ollama',
model: 'llama3:8b',
});
const result = await model.doStream({
prompt: [{ role: 'user', content: [{ type: 'text', text: 'hello' }] }],
} as any);
const parts: any[] = [];
const reader = result.stream.getReader();
while (true) {
const { done, value } = await reader.read();
if (done) break;
parts.push(value);
}
// Should have: text-start, text-delta x3, text-end, finish
const textDeltas = parts.filter(p => p.type === 'text-delta');
const finishParts = parts.filter(p => p.type === 'finish');
const textStarts = parts.filter(p => p.type === 'text-start');
const textEnds = parts.filter(p => p.type === 'text-end');
expect(textStarts.length).toEqual(1);
expect(textDeltas.length).toEqual(3);
expect(textDeltas.map((d: any) => d.delta).join('')).toEqual('Hello world!');
expect(textEnds.length).toEqual(1);
expect(finishParts.length).toEqual(1);
expect(finishParts[0].finishReason.unified).toEqual('stop');
} finally {
globalThis.fetch = originalFetch;
}
});
// Streaming with thinking enabled: thinking deltas must become
// reasoning-start / reasoning-delta / reasoning-end before the text part.
tap.test('doStream handles thinking/reasoning in stream', async () => {
const originalFetch = globalThis.fetch;
const chunks = [
JSON.stringify({ message: { thinking: 'Let me think...', content: '', role: 'assistant' }, done: false }) + '\n',
JSON.stringify({ message: { thinking: ' about this.', content: '', role: 'assistant' }, done: false }) + '\n',
JSON.stringify({ message: { content: 'The answer.', role: 'assistant' }, done: false }) + '\n',
JSON.stringify({ message: { content: '', role: 'assistant' }, done: true, prompt_eval_count: 10, eval_count: 8 }) + '\n',
];
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
const encoder = new TextEncoder();
const stream = new ReadableStream({
start(controller) {
for (const chunk of chunks) {
controller.enqueue(encoder.encode(chunk));
}
controller.close();
},
});
return new Response(stream, { status: 200 });
};
try {
const model = createOllamaModel({
provider: 'ollama',
model: 'qwen3:8b',
ollamaOptions: { think: true },
});
const result = await model.doStream({
prompt: [{ role: 'user', content: [{ type: 'text', text: 'think about this' }] }],
} as any);
const parts: any[] = [];
const reader = result.stream.getReader();
while (true) {
const { done, value } = await reader.read();
if (done) break;
parts.push(value);
}
const reasoningStarts = parts.filter(p => p.type === 'reasoning-start');
const reasoningDeltas = parts.filter(p => p.type === 'reasoning-delta');
const reasoningEnds = parts.filter(p => p.type === 'reasoning-end');
const textDeltas = parts.filter(p => p.type === 'text-delta');
expect(reasoningStarts.length).toEqual(1);
expect(reasoningDeltas.length).toEqual(2);
expect(reasoningDeltas.map((d: any) => d.delta).join('')).toEqual('Let me think... about this.');
expect(reasoningEnds.length).toEqual(1);
expect(textDeltas.length).toEqual(1);
expect(textDeltas[0].delta).toEqual('The answer.');
} finally {
globalThis.fetch = originalFetch;
}
});
// Prompt conversion: AI SDK system/assistant/tool messages must flatten to
// Ollama chat messages, preserving tool_calls on the assistant turn (this is
// what prevents native-tool-calling loops — see fix 8eb3111).
tap.test('message conversion handles system, assistant, and tool messages', async () => {
const originalFetch = globalThis.fetch;
let capturedBody: Record<string, unknown> | undefined;
globalThis.fetch = async (input: RequestInfo | URL, init?: RequestInit) => {
capturedBody = JSON.parse(init?.body as string);
return new Response(JSON.stringify({
message: { content: 'response', role: 'assistant' },
done: true,
}), { status: 200 });
};
try {
const model = createOllamaModel({
provider: 'ollama',
model: 'llama3:8b',
});
await model.doGenerate({
prompt: [
{ role: 'system', content: 'You are helpful.' },
{ role: 'user', content: [{ type: 'text', text: 'Hi' }] },
{
role: 'assistant',
content: [
{ type: 'text', text: 'Let me check.' },
{ type: 'tool-call', toolCallId: 'tc1', toolName: 'search', input: '{"q":"test"}' },
],
},
{
role: 'tool',
content: [
{ type: 'tool-result', toolCallId: 'tc1', output: { type: 'text', value: 'result data' } },
],
},
{ role: 'user', content: [{ type: 'text', text: 'What did you find?' }] },
],
} as any);
const messages = capturedBody!.messages as Array<Record<string, unknown>>;
expect(messages.length).toEqual(5);
expect(messages[0].role).toEqual('system');
expect(messages[0].content).toEqual('You are helpful.');
expect(messages[1].role).toEqual('user');
expect(messages[1].content).toEqual('Hi');
expect(messages[2].role).toEqual('assistant');
expect(messages[2].content).toEqual('Let me check.');
expect((messages[2].tool_calls as any[]).length).toEqual(1);
expect((messages[2].tool_calls as any[])[0].function.name).toEqual('search');
expect(messages[3].role).toEqual('tool');
expect(messages[3].content).toEqual('result data');
expect(messages[4].role).toEqual('user');
expect(messages[4].content).toEqual('What did you find?');
} finally {
globalThis.fetch = originalFetch;
}
});
export default tap.start();

31
test/test.research.ts Normal file
View File

@@ -0,0 +1,31 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as qenv from '@push.rocks/qenv';
import { research } from '../ts_research/index.js';

const testQenv = new qenv.Qenv('./', './.nogit/');

// Runs a basic-depth web research query through Anthropic; skips without a key.
tap.test('research should return answer and sources', async () => {
  const anthropicKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
  if (!anthropicKey) {
    console.log('ANTHROPIC_TOKEN not set, skipping test');
    return;
  }
  const outcome = await research({
    apiKey: anthropicKey,
    query: 'What is the current version of Node.js?',
    searchDepth: 'basic',
  });
  console.log('Research answer:', outcome.answer.substring(0, 200));
  console.log('Research sources:', outcome.sources.length);
  if (outcome.searchQueries) {
    console.log('Search queries:', outcome.searchQueries);
  }
  // A non-trivial answer plus a sources array is the minimal success contract.
  expect(outcome.answer).toBeTruthy();
  expect(outcome.answer.length).toBeGreaterThan(10);
  expect(outcome.sources).toBeArray();
});

export default tap.start();

161
test/test.smartai.ts Normal file
View File

@@ -0,0 +1,161 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as qenv from '@push.rocks/qenv';
import * as smartai from '../ts/index.js';
// Live-API tests for the core getModel registry; each skips when its key is absent.
const testQenv = new qenv.Qenv('./', './.nogit/');
// getModel must return an object implementing the LanguageModelV3 surface.
tap.test('getModel should return a LanguageModelV3 for anthropic', async () => {
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
if (!apiKey) {
console.log('ANTHROPIC_TOKEN not set, skipping test');
return;
}
const model = smartai.getModel({
provider: 'anthropic',
model: 'claude-sonnet-4-5-20250929',
apiKey,
});
expect(model).toHaveProperty('specificationVersion');
expect(model).toHaveProperty('provider');
expect(model).toHaveProperty('modelId');
expect(model).toHaveProperty('doGenerate');
expect(model).toHaveProperty('doStream');
});
// Both the cached (default) and uncached variants must be usable models.
tap.test('getModel with anthropic prompt caching returns wrapped model', async () => {
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
if (!apiKey) {
console.log('ANTHROPIC_TOKEN not set, skipping test');
return;
}
// Default: prompt caching enabled
const model = smartai.getModel({
provider: 'anthropic',
model: 'claude-sonnet-4-5-20250929',
apiKey,
});
// With caching disabled
const modelNoCaching = smartai.getModel({
provider: 'anthropic',
model: 'claude-sonnet-4-5-20250929',
apiKey,
promptCaching: false,
});
// Both should be valid models
expect(model).toHaveProperty('doGenerate');
expect(modelNoCaching).toHaveProperty('doGenerate');
});
// End-to-end text generation via the AI SDK generateText re-export (Anthropic).
tap.test('generateText with anthropic model', async () => {
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
if (!apiKey) {
console.log('ANTHROPIC_TOKEN not set, skipping test');
return;
}
const model = smartai.getModel({
provider: 'anthropic',
model: 'claude-sonnet-4-5-20250929',
apiKey,
});
const result = await smartai.generateText({
model,
prompt: 'Say hello in exactly 3 words.',
});
console.log('Anthropic response:', result.text);
expect(result.text).toBeTruthy();
expect(result.text.length).toBeGreaterThan(0);
});
// Same registry contract for the OpenAI provider.
tap.test('getModel should return a LanguageModelV3 for openai', async () => {
const apiKey = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
if (!apiKey) {
console.log('OPENAI_TOKEN not set, skipping test');
return;
}
const model = smartai.getModel({
provider: 'openai',
model: 'gpt-4o-mini',
apiKey,
});
expect(model).toHaveProperty('doGenerate');
expect(model).toHaveProperty('doStream');
});
// Streaming: collect all deltas and require more than one chunk to prove
// the response actually arrived incrementally.
tap.test('streamText with anthropic model', async () => {
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
if (!apiKey) {
console.log('ANTHROPIC_TOKEN not set, skipping test');
return;
}
const model = smartai.getModel({
provider: 'anthropic',
model: 'claude-sonnet-4-5-20250929',
apiKey,
});
const result = await smartai.streamText({
model,
prompt: 'Count from 1 to 5.',
});
const tokens: string[] = [];
for await (const chunk of result.textStream) {
tokens.push(chunk);
}
const fullText = tokens.join('');
console.log('Streamed text:', fullText);
expect(fullText).toBeTruthy();
expect(fullText.length).toBeGreaterThan(0);
expect(tokens.length).toBeGreaterThan(1); // Should have multiple chunks
});
// End-to-end text generation with OpenAI; the 2+2 prompt gives a checkable answer.
tap.test('generateText with openai model', async () => {
const apiKey = await testQenv.getEnvVarOnDemand('OPENAI_TOKEN');
if (!apiKey) {
console.log('OPENAI_TOKEN not set, skipping test');
return;
}
const model = smartai.getModel({
provider: 'openai',
model: 'gpt-4o-mini',
apiKey,
});
const result = await smartai.generateText({
model,
prompt: 'What is 2+2? Reply with just the number.',
});
console.log('OpenAI response:', result.text);
expect(result.text).toBeTruthy();
expect(result.text).toInclude('4');
});
// getModel must reject provider ids outside TProvider with a descriptive error.
tap.test('getModel should throw for unknown provider', async () => {
  let threw = false;
  try {
    smartai.getModel({
      provider: 'nonexistent' as any,
      model: 'test',
    });
  } catch (e: unknown) {
    threw = true;
    // Fix: catch variables are `unknown` under strict TS; narrow to Error
    // before reading .message instead of assuming the shape of the throw.
    expect(e instanceof Error).toBeTrue();
    expect((e as Error).message).toInclude('Unknown provider');
  }
  expect(threw).toBeTrue();
});

export default tap.start();

View File

@@ -1,84 +0,0 @@
import { expect, expectAsync, tap } from '@push.rocks/tapbundle';
import * as qenv from '@push.rocks/qenv';
import * as smartrequest from '@push.rocks/smartrequest';
import * as smartfile from '@push.rocks/smartfile';
const testQenv = new qenv.Qenv('./', './.nogit/');
import * as smartai from '../ts/index.js';
let testSmartai: smartai.SmartAi;
tap.test('should create a smartai instance', async () => {
testSmartai = new smartai.SmartAi({
openaiToken: await testQenv.getEnvVarOnDemand('OPENAI_TOKEN'),
});
await testSmartai.start();
});
tap.test('should create chat response with openai', async () => {
const userMessage = 'How are you?';
const response = await testSmartai.openaiProvider.chat({
systemMessage: 'Hello',
userMessage: userMessage,
messageHistory: [
],
});
console.log(`userMessage: ${userMessage}`);
console.log(response.message);
});
tap.test('should document a pdf', async () => {
const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
const pdfResponse = await smartrequest.getBinary(pdfUrl);
const result = await testSmartai.openaiProvider.document({
systemMessage: 'Classify the document. Only the following answers are allowed: "invoice", "bank account statement", "contract", "other". The answer should only contain the keyword for machine use.',
userMessage: "Classify the document.",
messageHistory: [],
pdfDocuments: [pdfResponse.body],
});
console.log(result);
});
tap.test('should recognize companies in a pdf', async () => {
const pdfBuffer = await smartfile.fs.toBuffer('./.nogit/demo_without_textlayer.pdf');
const result = await testSmartai.openaiProvider.document({
systemMessage: `
summarize the document.
answer in JSON format, adhering to the following schema:
\`\`\`typescript
type TAnswer = {
entitySender: {
type: 'official state entity' | 'company' | 'person';
name: string;
address: string;
city: string;
country: string;
EU: boolean; // wether the entity is within EU
};
entityReceiver: {
type: 'official state entity' | 'company' | 'person';
name: string;
address: string;
city: string;
country: string;
EU: boolean; // wether the entity is within EU
};
date: string; // the date of the document as YYYY-MM-DD
title: string; // a short title, suitable for a filename
}
\`\`\`
`,
userMessage: "Classify the document.",
messageHistory: [],
pdfDocuments: [pdfBuffer],
});
console.log(result);
})
tap.test('should stop the smartai instance', async () => {
await testSmartai.stop();
});
export default tap.start();

66
test/test.vision.ts Normal file
View File

@@ -0,0 +1,66 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as qenv from '@push.rocks/qenv';
import * as fs from 'fs';
import * as path from 'path';
import { getModel } from '../ts/index.js';
import { analyzeImage } from '../ts_vision/index.js';

const testQenv = new qenv.Qenv('./', './.nogit/');

// Describes the first jpg/jpeg/png found under test/testimages via Anthropic vision.
tap.test('analyzeImage should describe a test image', async () => {
  const anthropicKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
  if (!anthropicKey) {
    console.log('ANTHROPIC_TOKEN not set, skipping test');
    return;
  }
  // Find an image file recursively in testimages/
  const testImageDir = path.join(process.cwd(), 'test', 'testimages');
  if (!fs.existsSync(testImageDir)) {
    console.log('No test images directory found, skipping test');
    return;
  }
  // Depth-first search, descending into subdirectories in readdir order.
  const locateImage = (dir: string): string | null => {
    for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
      const entryPath = path.join(dir, entry.name);
      if (!entry.isDirectory()) {
        if (/\.(jpg|jpeg|png)$/i.test(entry.name)) {
          return entryPath;
        }
        continue;
      }
      const nested = locateImage(entryPath);
      if (nested) {
        return nested;
      }
    }
    return null;
  };
  const imagePath = locateImage(testImageDir);
  if (!imagePath) {
    console.log('No test images found, skipping test');
    return;
  }
  const imageBuffer = fs.readFileSync(imagePath);
  // Derive the media type from the extension (.png vs the jpeg family).
  const ext = path.extname(imagePath).toLowerCase();
  const mediaType = ext === '.png' ? 'image/png' : 'image/jpeg';
  const model = getModel({
    provider: 'anthropic',
    model: 'claude-sonnet-4-5-20250929',
    apiKey: anthropicKey,
    promptCaching: false,
  });
  const description = await analyzeImage({
    model,
    image: imageBuffer,
    prompt: 'Describe this image briefly.',
    mediaType: mediaType as 'image/jpeg' | 'image/png',
  });
  console.log('Vision result:', description);
  expect(description).toBeTruthy();
  expect(description.length).toBeGreaterThan(10);
});

export default tap.start();

View File

@@ -0,0 +1,36 @@
# Coffee Image Attribution
## coffee.jpg
**Photographer:** Dani (@frokz)
**Source URL:** https://unsplash.com/photos/cup-of-coffee-on-saucer-ZLqxSzvVr7I
**Direct Link:** https://images.unsplash.com/photo-1506372023823-741c83b836fe
### Metadata
- **Title:** Cup of coffee on saucer
- **Description:** One of many coffee-moments in my life ;)
- **Date Published:** September 25, 2017
- **Location:** Stockholm, Sweden
- **Tags:** coffee, cafe, heart, coffee cup, cup, barista, latte, mug, saucer, food, sweden, stockholm
### License
**Unsplash License** - Free to use
- ✅ Commercial and non-commercial use
- ✅ No permission needed
- ❌ Cannot be sold without significant modification
- ❌ Cannot be used to replicate Unsplash or similar service
Full license: https://unsplash.com/license
### Usage in This Project
This image is used for testing vision/image processing capabilities in the SmartAI library test suite, specifically for:
- Testing coffee/beverage recognition
- Latte art pattern detection (heart shape)
- Scene/environment analysis
- Multi-element image understanding (cup, saucer, table)
### Download Information
- **Downloaded:** September 28, 2025
- **Original Filename:** dani-ZLqxSzvVr7I-unsplash.jpg
- **Resolution:** High resolution (3.7 MB)
- **Format:** JPEG

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.7 MiB

View File

@@ -0,0 +1,40 @@
# Laptop Image Attribution
## laptop.jpg
**Photographer:** Nicolas Bichon (@nicol3a)
**Source URL:** https://unsplash.com/photos/a-laptop-computer-sitting-on-top-of-a-wooden-desk-ZhV4iqAXxyA
**Direct Link:** https://images.unsplash.com/photo-1704230972797-e0e3aba0fce7
### Metadata
- **Title:** A laptop computer sitting on top of a wooden desk
- **Description:** Lifestyle photo I took for my indie app Type, a macOS app to take notes without interrupting your flow. https://usetype.app.
- **Date Published:** January 2, 2024
- **Camera:** FUJIFILM, X-T20
- **Tags:** computer, laptop, mac, keyboard, computer keyboard, computer hardware, furniture, table, electronics, screen, monitor, hardware, display, tabletop, lcd screen, digital display
### Statistics
- **Views:** 183,020
- **Downloads:** 757
### License
**Unsplash License** - Free to use
- ✅ Commercial and non-commercial use
- ✅ No permission needed
- ❌ Cannot be sold without significant modification
- ❌ Cannot be used to replicate Unsplash or similar service
Full license: https://unsplash.com/license
### Usage in This Project
This image is used for testing vision/image processing capabilities in the SmartAI library test suite, specifically for:
- Testing technology/computer equipment recognition
- Workspace/office environment analysis
- Object detection (laptop, keyboard, monitor, table)
- Scene understanding and context analysis
### Download Information
- **Downloaded:** September 28, 2025
- **Original Filename:** nicolas-bichon-ZhV4iqAXxyA-unsplash.jpg
- **Resolution:** High resolution (1.8 MB)
- **Format:** JPEG

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.8 MiB

View File

@@ -0,0 +1,40 @@
# Receipt Image Attribution
## receipt.jpg
**Photographer:** Annie Spratt (@anniespratt)
**Source URL:** https://unsplash.com/photos/a-receipt-sitting-on-top-of-a-wooden-table-recgFWxDO1Y
**Direct Link:** https://images.unsplash.com/photo-1731686602391-7484df33a03c
### Metadata
- **Title:** A receipt sitting on top of a wooden table
- **Description:** Download this free HD photo of text, document, invoice, and receipt by Annie Spratt
- **Date Published:** November 15, 2024
- **Tags:** text, document, invoice, receipt, diaper
### Statistics
- **Views:** 54,593
- **Downloads:** 764
### License
**Unsplash License** - Free to use
- ✅ Commercial and non-commercial use
- ✅ No permission needed
- ❌ Cannot be sold without significant modification
- ❌ Cannot be used to replicate Unsplash or similar service
Full license: https://unsplash.com/license
### Usage in This Project
This image is used for testing vision/image processing capabilities in the SmartAI library test suite, specifically for:
- Testing text extraction and OCR capabilities
- Document recognition and classification
- Receipt/invoice analysis
- Text-heavy image understanding
- Structured data extraction from documents
### Download Information
- **Downloaded:** September 28, 2025
- **Original Filename:** annie-spratt-recgFWxDO1Y-unsplash.jpg
- **Resolution:** High resolution (3.3 MB)
- **Format:** JPEG

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.3 MiB

View File

@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@push.rocks/smartai',
version: '0.4.0',
description: 'A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.'
version: '2.0.0',
description: 'Provider registry and capability utilities for ai-sdk (Vercel AI SDK). Core export returns LanguageModel; subpath exports provide vision, audio, image, document and research capabilities.'
}

View File

@@ -1,86 +0,0 @@
/**
 * A single message in a chat exchange.
 * `role` identifies the speaker; `content` is the plain-text body.
 */
export interface ChatMessage {
  role: 'assistant' | 'user' | 'system';
  content: string;
}

/**
 * Options for one chat interaction: a system prompt, the new user
 * message, and the prior conversation turns (oldest first).
 */
export interface ChatOptions {
  systemMessage: string;
  userMessage: string;
  messageHistory: ChatMessage[];
}

/**
 * Response returned by a chat interaction; always spoken by the assistant.
 */
export interface ChatResponse {
  role: 'assistant';
  message: string;
}
/**
 * Abstract base class for multi-modal AI models.
 * Provides a common interface for different AI providers
 * (OpenAI, Anthropic, Perplexity, Ollama, ...). Concrete providers may
 * support only a subset of the capabilities and throw for the rest.
 */
export abstract class MultiModalModel {
  /**
   * Initializes the model and any necessary resources.
   * Must be called (and awaited) before any other method is used.
   */
  abstract start(): Promise<void>;

  /**
   * Cleans up any resources used by the model.
   * Should be called when the model is no longer needed.
   */
  abstract stop(): Promise<void>;

  /**
   * Single-shot chat interaction with the model.
   * @param optionsArg System message, new user message, and prior history.
   * @returns Promise resolving to the assistant's response.
   */
  public abstract chat(optionsArg: ChatOptions): Promise<ChatResponse>;

  /**
   * Streaming interface for chat interactions, for real-time responses.
   * @param input Stream of encoded user messages.
   * @returns Stream of model response text chunks.
   */
  public abstract chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>>;

  /**
   * Text-to-speech conversion.
   * @param optionsArg The message to convert to speech.
   * @returns Promise resolving to a readable stream of audio data.
   * @throws Error if the provider doesn't support audio generation.
   */
  public abstract audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream>;

  /**
   * Vision-language processing.
   * @param optionsArg Image buffer plus the analysis prompt.
   * @returns Promise resolving to the model's description or analysis of the image.
   * @throws Error if the provider doesn't support vision tasks.
   */
  public abstract vision(optionsArg: { image: Buffer; prompt: string }): Promise<string>;

  /**
   * Document analysis and processing.
   * @param optionsArg System/user messages, raw PDF bytes, and history.
   * @returns Promise resolving to the model's analysis of the documents.
   * @throws Error if the provider doesn't support document processing.
   */
  public abstract document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }>;
}

View File

@@ -1,152 +0,0 @@
import type { SmartAi } from "./classes.smartai.js";
import { OpenAiProvider } from "./provider.openai.js";
type TProcessFunction = (input: string) => Promise<string>;

export interface IConversationOptions {
  processFunction: TProcessFunction;
}

/**
 * A conversation bound to one provider of a SmartAi instance.
 * Text written to the input stream is run through the configured
 * process function and emitted on the output stream.
 */
export class Conversation {
  // STATIC

  /**
   * Shared factory: verifies the requested provider was configured on the
   * SmartAi instance, then builds a conversation around it.
   * @param providerName Human-readable name used in the error message.
   */
  private static createForProvider(smartaiRefArg: SmartAi, provider: unknown, providerName: string): Conversation {
    if (!provider) {
      throw new Error(`${providerName} provider not available`);
    }
    return new Conversation(smartaiRefArg, {
      processFunction: async (input) => {
        return '' // TODO implement proper streaming
      }
    });
  }

  public static async createWithOpenAi(smartaiRefArg: SmartAi) {
    return Conversation.createForProvider(smartaiRefArg, smartaiRefArg.openaiProvider, 'OpenAI');
  }

  public static async createWithAnthropic(smartaiRefArg: SmartAi) {
    return Conversation.createForProvider(smartaiRefArg, smartaiRefArg.anthropicProvider, 'Anthropic');
  }

  public static async createWithPerplexity(smartaiRefArg: SmartAi) {
    return Conversation.createForProvider(smartaiRefArg, smartaiRefArg.perplexityProvider, 'Perplexity');
  }

  public static async createWithExo(smartaiRefArg: SmartAi) {
    return Conversation.createForProvider(smartaiRefArg, smartaiRefArg.exoProvider, 'Exo');
  }

  public static async createWithOllama(smartaiRefArg: SmartAi) {
    return Conversation.createForProvider(smartaiRefArg, smartaiRefArg.ollamaProvider, 'Ollama');
  }

  public static async createWithGroq(smartaiRefArg: SmartAi) {
    return Conversation.createForProvider(smartaiRefArg, smartaiRefArg.groqProvider, 'Groq');
  }

  public static async createWithXai(smartaiRefArg: SmartAi) {
    return Conversation.createForProvider(smartaiRefArg, smartaiRefArg.xaiProvider, 'XAI');
  }

  // INSTANCE
  smartaiRef: SmartAi
  private systemMessage: string;
  private processFunction: TProcessFunction;
  private inputStreamWriter: WritableStreamDefaultWriter<string> | null = null;
  private outputStreamController: ReadableStreamDefaultController<string> | null = null;

  constructor(smartairefArg: SmartAi, options: IConversationOptions) {
    // FIX: the SmartAi reference was previously dropped, leaving
    // `smartaiRef` undefined on every instance despite being declared
    // and passed by every static factory.
    this.smartaiRef = smartairefArg;
    this.processFunction = options.processFunction;
  }

  /** Sets the system message used for this conversation. */
  public async setSystemMessage(systemMessageArg: string) {
    this.systemMessage = systemMessageArg;
  }

  /** Creates the output stream and captures its controller for later pushes. */
  private setupOutputStream(): ReadableStream<string> {
    return new ReadableStream<string>({
      start: (controller) => {
        this.outputStreamController = controller;
      }
    });
  }

  /**
   * Creates the input stream: each written chunk is processed and the
   * result is forwarded to the current output stream controller.
   */
  private setupInputStream(): WritableStream<string> {
    const writableStream = new WritableStream<string>({
      write: async (chunk) => {
        const processedData = await this.processFunction(chunk);
        if (this.outputStreamController) {
          this.outputStreamController.enqueue(processedData);
        }
      },
      close: () => {
        this.outputStreamController?.close();
      },
      abort: (err) => {
        console.error('Stream aborted', err);
        this.outputStreamController?.error(err);
      }
    });
    return writableStream;
  }

  /** Lazily creates and caches the writer for the input stream. */
  public getInputStreamWriter(): WritableStreamDefaultWriter<string> {
    if (!this.inputStreamWriter) {
      const inputStream = this.setupInputStream();
      this.inputStreamWriter = inputStream.getWriter();
    }
    return this.inputStreamWriter;
  }

  // NOTE(review): each call creates a fresh stream and rebinds the
  // controller, orphaning any previous consumer — confirm this is intended.
  public getOutputStream(): ReadableStream<string> {
    return this.setupOutputStream();
  }
}

View File

@@ -1,119 +0,0 @@
import { Conversation } from './classes.conversation.js';
import * as plugins from './plugins.js';
import { AnthropicProvider } from './provider.anthropic.js';
import { OllamaProvider } from './provider.ollama.js';
import { OpenAiProvider } from './provider.openai.js';
import { PerplexityProvider } from './provider.perplexity.js';
import { ExoProvider } from './provider.exo.js';
import { GroqProvider } from './provider.groq.js';
import { XAIProvider } from './provider.xai.js';
/**
 * Configuration for SmartAi. Supply any subset; only providers with a
 * token/config section are instantiated and started.
 */
export interface ISmartAiOptions {
  openaiToken?: string;
  anthropicToken?: string;
  perplexityToken?: string;
  groqToken?: string;
  xaiToken?: string;
  // Exo: OpenAI-compatible self-hosted endpoint (URL + optional key).
  exo?: {
    baseUrl?: string;
    apiKey?: string;
  };
  // Ollama: local daemon with separate chat and vision model names.
  ollama?: {
    baseUrl?: string;
    model?: string;
    visionModel?: string;
  };
}

/** Names of all providers a conversation can be routed to. */
export type TProvider = 'openai' | 'anthropic' | 'perplexity' | 'ollama' | 'exo' | 'groq' | 'xai';
/**
 * Facade that instantiates and starts one provider client per configured
 * section of ISmartAiOptions, and creates conversations bound to them.
 */
export class SmartAi {
  public options: ISmartAiOptions;

  public openaiProvider: OpenAiProvider;
  public anthropicProvider: AnthropicProvider;
  public perplexityProvider: PerplexityProvider;
  public ollamaProvider: OllamaProvider;
  public exoProvider: ExoProvider;
  public groqProvider: GroqProvider;
  public xaiProvider: XAIProvider;

  constructor(optionsArg: ISmartAiOptions) {
    this.options = optionsArg;
  }

  /** Instantiates and starts every provider that has configuration. */
  public async start() {
    if (this.options.openaiToken) {
      this.openaiProvider = new OpenAiProvider({
        openaiToken: this.options.openaiToken,
      });
      await this.openaiProvider.start();
    }
    if (this.options.anthropicToken) {
      this.anthropicProvider = new AnthropicProvider({
        anthropicToken: this.options.anthropicToken,
      });
      await this.anthropicProvider.start();
    }
    if (this.options.perplexityToken) {
      this.perplexityProvider = new PerplexityProvider({
        perplexityToken: this.options.perplexityToken,
      });
      await this.perplexityProvider.start();
    }
    if (this.options.groqToken) {
      this.groqProvider = new GroqProvider({
        groqToken: this.options.groqToken,
      });
      await this.groqProvider.start();
    }
    if (this.options.xaiToken) {
      this.xaiProvider = new XAIProvider({
        xaiToken: this.options.xaiToken,
      });
      await this.xaiProvider.start();
    }
    if (this.options.ollama) {
      this.ollamaProvider = new OllamaProvider({
        baseUrl: this.options.ollama.baseUrl,
        model: this.options.ollama.model,
        visionModel: this.options.ollama.visionModel,
      });
      await this.ollamaProvider.start();
    }
    if (this.options.exo) {
      this.exoProvider = new ExoProvider({
        exoBaseUrl: this.options.exo.baseUrl,
        apiKey: this.options.exo.apiKey,
      });
      await this.exoProvider.start();
    }
  }

  /**
   * Stops every provider that was started.
   * FIX: this was previously a no-op, so provider resources were never
   * released when the SmartAi instance was shut down.
   */
  public async stop() {
    const providers = [
      this.openaiProvider,
      this.anthropicProvider,
      this.perplexityProvider,
      this.groqProvider,
      this.xaiProvider,
      this.ollamaProvider,
      this.exoProvider,
    ];
    for (const provider of providers) {
      if (provider) {
        await provider.stop();
      }
    }
  }

  /**
   * create a new conversation
   * @throws Error when the provider name is not recognized.
   */
  createConversation(provider: TProvider) {
    switch (provider) {
      case 'exo':
        return Conversation.createWithExo(this);
      case 'openai':
        return Conversation.createWithOpenAi(this);
      case 'anthropic':
        return Conversation.createWithAnthropic(this);
      case 'perplexity':
        return Conversation.createWithPerplexity(this);
      case 'ollama':
        return Conversation.createWithOllama(this);
      case 'groq':
        return Conversation.createWithGroq(this);
      case 'xai':
        return Conversation.createWithXai(this);
      default:
        throw new Error('Provider not available');
    }
  }
}

View File

@@ -1,15 +0,0 @@
import type { SmartAi } from './classes.smartai.js';
import * as plugins from './plugins.js';
/**
 * Thin handle pairing a SmartAi instance with text-to-speech usage.
 * Currently just stores the reference; synthesis lives elsewhere.
 */
export class TTS {
  /** Factory mirroring the other provider helpers. */
  public static async createWithOpenAi(smartaiRef: SmartAi): Promise<TTS> {
    const instance = new TTS(smartaiRef);
    return instance;
  }

  // INSTANCE
  smartaiRef: SmartAi;

  constructor(smartaiRefArg: SmartAi) {
    this.smartaiRef = smartaiRefArg;
  }
}

View File

@@ -1,3 +1,8 @@
export * from './classes.smartai.js';
export * from './abstract.classes.multimodal.js';
export * from './provider.openai.js';
export { getModel } from './smartai.classes.smartai.js';
export type { ISmartAiOptions, TProvider, IOllamaModelOptions, LanguageModelV3 } from './smartai.interfaces.js';
export { createAnthropicCachingMiddleware } from './smartai.middleware.anthropic.js';
export { createOllamaModel } from './smartai.provider.ollama.js';
// Re-export commonly used ai-sdk functions for consumer convenience
export { generateText, streamText, tool, jsonSchema } from 'ai';
export type { ModelMessage, ToolSet, StreamTextResult } from 'ai';

View File

View File

@@ -1,4 +0,0 @@
import * as plugins from './plugins.js';
// Absolute path to the package root (one level above this compiled file).
export const packageDir = plugins.path.join(plugins.smartpath.get.dirnameFromImportMetaUrl(import.meta.url), '../');
// Directory for untracked runtime artifacts (conventionally git-ignored).
export const nogitDir = plugins.path.join(packageDir, './.nogit');

View File

@@ -1,36 +1,22 @@
// node native
import * as path from 'path';
// ai sdk core
import { generateText, streamText, wrapLanguageModel, tool, jsonSchema } from 'ai';
export { generateText, streamText, wrapLanguageModel, tool, jsonSchema };
// ai sdk providers
import { createAnthropic } from '@ai-sdk/anthropic';
import { createOpenAI } from '@ai-sdk/openai';
import { createGoogleGenerativeAI } from '@ai-sdk/google';
import { createGroq } from '@ai-sdk/groq';
import { createMistral } from '@ai-sdk/mistral';
import { createXai } from '@ai-sdk/xai';
import { createPerplexity } from '@ai-sdk/perplexity';
export {
path,
}
// @push.rocks scope
import * as qenv from '@push.rocks/qenv';
import * as smartarray from '@push.rocks/smartarray';
import * as smartfile from '@push.rocks/smartfile';
import * as smartpath from '@push.rocks/smartpath';
import * as smartpdf from '@push.rocks/smartpdf';
import * as smartpromise from '@push.rocks/smartpromise';
import * as smartrequest from '@push.rocks/smartrequest';
import * as webstream from '@push.rocks/webstream';
export {
smartarray,
qenv,
smartfile,
smartpath,
smartpdf,
smartpromise,
smartrequest,
webstream,
}
// third party
import * as anthropic from '@anthropic-ai/sdk';
import * as openai from 'openai';
export {
anthropic,
openai,
}
createAnthropic,
createOpenAI,
createGoogleGenerativeAI,
createGroq,
createMistral,
createXai,
createPerplexity,
};

View File

@@ -1,240 +0,0 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
import type { ImageBlockParam, TextBlockParam } from '@anthropic-ai/sdk/resources/messages';
type ContentBlock = ImageBlockParam | TextBlockParam;

export interface IAnthropicProviderOptions {
  anthropicToken: string;
}

/**
 * Anthropic (Claude) implementation of the MultiModalModel interface.
 * Supports chat, streaming chat, vision and PDF document analysis;
 * audio generation throws.
 */
export class AnthropicProvider extends MultiModalModel {
  private options: IAnthropicProviderOptions;
  public anthropicApiClient: plugins.anthropic.default;

  constructor(optionsArg: IAnthropicProviderOptions) {
    super();
    this.options = optionsArg // Ensure the token is stored
  }

  /** Creates the Anthropic SDK client; must run before any other call. */
  async start() {
    this.anthropicApiClient = new plugins.anthropic.default({
      apiKey: this.options.anthropicToken,
    });
  }

  async stop() {}

  /**
   * Streaming chat: consumes newline-delimited JSON messages from `input`
   * and emits the model's text deltas as they arrive.
   */
  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // FIX: the transformer callbacks must be arrow functions. With object
    // method shorthand, the Streams spec invokes them with `this` bound to
    // the transformer object, so `this.anthropicApiClient` was undefined
    // and every streamed request crashed at runtime.
    const transform = new TransformStream<Uint8Array, string>({
      transform: async (chunk, controller) => {
        buffer += decoder.decode(chunk, { stream: true });
        // Try to parse complete newline-terminated JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;
          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);
          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }
        // If we have a complete message, send it to Anthropic
        if (currentMessage) {
          const stream = await this.anthropicApiClient.messages.create({
            model: 'claude-3-opus-20240229',
            messages: [{ role: currentMessage.role, content: currentMessage.content }],
            system: '',
            stream: true,
            max_tokens: 4000,
          });
          // Forward each text delta from Anthropic to the consumer
          for await (const chunk of stream) {
            const content = chunk.delta?.text;
            if (content) {
              controller.enqueue(content);
            }
          }
          currentMessage = null;
        }
      },
      flush: (controller) => {
        // Emit whatever parseable content remains once the input closes
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });
    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  /**
   * Single-shot chat call. Non-assistant history roles are collapsed to
   * 'user' because the Messages API only accepts user/assistant turns.
   */
  public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
    const messages = optionsArg.messageHistory.map(msg => ({
      role: msg.role === 'assistant' ? 'assistant' as const : 'user' as const,
      content: msg.content
    }));
    const result = await this.anthropicApiClient.messages.create({
      model: 'claude-3-opus-20240229',
      system: optionsArg.systemMessage,
      messages: [
        ...messages,
        { role: 'user' as const, content: optionsArg.userMessage }
      ],
      max_tokens: 4000,
    });
    // Concatenate all text blocks from the response
    let message = '';
    for (const block of result.content) {
      if ('text' in block) {
        message += block.text;
      }
    }
    return {
      role: 'assistant' as const,
      message,
    };
  }

  /** Always throws: Anthropic does not provide an audio API. */
  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('Audio generation is not yet supported by Anthropic.');
  }

  /**
   * Describes/analyzes an image with the given prompt.
   * NOTE(review): media_type is hard-coded to image/jpeg — confirm callers
   * only pass JPEG buffers, or derive the type from the buffer contents.
   */
  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    const base64Image = optionsArg.image.toString('base64');
    const content: ContentBlock[] = [
      {
        type: 'text',
        text: optionsArg.prompt
      },
      {
        type: 'image',
        source: {
          type: 'base64',
          media_type: 'image/jpeg',
          data: base64Image
        }
      }
    ];
    const result = await this.anthropicApiClient.messages.create({
      model: 'claude-3-opus-20240229',
      messages: [{
        role: 'user',
        content
      }],
      max_tokens: 1024
    });
    // Concatenate all text blocks from the response
    let message = '';
    for (const block of result.content) {
      if ('text' in block) {
        message += block.text;
      }
    }
    return message;
  }

  /**
   * Analyzes PDFs by rendering every page to PNG bytes via SmartPdf and
   * sending the pages as images alongside the user message.
   */
  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    // Convert PDF documents to images using SmartPDF
    const smartpdfInstance = new plugins.smartpdf.SmartPdf();
    let documentImageBytesArray: Uint8Array[] = [];
    for (const pdfDocument of optionsArg.pdfDocuments) {
      const documentImageArray = await smartpdfInstance.convertPDFToPngBytes(pdfDocument);
      documentImageBytesArray = documentImageBytesArray.concat(documentImageArray);
    }
    // Convert message history to Anthropic format (user/assistant only)
    const messages = optionsArg.messageHistory.map(msg => ({
      role: msg.role === 'assistant' ? 'assistant' as const : 'user' as const,
      content: msg.content
    }));
    // Create content array with text and images
    const content: ContentBlock[] = [
      {
        type: 'text',
        text: optionsArg.userMessage
      }
    ];
    // Add each document page as an image.
    // FIX: convertPDFToPngBytes produces PNG data, but it was previously
    // declared as image/jpeg — a media-type mismatch the API can reject.
    for (const imageBytes of documentImageBytesArray) {
      content.push({
        type: 'image',
        source: {
          type: 'base64',
          media_type: 'image/png',
          data: Buffer.from(imageBytes).toString('base64')
        }
      });
    }
    const result = await this.anthropicApiClient.messages.create({
      model: 'claude-3-opus-20240229',
      system: optionsArg.systemMessage,
      messages: [
        ...messages,
        { role: 'user', content }
      ],
      max_tokens: 4096
    });
    // Concatenate all text blocks from the response
    let message = '';
    for (const block of result.content) {
      if ('text' in block) {
        message += block.text;
      }
    }
    return {
      message: {
        role: 'assistant',
        content: message
      }
    };
  }
}

View File

@@ -1,128 +0,0 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';
export interface IExoProviderOptions {
  exoBaseUrl?: string;
  apiKey?: string;
}

/**
 * Exo provider: talks to a local Exo cluster through its OpenAI-compatible
 * HTTP API. Only chat is supported; audio/vision/document all throw.
 */
export class ExoProvider extends MultiModalModel {
  private options: IExoProviderOptions;
  public openAiApiClient: plugins.openai.default;

  constructor(optionsArg: IExoProviderOptions = {}) {
    super();
    this.options = {
      exoBaseUrl: 'http://localhost:8080/v1', // Default Exo API endpoint
      ...optionsArg
    };
  }

  /** Creates the OpenAI SDK client pointed at the Exo endpoint. */
  public async start() {
    this.openAiApiClient = new plugins.openai.default({
      apiKey: this.options.apiKey || 'not-needed', // Exo might not require an API key for local deployment
      baseURL: this.options.exoBaseUrl,
    });
  }

  public async stop() {}

  /**
   * Streaming chat: consumes newline-delimited JSON messages and, for each
   * message of type 'message', emits the chat response as one JSON line.
   */
  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // FIX: the transformer callbacks must be arrow functions. With object
    // method shorthand, the Streams spec binds `this` to the transformer
    // object, so the `this.chat(...)` call below crashed at runtime.
    const transform = new TransformStream<Uint8Array, string>({
      transform: async (chunk, controller) => {
        buffer += decoder.decode(chunk, { stream: true });
        // Try to parse complete newline-terminated JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;
          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);
          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = message;
              // Process the message based on its type
              if (message.type === 'message') {
                const response = await this.chat({
                  systemMessage: '',
                  userMessage: message.content,
                  messageHistory: [{ role: message.role as 'user' | 'assistant' | 'system', content: message.content }]
                });
                controller.enqueue(JSON.stringify(response) + '\n');
              }
            } catch (error) {
              console.error('Error processing message:', error);
            }
          }
        }
      },
      flush: (controller) => {
        // Trailing partial line: parsed for completeness; nothing is emitted.
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            currentMessage = message;
          } catch (error) {
            console.error('Error processing remaining buffer:', error);
          }
        }
      }
    });
    return input.pipeThrough(transform);
  }

  /** Single-shot chat via the OpenAI-compatible completions endpoint. */
  public async chat(options: ChatOptions): Promise<ChatResponse> {
    const messages: ChatCompletionMessageParam[] = [
      { role: 'system', content: options.systemMessage },
      ...options.messageHistory,
      { role: 'user', content: options.userMessage }
    ];
    try {
      const response = await this.openAiApiClient.chat.completions.create({
        model: 'local-model', // Exo uses local models
        messages: messages,
        stream: false
      });
      return {
        role: 'assistant',
        message: response.choices[0]?.message?.content || ''
      };
    } catch (error) {
      console.error('Error in chat completion:', error);
      throw error;
    }
  }

  /** Always throws: not supported by Exo. */
  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('Audio generation is not supported by Exo provider');
  }

  /** Always throws: not supported by Exo. */
  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('Vision processing is not supported by Exo provider');
  }

  /** Always throws: not supported by Exo. */
  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    throw new Error('Document processing is not supported by Exo provider');
  }
}

View File

@@ -1,192 +0,0 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
export interface IGroqProviderOptions {
  groqToken: string;
  model?: string;
}

/**
 * Groq provider: chat and streaming chat against Groq's OpenAI-compatible
 * REST API. Audio, vision and document processing throw.
 */
export class GroqProvider extends MultiModalModel {
  private options: IGroqProviderOptions;
  // FIX: Groq's OpenAI-compatible API is rooted at /openai/v1; the previous
  // plain /v1 base made every request hit a non-existent endpoint.
  private baseUrl = 'https://api.groq.com/openai/v1';

  constructor(optionsArg: IGroqProviderOptions) {
    super();
    this.options = {
      ...optionsArg,
      model: optionsArg.model || 'llama-3.3-70b-versatile', // Default model
    };
  }

  async start() {}
  async stop() {}

  /** Streaming chat: NDJSON messages in, SSE text deltas out. */
  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // FIX: the transformer callbacks must be arrow functions. With object
    // method shorthand, the Streams spec binds `this` to the transformer
    // object, so `this.baseUrl` / `this.options` were undefined at runtime.
    const transform = new TransformStream<Uint8Array, string>({
      transform: async (chunk, controller) => {
        buffer += decoder.decode(chunk, { stream: true });
        // Try to parse complete newline-terminated JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;
          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);
          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }
        // If we have a complete message, send it to Groq
        if (currentMessage) {
          const response = await fetch(`${this.baseUrl}/chat/completions`, {
            method: 'POST',
            headers: {
              'Authorization': `Bearer ${this.options.groqToken}`,
              'Content-Type': 'application/json',
            },
            body: JSON.stringify({
              model: this.options.model,
              messages: [{ role: currentMessage.role, content: currentMessage.content }],
              stream: true,
            }),
          });
          // Groq streams Server-Sent Events: "data: {json}" lines, "data: [DONE]" last
          const reader = response.body?.getReader();
          if (reader) {
            try {
              while (true) {
                const { done, value } = await reader.read();
                if (done) break;
                const chunk = new TextDecoder().decode(value);
                const lines = chunk.split('\n');
                for (const line of lines) {
                  if (line.startsWith('data: ')) {
                    const data = line.slice(6);
                    if (data === '[DONE]') break;
                    try {
                      const parsed = JSON.parse(data);
                      const content = parsed.choices[0]?.delta?.content;
                      if (content) {
                        controller.enqueue(content);
                      }
                    } catch (e) {
                      console.error('Failed to parse SSE data:', e);
                    }
                  }
                }
              }
            } finally {
              reader.releaseLock();
            }
          }
          currentMessage = null;
        }
      },
      flush: (controller) => {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });
    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  /** Single-shot chat completion. */
  public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
    const messages = [
      // System message
      {
        role: 'system',
        content: optionsArg.systemMessage,
      },
      // Message history
      ...optionsArg.messageHistory.map(msg => ({
        role: msg.role,
        content: msg.content,
      })),
      // User message
      {
        role: 'user',
        content: optionsArg.userMessage,
      },
    ];
    const response = await fetch(`${this.baseUrl}/chat/completions`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.options.groqToken}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: this.options.model,
        messages,
        temperature: 0.7,
        max_completion_tokens: 1024,
        stream: false,
      }),
    });
    if (!response.ok) {
      // FIX: Groq wraps failures as { error: { message } }; also guard
      // against non-JSON error bodies instead of raising a parse error.
      const error: any = await response.json().catch(() => ({}));
      throw new Error(`Groq API error: ${error?.error?.message || error?.message || response.statusText}`);
    }
    const result = await response.json();
    return {
      role: 'assistant',
      message: result.choices[0].message.content,
    };
  }

  /** Always throws: Groq does not provide an audio API here. */
  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('Audio generation is not yet supported by Groq.');
  }

  /** Always throws: vision is not implemented for Groq. */
  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('Vision tasks are not yet supported by Groq.');
  }

  /** Always throws: document processing is not implemented for Groq. */
  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    throw new Error('Document processing is not yet supported by Groq.');
  }
}

View File

@@ -1,252 +0,0 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
/** Configuration for a local Ollama daemon. */
export interface IOllamaProviderOptions {
  baseUrl?: string;      // Daemon URL; constructor defaults to http://localhost:11434
  model?: string;        // Chat model name; constructor defaults to 'llama2'
  visionModel?: string;  // Model to use for vision tasks (e.g. 'llava')
}
export class OllamaProvider extends MultiModalModel {
private options: IOllamaProviderOptions;
private baseUrl: string;
private model: string;
private visionModel: string;
constructor(optionsArg: IOllamaProviderOptions = {}) {
  super();
  this.options = optionsArg;
  // Fall back to a local daemon and default chat/vision models.
  this.baseUrl = optionsArg.baseUrl || 'http://localhost:11434';
  this.model = optionsArg.model || 'llama2';
  this.visionModel = optionsArg.visionModel || 'llava';
}
/**
 * Verifies the Ollama daemon is reachable (via /api/tags) before use.
 * @throws Error with the base URL and underlying reason on failure.
 */
async start() {
  // Verify Ollama is running
  try {
    const response = await fetch(`${this.baseUrl}/api/tags`);
    if (!response.ok) {
      throw new Error('Failed to connect to Ollama server');
    }
  } catch (error) {
    // FIX: the caught value is `unknown` — network failures may throw
    // non-Error values, which previously yielded "undefined" in the message.
    const reason = error instanceof Error ? error.message : String(error);
    throw new Error(`Failed to connect to Ollama server at ${this.baseUrl}: ${reason}`);
  }
}
async stop() {}
/**
 * Streaming chat: consumes newline-delimited JSON messages from `input`
 * and emits the model's incremental text content.
 */
public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
  // Create a TextDecoder to handle incoming chunks
  const decoder = new TextDecoder();
  let buffer = '';
  let currentMessage: { role: string; content: string; } | null = null;

  // FIX: the transformer callbacks must be arrow functions. With object
  // method shorthand, the Streams spec binds `this` to the transformer
  // object, so `this.baseUrl` / `this.model` were undefined at runtime.
  const transform = new TransformStream<Uint8Array, string>({
    transform: async (chunk, controller) => {
      buffer += decoder.decode(chunk, { stream: true });
      // Try to parse complete newline-terminated JSON messages from the buffer
      while (true) {
        const newlineIndex = buffer.indexOf('\n');
        if (newlineIndex === -1) break;
        const line = buffer.slice(0, newlineIndex);
        buffer = buffer.slice(newlineIndex + 1);
        if (line.trim()) {
          try {
            const message = JSON.parse(line);
            currentMessage = {
              role: message.role || 'user',
              content: message.content || '',
            };
          } catch (e) {
            console.error('Failed to parse message:', e);
          }
        }
      }
      // If we have a complete message, send it to Ollama
      if (currentMessage) {
        const response = await fetch(`${this.baseUrl}/api/chat`, {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
          },
          body: JSON.stringify({
            model: this.model,
            messages: [{ role: currentMessage.role, content: currentMessage.content }],
            stream: true,
          }),
        });
        // Ollama streams one JSON object per line; forward message.content
        const reader = response.body?.getReader();
        if (reader) {
          try {
            while (true) {
              const { done, value } = await reader.read();
              if (done) break;
              const chunk = new TextDecoder().decode(value);
              const lines = chunk.split('\n');
              for (const line of lines) {
                if (line.trim()) {
                  try {
                    const parsed = JSON.parse(line);
                    const content = parsed.message?.content;
                    if (content) {
                      controller.enqueue(content);
                    }
                  } catch (e) {
                    console.error('Failed to parse Ollama response:', e);
                  }
                }
              }
            }
          } finally {
            reader.releaseLock();
          }
        }
        currentMessage = null;
      }
    },
    flush: (controller) => {
      if (buffer) {
        try {
          const message = JSON.parse(buffer);
          controller.enqueue(message.content || '');
        } catch (e) {
          console.error('Failed to parse remaining buffer:', e);
        }
      }
    }
  });
  // Connect the input to our transform stream
  return input.pipeThrough(transform);
}
// Implementing the synchronous chat interaction
public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
// Format messages for Ollama
const messages = [
{ role: 'system', content: optionsArg.systemMessage },
...optionsArg.messageHistory,
{ role: 'user', content: optionsArg.userMessage }
];
// Make API call to Ollama
const response = await fetch(`${this.baseUrl}/api/chat`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
model: this.model,
messages: messages,
stream: false
}),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.statusText}`);
}
const result = await response.json();
return {
role: 'assistant' as const,
message: result.message.content,
};
}
public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
throw new Error('Audio generation is not supported by Ollama.');
}
public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
const base64Image = optionsArg.image.toString('base64');
const response = await fetch(`${this.baseUrl}/api/chat`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
model: this.visionModel,
messages: [{
role: 'user',
content: optionsArg.prompt,
images: [base64Image]
}],
stream: false
}),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.statusText}`);
}
const result = await response.json();
return result.message.content;
}
public async document(optionsArg: {
systemMessage: string;
userMessage: string;
pdfDocuments: Uint8Array[];
messageHistory: ChatMessage[];
}): Promise<{ message: any }> {
// Convert PDF documents to images using SmartPDF
const smartpdfInstance = new plugins.smartpdf.SmartPdf();
let documentImageBytesArray: Uint8Array[] = [];
for (const pdfDocument of optionsArg.pdfDocuments) {
const documentImageArray = await smartpdfInstance.convertPDFToPngBytes(pdfDocument);
documentImageBytesArray = documentImageBytesArray.concat(documentImageArray);
}
// Convert images to base64
const base64Images = documentImageBytesArray.map(bytes => Buffer.from(bytes).toString('base64'));
// Send request to Ollama with images
const response = await fetch(`${this.baseUrl}/api/chat`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
model: this.visionModel,
messages: [
{ role: 'system', content: optionsArg.systemMessage },
...optionsArg.messageHistory,
{
role: 'user',
content: optionsArg.userMessage,
images: base64Images
}
],
stream: false
}),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.statusText}`);
}
const result = await response.json();
return {
message: {
role: 'assistant',
content: result.message.content
}
};
}
}

View File

@@ -1,218 +0,0 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
/** Configuration for the OpenAI provider. */
export interface IOpenaiProviderOptions {
// API key used to authenticate against the OpenAI API.
openaiToken: string;
}
/**
 * Provider backed by the official OpenAI API client.
 * Supports chat, streaming chat, TTS audio, vision, and PDF document analysis.
 */
export class OpenAiProvider extends MultiModalModel {
  private options: IOpenaiProviderOptions;
  public openAiApiClient: plugins.openai.default;
  public smartpdfInstance: plugins.smartpdf.SmartPdf;

  constructor(optionsArg: IOpenaiProviderOptions) {
    super();
    this.options = optionsArg;
  }

  /** Initializes the OpenAI client and the PDF rasterizer. */
  public async start() {
    this.openAiApiClient = new plugins.openai.default({
      apiKey: this.options.openaiToken,
      dangerouslyAllowBrowser: true,
    });
    this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
  }

  public async stop() {}

  /**
   * Streaming chat: consumes newline-delimited JSON messages from `input`,
   * forwards each complete message to OpenAI, and emits streamed content deltas.
   */
  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;
    // BUGFIX: TransformStream invokes transformer methods with the transformer
    // object as `this`, so `this.openAiApiClient` was undefined inside
    // `transform`. Capture the client reference here instead.
    const openAiApiClient = this.openAiApiClient;
    const transform = new TransformStream<Uint8Array, string>({
      async transform(chunk, controller) {
        buffer += decoder.decode(chunk, { stream: true });
        // Parse complete newline-delimited JSON messages out of the buffer.
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;
          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);
          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }
        // Once a complete message is available, stream OpenAI's answer out.
        if (currentMessage) {
          const stream = await openAiApiClient.chat.completions.create({
            model: 'gpt-4',
            messages: [{ role: currentMessage.role, content: currentMessage.content }],
            stream: true,
          });
          for await (const chunk of stream) {
            const content = chunk.choices[0]?.delta?.content;
            if (content) {
              controller.enqueue(content);
            }
          }
          currentMessage = null;
        }
      },
      flush(controller) {
        // Best-effort: emit any trailing partial message left in the buffer.
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });
    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  /**
   * Non-streaming chat round-trip against gpt-4o.
   * @returns the assistant's role and reply text.
   */
  public async chat(optionsArg: {
    systemMessage: string;
    userMessage: string;
    messageHistory: {
      role: 'assistant' | 'user';
      content: string;
    }[];
  }) {
    const result = await this.openAiApiClient.chat.completions.create({
      model: 'gpt-4o',
      messages: [
        { role: 'system', content: optionsArg.systemMessage },
        ...optionsArg.messageHistory,
        { role: 'user', content: optionsArg.userMessage },
      ],
    });
    return {
      role: result.choices[0].message.role as 'assistant',
      message: result.choices[0].message.content,
    };
  }

  /**
   * Synthesizes speech for `message` via OpenAI TTS (tts-1-hd, voice 'nova')
   * and resolves with the MP3 response body stream.
   */
  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    const done = plugins.smartpromise.defer<NodeJS.ReadableStream>();
    const result = await this.openAiApiClient.audio.speech.create({
      model: 'tts-1-hd',
      input: optionsArg.message,
      voice: 'nova',
      response_format: 'mp3',
      speed: 1,
    });
    const stream = result.body;
    done.resolve(stream);
    return done.promise;
  }

  /**
   * Analyzes PDF documents: pages are rasterized to PNG and attached to the
   * user message as data-URL image parts for gpt-4o.
   */
  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: {
      role: 'assistant' | 'user';
      content: any;
    }[];
  }) {
    let pdfDocumentImageBytesArray: Uint8Array[] = [];
    for (const pdfDocument of optionsArg.pdfDocuments) {
      const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
      pdfDocumentImageBytesArray = pdfDocumentImageBytesArray.concat(documentImageArray);
    }
    console.log(`image smartfile array`);
    console.log(pdfDocumentImageBytesArray.map((imageBytes) => imageBytes.length));
    // NOTE: a previous version also built SmartFile wrappers here, but the
    // result was never used — the request below embeds the raw bytes directly.
    const result = await this.openAiApiClient.chat.completions.create({
      model: 'gpt-4o',
      // response_format: { type: "json_object" }, // not supported for now
      messages: [
        { role: 'system', content: optionsArg.systemMessage },
        ...optionsArg.messageHistory,
        {
          role: 'user',
          content: [
            { type: 'text', text: optionsArg.userMessage },
            ...(() => {
              // One image_url part per rasterized PDF page.
              const returnArray = [];
              for (const imageBytes of pdfDocumentImageBytesArray) {
                returnArray.push({
                  type: 'image_url',
                  image_url: {
                    url: 'data:image/png;base64,' + Buffer.from(imageBytes).toString('base64'),
                  },
                });
              }
              return returnArray;
            })(),
          ],
        },
      ],
    });
    return {
      message: result.choices[0].message,
    };
  }

  /**
   * Runs a single vision prompt against gpt-4-vision-preview.
   * @returns the model's text answer (empty string when no content).
   */
  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    const result = await this.openAiApiClient.chat.completions.create({
      model: 'gpt-4-vision-preview',
      messages: [
        {
          role: 'user',
          content: [
            { type: 'text', text: optionsArg.prompt },
            {
              type: 'image_url',
              image_url: {
                url: `data:image/jpeg;base64,${optionsArg.image.toString('base64')}`
              }
            }
          ]
        }
      ],
      max_tokens: 300
    });
    return result.choices[0].message.content || '';
  }
}

View File

@@ -1,171 +0,0 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
/** Configuration for the Perplexity provider. */
export interface IPerplexityProviderOptions {
// API key used as the Bearer token against api.perplexity.ai.
perplexityToken: string;
}
/**
 * Provider backed by the Perplexity chat-completions HTTP API.
 * Supports plain and streaming chat; audio, vision and documents are not
 * available on Perplexity and throw.
 */
export class PerplexityProvider extends MultiModalModel {
  private options: IPerplexityProviderOptions;

  constructor(optionsArg: IPerplexityProviderOptions) {
    super();
    this.options = optionsArg;
  }

  async start() {
    // Initialize any necessary clients or resources
  }

  async stop() {}

  /**
   * Streaming chat: consumes newline-delimited JSON messages from `input`,
   * forwards each complete message to Perplexity, and emits SSE content deltas.
   */
  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;
    // BUGFIX: TransformStream invokes transformer methods with the transformer
    // object as `this`, so `this.options.perplexityToken` was undefined inside
    // `transform`. Capture the token here instead.
    const perplexityToken = this.options.perplexityToken;
    const transform = new TransformStream<Uint8Array, string>({
      async transform(chunk, controller) {
        buffer += decoder.decode(chunk, { stream: true });
        // Parse complete newline-delimited JSON messages out of the buffer.
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;
          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);
          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }
        // Once a complete message is available, relay it to Perplexity.
        if (currentMessage) {
          const response = await fetch('https://api.perplexity.ai/chat/completions', {
            method: 'POST',
            headers: {
              'Authorization': `Bearer ${perplexityToken}`,
              'Content-Type': 'application/json',
            },
            body: JSON.stringify({
              model: 'mixtral-8x7b-instruct',
              messages: [{ role: currentMessage.role, content: currentMessage.content }],
              stream: true,
            }),
          });
          // Perplexity streams Server-Sent Events: "data: {json}" lines.
          const reader = response.body?.getReader();
          if (reader) {
            try {
              while (true) {
                const { done, value } = await reader.read();
                if (done) break;
                const chunk = new TextDecoder().decode(value);
                const lines = chunk.split('\n');
                for (const line of lines) {
                  if (line.startsWith('data: ')) {
                    const data = line.slice(6);
                    // [DONE] terminates the SSE stream; the outer read loop
                    // will then observe the connection closing.
                    if (data === '[DONE]') break;
                    try {
                      const parsed = JSON.parse(data);
                      const content = parsed.choices[0]?.delta?.content;
                      if (content) {
                        controller.enqueue(content);
                      }
                    } catch (e) {
                      console.error('Failed to parse SSE data:', e);
                    }
                  }
                }
              }
            } finally {
              reader.releaseLock();
            }
          }
          currentMessage = null;
        }
      },
      flush(controller) {
        // Best-effort: emit any trailing partial message left in the buffer.
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });
    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  /**
   * Non-streaming chat round-trip against the Mixtral model.
   * @returns the assistant's reply message.
   */
  public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
    const response = await fetch('https://api.perplexity.ai/chat/completions', {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.options.perplexityToken}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: 'mixtral-8x7b-instruct', // Using Mixtral model
        messages: [
          { role: 'system', content: optionsArg.systemMessage },
          ...optionsArg.messageHistory,
          { role: 'user', content: optionsArg.userMessage }
        ],
      }),
    });
    if (!response.ok) {
      throw new Error(`Perplexity API error: ${response.statusText}`);
    }
    const result = await response.json();
    return {
      role: 'assistant' as const,
      message: result.choices[0].message.content,
    };
  }

  /** Audio generation is not available on Perplexity. */
  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('Audio generation is not supported by Perplexity.');
  }

  /** Vision tasks are not available on Perplexity. */
  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('Vision tasks are not supported by Perplexity.');
  }

  /** Document analysis is not available on Perplexity. */
  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    throw new Error('Document processing is not supported by Perplexity.');
  }
}

View File

@@ -1,183 +0,0 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';
/** Configuration for the X.AI (Grok) provider. */
export interface IXAIProviderOptions {
// API key used against the X.AI OpenAI-compatible endpoint.
xaiToken: string;
}
/**
 * Provider for X.AI's Grok models, driven through the OpenAI-compatible
 * client pointed at https://api.x.ai/v1.
 */
export class XAIProvider extends MultiModalModel {
  private options: IXAIProviderOptions;
  public openAiApiClient: plugins.openai.default;
  public smartpdfInstance: plugins.smartpdf.SmartPdf;

  constructor(optionsArg: IXAIProviderOptions) {
    super();
    this.options = optionsArg;
  }

  /** Initializes the OpenAI-compatible client and the PDF rasterizer. */
  public async start() {
    this.openAiApiClient = new plugins.openai.default({
      apiKey: this.options.xaiToken,
      baseURL: 'https://api.x.ai/v1',
    });
    this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
  }

  public async stop() {}

  /**
   * Streaming chat: consumes newline-delimited JSON messages from `input`,
   * forwards each complete message to X.AI, and emits streamed content deltas.
   */
  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;
    // BUGFIX: TransformStream invokes transformer methods with the transformer
    // object as `this`, so `this.openAiApiClient` was undefined inside
    // `transform`. Capture the client reference here instead.
    const openAiApiClient = this.openAiApiClient;
    const transform = new TransformStream<Uint8Array, string>({
      async transform(chunk, controller) {
        buffer += decoder.decode(chunk, { stream: true });
        // Parse complete newline-delimited JSON messages out of the buffer.
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;
          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);
          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }
        // Once a complete message is available, stream X.AI's answer out.
        if (currentMessage) {
          const stream = await openAiApiClient.chat.completions.create({
            model: 'grok-2-latest',
            messages: [{ role: currentMessage.role, content: currentMessage.content }],
            stream: true,
          });
          for await (const chunk of stream) {
            const content = chunk.choices[0]?.delta?.content;
            if (content) {
              controller.enqueue(content);
            }
          }
          currentMessage = null;
        }
      },
      flush(controller) {
        // Best-effort: emit any trailing partial message left in the buffer.
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });
    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  /**
   * Non-streaming chat round-trip against grok-2-latest.
   * @returns the assistant's reply text (empty string when no content).
   */
  public async chat(optionsArg: {
    systemMessage: string;
    userMessage: string;
    messageHistory: { role: string; content: string; }[];
  }): Promise<{ role: 'assistant'; message: string; }> {
    // Prepare messages array with system message, history, and user message
    const messages: ChatCompletionMessageParam[] = [
      { role: 'system', content: optionsArg.systemMessage },
      ...optionsArg.messageHistory.map(msg => ({
        role: msg.role as 'system' | 'user' | 'assistant',
        content: msg.content
      })),
      { role: 'user', content: optionsArg.userMessage }
    ];
    const completion = await this.openAiApiClient.chat.completions.create({
      model: 'grok-2-latest',
      messages: messages,
      stream: false,
    });
    return {
      role: 'assistant',
      message: completion.choices[0]?.message?.content || ''
    };
  }

  /** Audio generation is not available on X.AI. */
  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('Audio generation is not supported by X.AI');
  }

  /** Vision tasks are not available on X.AI. */
  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('Vision tasks are not supported by X.AI');
  }

  /**
   * Analyzes PDF documents with grok-2-latest.
   * NOTE(review): pages are rasterized and base64-encoded, but only textual
   * `<image data>` placeholders are sent to the model — the actual image
   * bytes never reach X.AI. Confirm whether this is intentional.
   */
  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: { role: string; content: string; }[];
  }): Promise<{ message: any }> {
    // First convert PDF documents to images
    let pdfDocumentImageBytesArray: Uint8Array[] = [];
    for (const pdfDocument of optionsArg.pdfDocuments) {
      const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
      pdfDocumentImageBytesArray = pdfDocumentImageBytesArray.concat(documentImageArray);
    }
    // Convert images to base64 for inclusion in the message
    const imageBase64Array = pdfDocumentImageBytesArray.map(bytes =>
      Buffer.from(bytes).toString('base64')
    );
    // Combine document images into the user message
    const enhancedUserMessage = `
    ${optionsArg.userMessage}
    Document contents (as images):
    ${imageBase64Array.map((img, i) => `Image ${i + 1}: <image data>`).join('\n')}
    `;
    // Use chat completion to analyze the documents
    const messages: ChatCompletionMessageParam[] = [
      { role: 'system', content: optionsArg.systemMessage },
      ...optionsArg.messageHistory.map(msg => ({
        role: msg.role as 'system' | 'user' | 'assistant',
        content: msg.content
      })),
      { role: 'user', content: enhancedUserMessage }
    ];
    const completion = await this.openAiApiClient.chat.completions.create({
      model: 'grok-2-latest',
      messages: messages,
      stream: false,
    });
    return {
      message: completion.choices[0]?.message?.content || ''
    };
  }
}

View File

@@ -0,0 +1,51 @@
import * as plugins from './plugins.js';
import type { ISmartAiOptions, LanguageModelV3 } from './smartai.interfaces.js';
import { createOllamaModel } from './smartai.provider.ollama.js';
import { createAnthropicCachingMiddleware } from './smartai.middleware.anthropic.js';
/**
 * Returns a LanguageModelV3 for the given provider and model.
 * This is the primary API — consumers use the returned model with AI SDK's
 * generateText(), streamText(), etc.
 */
export function getModel(options: ISmartAiOptions): LanguageModelV3 {
  // One factory per provider; each builds the base (unwrapped) model lazily.
  const factories: Record<ISmartAiOptions['provider'], () => LanguageModelV3> = {
    anthropic: () => plugins.createAnthropic({ apiKey: options.apiKey })(options.model) as LanguageModelV3,
    openai: () => plugins.createOpenAI({ apiKey: options.apiKey })(options.model) as LanguageModelV3,
    google: () => plugins.createGoogleGenerativeAI({ apiKey: options.apiKey })(options.model) as LanguageModelV3,
    groq: () => plugins.createGroq({ apiKey: options.apiKey })(options.model) as LanguageModelV3,
    mistral: () => plugins.createMistral({ apiKey: options.apiKey })(options.model) as LanguageModelV3,
    xai: () => plugins.createXai({ apiKey: options.apiKey })(options.model) as LanguageModelV3,
    perplexity: () => plugins.createPerplexity({ apiKey: options.apiKey })(options.model) as LanguageModelV3,
    ollama: () => createOllamaModel(options),
  };
  const factory = factories[options.provider];
  if (!factory) {
    throw new Error(`Unknown provider: ${(options as ISmartAiOptions).provider}`);
  }
  const base = factory();
  // Anthropic models are wrapped with prompt-caching middleware unless the
  // caller explicitly disabled it via promptCaching === false.
  if (options.provider === 'anthropic' && options.promptCaching !== false) {
    return plugins.wrapLanguageModel({
      model: base,
      middleware: createAnthropicCachingMiddleware(),
    }) as unknown as LanguageModelV3;
  }
  return base;
}

53
ts/smartai.interfaces.ts Normal file
View File

@@ -0,0 +1,53 @@
import type { LanguageModelV3 } from '@ai-sdk/provider';
/** All providers selectable through getModel(). */
export type TProvider =
| 'anthropic'
| 'openai'
| 'google'
| 'groq'
| 'mistral'
| 'xai'
| 'perplexity'
| 'ollama';
/** Options for selecting and configuring a model via getModel(). */
export interface ISmartAiOptions {
// Which provider backend to use.
provider: TProvider;
// Provider-specific model identifier (e.g. 'gpt-4o', 'llava').
model: string;
// API key for hosted providers; not needed for a local Ollama server.
apiKey?: string;
/** For Ollama: base URL of the local server. Default: http://localhost:11434 */
baseUrl?: string;
/**
 * Ollama-specific model runtime options.
 * Only used when provider === 'ollama'.
 */
ollamaOptions?: IOllamaModelOptions;
/**
 * Enable Anthropic prompt caching on system + recent messages.
 * Only used when provider === 'anthropic'. Default: true.
 */
promptCaching?: boolean;
}
/**
 * Ollama model runtime options passed in the request body `options` field.
 * @see https://github.com/ollama/ollama/blob/main/docs/modelfile.md
 */
export interface IOllamaModelOptions {
/** Context window size. Default: 2048. */
num_ctx?: number;
/** 0 = deterministic. Default: 0.8. For Qwen models use 0.55. */
temperature?: number;
// Sampling: restrict to the top K candidate tokens.
top_k?: number;
// Sampling: nucleus (top-p) probability cutoff.
top_p?: number;
// Penalty applied to recently repeated tokens.
repeat_penalty?: number;
// Maximum number of tokens to generate.
num_predict?: number;
// Sequences that stop generation when emitted.
stop?: string[];
// RNG seed for reproducible sampling.
seed?: number;
/**
 * Enable thinking/reasoning mode (Qwen3, QwQ, DeepSeek-R1 etc.).
 * The custom Ollama provider handles this directly.
 */
think?: boolean;
}
// Re-exported so consumers don't need a direct @ai-sdk/provider dependency.
export type { LanguageModelV3 };

View File

@@ -0,0 +1,38 @@
import type { LanguageModelV3Middleware, LanguageModelV3Prompt } from '@ai-sdk/provider';
/**
 * Creates middleware that adds Anthropic prompt caching directives.
 * Marks the last system message and last user message with ephemeral cache
 * control, reducing input token cost and latency on repeated calls.
 */
export function createAnthropicCachingMiddleware(): LanguageModelV3Middleware {
  return {
    specificationVersion: 'v3',
    transformParams: async ({ params }) => {
      const prompt = [...params.prompt] as Array<Record<string, unknown>>;
      // Scan from the end to locate the most recent message with `role`.
      const lastIndexOfRole = (role: string): number => {
        for (let i = prompt.length - 1; i >= 0; i--) {
          if (prompt[i].role === role) return i;
        }
        return -1;
      };
      for (const idx of [lastIndexOfRole('system'), lastIndexOfRole('user')]) {
        if (idx < 0) continue;
        // Merge the cache directive into any pre-existing provider options
        // without mutating the caller's message objects.
        const existingOptions = (prompt[idx].providerOptions ?? {}) as Record<string, unknown>;
        const existingAnthropic = (existingOptions.anthropic ?? {}) as Record<string, unknown>;
        prompt[idx] = {
          ...prompt[idx],
          providerOptions: {
            ...existingOptions,
            anthropic: {
              ...existingAnthropic,
              cacheControl: { type: 'ephemeral' },
            },
          },
        };
      }
      return { ...params, prompt: prompt as unknown as LanguageModelV3Prompt };
    },
  };
}

View File

@@ -0,0 +1,426 @@
import type {
LanguageModelV3,
LanguageModelV3CallOptions,
LanguageModelV3GenerateResult,
LanguageModelV3StreamResult,
LanguageModelV3StreamPart,
LanguageModelV3Prompt,
LanguageModelV3Content,
LanguageModelV3Usage,
LanguageModelV3FinishReason,
} from '@ai-sdk/provider';
import type { ISmartAiOptions, IOllamaModelOptions } from './smartai.interfaces.js';
/** A message in Ollama's native /api/chat wire format. */
interface IOllamaMessage {
// One of 'system' | 'user' | 'assistant' | 'tool' (as produced by the converter below).
role: string;
content: string;
// Base64-encoded images attached to a user message.
images?: string[];
// Native tool calls emitted by (or replayed for) the assistant.
tool_calls?: Array<{
function: { name: string; arguments: Record<string, unknown> };
}>;
// Reasoning text when thinking mode is enabled.
thinking?: string;
}
/** A tool definition in Ollama's native /api/chat wire format. */
interface IOllamaTool {
type: 'function';
function: {
name: string;
description: string;
// JSON schema describing the tool's input parameters.
parameters: Record<string, unknown>;
};
}
/**
 * Convert AI SDK V3 prompt messages to Ollama's message format.
 * Text parts are concatenated; user image files become base64 `images`;
 * assistant reasoning/tool-call parts map to `thinking`/`tool_calls`;
 * tool results become plain-text tool messages.
 */
function convertPromptToOllamaMessages(prompt: LanguageModelV3Prompt): IOllamaMessage[] {
  const converted: IOllamaMessage[] = [];
  for (const entry of prompt) {
    switch (entry.role) {
      case 'system':
        // System message content is a plain string in V3.
        converted.push({ role: 'system', content: entry.content });
        break;
      case 'user': {
        let textAccum = '';
        const imageList: string[] = [];
        for (const piece of entry.content) {
          if (piece.type === 'text') {
            textAccum += piece.text;
          } else if (piece.type === 'file' && piece.mediaType?.startsWith('image/')) {
            // Ollama expects base64-encoded images.
            if (typeof piece.data === 'string') {
              imageList.push(piece.data);
            } else if (piece.data instanceof Uint8Array) {
              imageList.push(Buffer.from(piece.data).toString('base64'));
            }
          }
        }
        const userMsg: IOllamaMessage = { role: 'user', content: textAccum };
        if (imageList.length > 0) userMsg.images = imageList;
        converted.push(userMsg);
        break;
      }
      case 'assistant': {
        let textAccum = '';
        let reasoningAccum = '';
        const calls: IOllamaMessage['tool_calls'] = [];
        for (const piece of entry.content) {
          if (piece.type === 'text') {
            textAccum += piece.text;
          } else if (piece.type === 'reasoning') {
            reasoningAccum += piece.text;
          } else if (piece.type === 'tool-call') {
            // Tool input may arrive as a JSON string or a parsed object.
            const parsedArgs = typeof piece.input === 'string'
              ? JSON.parse(piece.input as string)
              : (piece.input as Record<string, unknown>);
            calls.push({
              function: {
                name: piece.toolName,
                arguments: parsedArgs,
              },
            });
          }
        }
        const assistantMsg: IOllamaMessage = { role: 'assistant', content: textAccum };
        if (calls.length > 0) assistantMsg.tool_calls = calls;
        if (reasoningAccum) assistantMsg.thinking = reasoningAccum;
        converted.push(assistantMsg);
        break;
      }
      case 'tool':
        // Each tool-result part becomes its own tool message.
        for (const piece of entry.content) {
          if (piece.type !== 'tool-result') continue;
          let resultText = '';
          if (piece.output) {
            if (piece.output.type === 'text') {
              resultText = piece.output.value;
            } else if (piece.output.type === 'json') {
              resultText = JSON.stringify(piece.output.value);
            }
          }
          converted.push({ role: 'tool', content: resultText });
        }
        break;
    }
  }
  return converted;
}
/**
 * Convert AI SDK V3 tools to Ollama's tool format.
 * Only function tools are supported; returns undefined when no tools are given.
 */
function convertToolsToOllamaTools(tools: LanguageModelV3CallOptions['tools']): IOllamaTool[] | undefined {
  if (!tools || tools.length === 0) return undefined;
  const converted: IOllamaTool[] = [];
  for (const tool of tools) {
    // Provider-defined (non-function) tools have no Ollama equivalent.
    if (tool.type !== 'function') continue;
    converted.push({
      type: 'function',
      function: {
        name: tool.name,
        description: tool.description ?? '',
        parameters: tool.inputSchema as Record<string, unknown>,
      },
    });
  }
  return converted;
}
/**
 * Build a LanguageModelV3Usage record from Ollama's token counts.
 * Cache and reasoning breakdowns are not reported by Ollama, so those
 * fields are always undefined.
 */
function makeUsage(promptTokens?: number, completionTokens?: number): LanguageModelV3Usage {
  const inputTokens = {
    total: promptTokens,
    noCache: undefined,
    cacheRead: undefined,
    cacheWrite: undefined,
  };
  const outputTokens = {
    total: completionTokens,
    text: completionTokens,
    reasoning: undefined,
  };
  return { inputTokens, outputTokens };
}
/**
 * Map Ollama's finish state onto the V3 finish-reason shape.
 * Tool-call finishes map to 'tool-calls'; everything else is a normal stop.
 */
function makeFinishReason(reason?: string): LanguageModelV3FinishReason {
  const isToolCallFinish = reason === 'tool_calls' || reason === 'tool-calls';
  return isToolCallFinish
    ? { unified: 'tool-calls', raw: reason }
    : { unified: 'stop', raw: reason ?? 'stop' };
}
// Monotonic counter so ids generated within the same millisecond stay unique.
let idCounter = 0;

/** Generate a process-unique id for tool calls and stream parts. */
function generateId(): string {
  const sequence = idCounter++;
  return `ollama-${Date.now()}-${sequence}`;
}
/**
* Custom LanguageModelV3 implementation for Ollama.
* Calls Ollama's native /api/chat endpoint directly to support
* think, num_ctx, temperature, and other model options.
*/
export function createOllamaModel(options: ISmartAiOptions): LanguageModelV3 {
const baseUrl = options.baseUrl ?? 'http://localhost:11434';
const modelId = options.model;
const ollamaOpts: IOllamaModelOptions = { ...options.ollamaOptions };
// Apply default temperature of 0.55 for Qwen models
if (modelId.toLowerCase().includes('qwen') && ollamaOpts.temperature === undefined) {
ollamaOpts.temperature = 0.55;
}
const model: LanguageModelV3 = {
specificationVersion: 'v3',
provider: 'ollama',
modelId,
supportedUrls: {},
async doGenerate(callOptions: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult> {
const messages = convertPromptToOllamaMessages(callOptions.prompt);
const tools = convertToolsToOllamaTools(callOptions.tools);
const ollamaModelOptions: Record<string, unknown> = { ...ollamaOpts };
// Override with call-level options if provided
if (callOptions.temperature !== undefined) ollamaModelOptions.temperature = callOptions.temperature;
if (callOptions.topP !== undefined) ollamaModelOptions.top_p = callOptions.topP;
if (callOptions.topK !== undefined) ollamaModelOptions.top_k = callOptions.topK;
if (callOptions.maxOutputTokens !== undefined) ollamaModelOptions.num_predict = callOptions.maxOutputTokens;
if (callOptions.seed !== undefined) ollamaModelOptions.seed = callOptions.seed;
if (callOptions.stopSequences) ollamaModelOptions.stop = callOptions.stopSequences;
// Remove think from options — it goes at the top level
const { think, ...modelOpts } = ollamaModelOptions;
const requestBody: Record<string, unknown> = {
model: modelId,
messages,
stream: false,
options: modelOpts,
};
// Add think parameter at the top level (Ollama API requirement)
if (ollamaOpts.think !== undefined) {
requestBody.think = ollamaOpts.think;
}
if (tools) requestBody.tools = tools;
const response = await fetch(`${baseUrl}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(requestBody),
signal: callOptions.abortSignal,
});
if (!response.ok) {
const body = await response.text();
throw new Error(`Ollama API error ${response.status}: ${body}`);
}
const result = await response.json() as Record<string, unknown>;
const message = result.message as Record<string, unknown>;
// Build content array
const content: LanguageModelV3Content[] = [];
// Add reasoning if present
if (message.thinking && typeof message.thinking === 'string') {
content.push({ type: 'reasoning', text: message.thinking });
}
// Add text content
if (message.content && typeof message.content === 'string') {
content.push({ type: 'text', text: message.content });
}
// Add tool calls if present
if (Array.isArray(message.tool_calls)) {
for (const tc of message.tool_calls as Array<Record<string, unknown>>) {
const fn = tc.function as Record<string, unknown>;
content.push({
type: 'tool-call',
toolCallId: generateId(),
toolName: fn.name as string,
input: JSON.stringify(fn.arguments),
});
}
}
const finishReason = Array.isArray(message.tool_calls) && (message.tool_calls as unknown[]).length > 0
? makeFinishReason('tool_calls')
: makeFinishReason('stop');
return {
content,
finishReason,
usage: makeUsage(
(result.prompt_eval_count as number) ?? undefined,
(result.eval_count as number) ?? undefined,
),
warnings: [],
request: { body: requestBody },
};
},
// Streams a chat completion from Ollama's /api/chat endpoint (NDJSON) and
// adapts each chunk into AI SDK LanguageModelV3 stream parts:
// reasoning-start/-delta/-end, text-start/-delta/-end, tool-call, finish.
async doStream(callOptions: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult> {
const messages = convertPromptToOllamaMessages(callOptions.prompt);
const tools = convertToolsToOllamaTools(callOptions.tools);
// Merge provider defaults with per-call sampling settings, translating
// AI SDK names to Ollama option names (top_p, top_k, num_predict, stop).
const ollamaModelOptions: Record<string, unknown> = { ...ollamaOpts };
if (callOptions.temperature !== undefined) ollamaModelOptions.temperature = callOptions.temperature;
if (callOptions.topP !== undefined) ollamaModelOptions.top_p = callOptions.topP;
if (callOptions.topK !== undefined) ollamaModelOptions.top_k = callOptions.topK;
if (callOptions.maxOutputTokens !== undefined) ollamaModelOptions.num_predict = callOptions.maxOutputTokens;
if (callOptions.seed !== undefined) ollamaModelOptions.seed = callOptions.seed;
if (callOptions.stopSequences) ollamaModelOptions.stop = callOptions.stopSequences;
// `think` is a top-level request field in the Ollama API, not a model
// option, so it is stripped from the options object here and re-attached
// to the request body below.
const { think, ...modelOpts } = ollamaModelOptions;
const requestBody: Record<string, unknown> = {
model: modelId,
messages,
stream: true,
options: modelOpts,
};
if (ollamaOpts.think !== undefined) {
requestBody.think = ollamaOpts.think;
}
if (tools) requestBody.tools = tools;
const response = await fetch(`${baseUrl}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(requestBody),
signal: callOptions.abortSignal,
});
if (!response.ok) {
const body = await response.text();
throw new Error(`Ollama API error ${response.status}: ${body}`);
}
const reader = response.body!.getReader();
const decoder = new TextDecoder();
// Stable part ids for this response's single text and reasoning streams.
const textId = generateId();
const reasoningId = generateId();
// State flags: which part streams have been opened, whether any tool call
// was seen (selects the finish reason), and whether the controller has
// already been closed (guards against double-close/enqueue-after-close).
let textStarted = false;
let reasoningStarted = false;
let hasToolCalls = false;
let closed = false;
const stream = new ReadableStream<LanguageModelV3StreamPart>({
// The entire upstream body is consumed within the first pull; the
// `closed` flag makes any later pull a no-op.
async pull(controller) {
if (closed) return;
// Parses one NDJSON line and enqueues the corresponding stream parts.
const processLine = (line: string) => {
if (!line.trim()) return;
let json: Record<string, unknown>;
try {
json = JSON.parse(line);
} catch {
// Skip malformed/partial lines rather than failing the whole stream.
return;
}
const msg = json.message as Record<string, unknown> | undefined;
// Handle thinking/reasoning content
if (msg?.thinking && typeof msg.thinking === 'string') {
if (!reasoningStarted) {
reasoningStarted = true;
controller.enqueue({ type: 'reasoning-start', id: reasoningId });
}
controller.enqueue({ type: 'reasoning-delta', id: reasoningId, delta: msg.thinking });
}
// Handle text content
if (msg?.content && typeof msg.content === 'string') {
// The first text chunk closes the reasoning stream, if one is open.
if (reasoningStarted && !textStarted) {
controller.enqueue({ type: 'reasoning-end', id: reasoningId });
}
if (!textStarted) {
textStarted = true;
controller.enqueue({ type: 'text-start', id: textId });
}
controller.enqueue({ type: 'text-delta', id: textId, delta: msg.content });
}
// Handle tool calls
if (Array.isArray(msg?.tool_calls)) {
hasToolCalls = true;
for (const tc of msg!.tool_calls as Array<Record<string, unknown>>) {
const fn = tc.function as Record<string, unknown>;
// No call id is read from the chunk; a fresh one is generated
// for every tool call.
const callId = generateId();
controller.enqueue({
type: 'tool-call',
toolCallId: callId,
toolName: fn.name as string,
input: JSON.stringify(fn.arguments),
});
}
}
// Handle done
if (json.done) {
// Close any still-open part streams before the terminal finish part.
if (reasoningStarted && !textStarted) {
controller.enqueue({ type: 'reasoning-end', id: reasoningId });
}
if (textStarted) {
controller.enqueue({ type: 'text-end', id: textId });
}
controller.enqueue({
type: 'finish',
finishReason: hasToolCalls
? makeFinishReason('tool_calls')
: makeFinishReason('stop'),
usage: makeUsage(
(json.prompt_eval_count as number) ?? undefined,
(json.eval_count as number) ?? undefined,
),
});
closed = true;
controller.close();
}
};
try {
// Buffered NDJSON reader: split decoded chunks on newlines and keep
// the trailing partial line in `buffer` for the next read.
let buffer = '';
while (true) {
const { done, value } = await reader.read();
if (done) {
if (buffer.trim()) processLine(buffer);
// Upstream ended without a done:true chunk — emit a fallback
// finish so consumers always observe a terminal part.
if (!closed) {
controller.enqueue({
type: 'finish',
finishReason: makeFinishReason('stop'),
usage: makeUsage(undefined, undefined),
});
closed = true;
controller.close();
}
return;
}
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split('\n');
buffer = lines.pop() || '';
for (const line of lines) {
processLine(line);
if (closed) return;
}
}
} catch (error) {
if (!closed) {
controller.error(error);
closed = true;
}
} finally {
reader.releaseLock();
}
},
});
return {
stream,
request: { body: requestBody },
};
},
};
return model;
}

24
ts_audio/index.ts Normal file
View File

@@ -0,0 +1,24 @@
import * as plugins from './plugins.js';
import { Readable } from 'stream';
// Options for OpenAI text-to-speech synthesis (see textToSpeech below).
export interface IOpenAiTtsOptions {
// OpenAI API key used to authenticate the request.
apiKey: string;
// The text to synthesize into speech.
text: string;
// Voice preset; defaults to 'alloy' when omitted.
voice?: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
// TTS model; defaults to 'tts-1' when omitted.
model?: 'tts-1' | 'tts-1-hd';
// Audio container/encoding; defaults to 'mp3' when omitted.
responseFormat?: 'mp3' | 'opus' | 'aac' | 'flac';
// Playback speed multiplier; defaults to 1 when omitted.
speed?: number;
}
/**
 * Synthesizes speech from text using the OpenAI TTS endpoint.
 *
 * @param options - API key, input text, and optional voice/model/format/speed.
 * @returns A Node.js readable stream of encoded audio bytes.
 * @throws Error when the API response carries no body stream.
 */
export async function textToSpeech(options: IOpenAiTtsOptions): Promise<NodeJS.ReadableStream> {
  const client = new plugins.OpenAI({ apiKey: options.apiKey });
  const result = await client.audio.speech.create({
    model: options.model ?? 'tts-1',
    voice: options.voice ?? 'alloy',
    input: options.text,
    response_format: options.responseFormat ?? 'mp3',
    speed: options.speed ?? 1,
  });
  const stream = result.body;
  // Guard against a missing body so callers get a clear error instead of a
  // TypeError from Readable.fromWeb(null).
  if (!stream) {
    throw new Error('OpenAI TTS response contained no audio stream');
  }
  // The SDK types the body as a DOM ReadableStream; Readable.fromWeb expects
  // the node:stream/web flavor, so a cast is still required here.
  return Readable.fromWeb(stream as any);
}

2
ts_audio/plugins.ts Normal file
View File

@@ -0,0 +1,2 @@
import OpenAI from 'openai';
export { OpenAI };

61
ts_document/index.ts Normal file
View File

@@ -0,0 +1,61 @@
import * as plugins from './plugins.js';
import type { LanguageModelV3 } from '@ai-sdk/provider';
import type { ModelMessage } from 'ai';
// Lazily-created shared SmartPdf instance (started once, reused across calls).
let smartpdfInstance: InstanceType<typeof plugins.smartpdf.SmartPdf> | null = null;
/**
 * Returns the shared SmartPdf instance, starting it on first use.
 *
 * The instance is cached only after start() succeeds, so a failed startup
 * does not leave a half-initialized instance behind that every later call
 * would silently reuse.
 */
async function ensureSmartpdf(): Promise<InstanceType<typeof plugins.smartpdf.SmartPdf>> {
  if (!smartpdfInstance) {
    const instance = new plugins.smartpdf.SmartPdf();
    await instance.start();
    smartpdfInstance = instance;
  }
  return smartpdfInstance;
}
// Options for PDF document analysis with a vision-capable model.
export interface IDocumentOptions {
// AI SDK language model handle used for generation.
model: LanguageModelV3;
// Optional system prompt for the conversation.
systemMessage?: string;
// The user's question or instruction about the documents.
userMessage: string;
// Raw PDF file contents; each document is rendered to PNG page images.
pdfDocuments: Uint8Array[];
// Optional prior conversation turns placed before the new user message.
messageHistory?: ModelMessage[];
}
/**
 * Renders every supplied PDF to PNG page images and asks the given
 * vision-capable model to analyze them alongside the user's message.
 *
 * @param options - Model handle, prompts, PDF buffers, optional history.
 * @returns The model's generated answer text.
 */
export async function analyzeDocuments(options: IDocumentOptions): Promise<string> {
  const pdf = await ensureSmartpdf();
  const imagePages: Uint8Array[] = [];
  for (const doc of options.pdfDocuments) {
    const pages = await pdf.convertPDFToPngBytes(doc);
    imagePages.push(...pages);
  }
  // Filter out empty buffers so the model never receives zero-byte images.
  const validPages = imagePages.filter(page => page && page.length > 0);
  const result = await plugins.generateText({
    model: options.model,
    system: options.systemMessage,
    messages: [
      ...(options.messageHistory ?? []),
      {
        role: 'user',
        content: [
          { type: 'text', text: options.userMessage },
          ...validPages.map(page => ({
            type: 'image' as const,
            image: page,
            // `mediaType` is the current AI SDK image-part key (renamed from
            // the pre-v5 `mimeType`); matches the usage in ts_vision.
            mediaType: 'image/png' as const,
          })),
        ],
      },
    ],
  });
  return result.text;
}
/**
 * Stops and releases the shared SmartPdf instance, if one was started.
 * Safe to call when no instance exists; the cache is cleared only after a
 * successful stop so a failed shutdown can be retried.
 */
export async function stopSmartpdf(): Promise<void> {
  if (smartpdfInstance === null) {
    return;
  }
  await smartpdfInstance.stop();
  smartpdfInstance = null;
}

3
ts_document/plugins.ts Normal file
View File

@@ -0,0 +1,3 @@
import { generateText } from 'ai';
import * as smartpdf from '@push.rocks/smartpdf';
export { generateText, smartpdf };

147
ts_image/index.ts Normal file
View File

@@ -0,0 +1,147 @@
import * as plugins from './plugins.js';
// Options for image generation via the OpenAI Images API.
// Fields marked gpt-image-1-only / dall-e-3-only are forwarded only for
// that model (see generateImage's per-model branching).
export interface IImageGenerateOptions {
// OpenAI API key used to authenticate the request.
apiKey: string;
// Text description of the desired image.
prompt: string;
// Target model; defaults to 'gpt-image-1' when omitted.
model?: 'gpt-image-1' | 'dall-e-3' | 'dall-e-2';
// Output quality. Accepted values differ per model — TODO confirm mapping
// ('low'/'medium'/'high'/'auto' vs 'standard'/'hd') against the API docs.
quality?: 'low' | 'medium' | 'high' | 'standard' | 'hd' | 'auto';
// Output dimensions; supported sizes vary by model.
size?: '256x256' | '512x512' | '1024x1024' | '1536x1024' | '1024x1536' | '1792x1024' | '1024x1792' | 'auto';
// Rendering style (dall-e-3 only).
style?: 'vivid' | 'natural';
// Background handling (gpt-image-1 only).
background?: 'transparent' | 'opaque' | 'auto';
// Output encoding (gpt-image-1 only).
outputFormat?: 'png' | 'jpeg' | 'webp';
// Compression level for the output (gpt-image-1 only).
outputCompression?: number;
// Moderation strictness (gpt-image-1 only).
moderation?: 'low' | 'auto';
// Number of images to request; defaults to 1.
n?: number;
// Enable streaming responses (gpt-image-1 only).
stream?: boolean;
// Number of partial images to stream (gpt-image-1 only).
partialImages?: number;
}
// Options for image editing via the OpenAI Images API.
// gpt-image-1-only fields are forwarded only for that model (see editImage).
export interface IImageEditOptions {
// OpenAI API key used to authenticate the request.
apiKey: string;
// Source image bytes to edit (uploaded as a PNG file).
image: Buffer;
// Text description of the desired edit.
prompt: string;
// Optional mask bytes; transparent areas mark the region to edit —
// TODO confirm mask semantics against the API docs.
mask?: Buffer;
// Target model; defaults to 'gpt-image-1' when omitted.
model?: 'gpt-image-1' | 'dall-e-2';
// Output quality (gpt-image-1 only).
quality?: 'low' | 'medium' | 'high' | 'standard' | 'auto';
// Output dimensions; supported sizes vary by model.
size?: '256x256' | '512x512' | '1024x1024' | '1536x1024' | '1024x1536' | 'auto';
// Background handling (gpt-image-1 only).
background?: 'transparent' | 'opaque' | 'auto';
// Output encoding (gpt-image-1 only).
outputFormat?: 'png' | 'jpeg' | 'webp';
// Compression level for the output (gpt-image-1 only).
outputCompression?: number;
// Number of images to request; defaults to 1.
n?: number;
// Enable streaming responses (gpt-image-1 only).
stream?: boolean;
// Number of partial images to stream (gpt-image-1 only).
partialImages?: number;
}
// Normalized result of an image generation or edit call.
export interface IImageResponse {
// One entry per returned image.
images: Array<{
// Base64-encoded image payload, when returned by the API.
b64_json?: string;
// Hosted image URL, when returned by the API.
url?: string;
// Prompt rewrite applied by the model, when provided.
revisedPrompt?: string;
}>;
// Request/response details echoed back from the API, when available.
metadata?: {
model: string;
quality?: string;
size?: string;
outputFormat?: string;
// Total token usage reported by the API, when present.
tokensUsed?: number;
};
}
/**
 * Creates images from a text prompt using the OpenAI Images API.
 *
 * Only the parameters supported by the chosen model are forwarded, and the
 * DALL·E models are explicitly asked for base64 payloads so every model
 * produces a comparable response shape.
 *
 * @param options - API key, prompt, model selection, and tuning parameters.
 * @returns Normalized images plus response metadata.
 */
export async function generateImage(options: IImageGenerateOptions): Promise<IImageResponse> {
  const client = new plugins.OpenAI({ apiKey: options.apiKey });
  const model = options.model || 'gpt-image-1';
  const requestParams: Record<string, unknown> = {
    model,
    prompt: options.prompt,
    n: options.n || 1,
  };
  switch (model) {
    case 'gpt-image-1': {
      if (options.quality) requestParams.quality = options.quality;
      if (options.size) requestParams.size = options.size;
      if (options.background) requestParams.background = options.background;
      if (options.outputFormat) requestParams.output_format = options.outputFormat;
      if (options.outputCompression !== undefined) requestParams.output_compression = options.outputCompression;
      if (options.moderation) requestParams.moderation = options.moderation;
      if (options.stream !== undefined) requestParams.stream = options.stream;
      if (options.partialImages !== undefined) requestParams.partial_images = options.partialImages;
      break;
    }
    case 'dall-e-3': {
      if (options.quality) requestParams.quality = options.quality;
      if (options.size) requestParams.size = options.size;
      if (options.style) requestParams.style = options.style;
      requestParams.response_format = 'b64_json';
      break;
    }
    case 'dall-e-2': {
      if (options.size) requestParams.size = options.size;
      requestParams.response_format = 'b64_json';
      break;
    }
  }
  const response: any = await client.images.generate(requestParams as any);
  const images = (response.data || []).map((entry: any) => ({
    b64_json: entry.b64_json,
    url: entry.url,
    revisedPrompt: entry.revised_prompt,
  }));
  return {
    images,
    metadata: {
      model,
      quality: response.quality,
      size: response.size,
      outputFormat: response.output_format,
      tokensUsed: response.usage?.total_tokens,
    },
  };
}
/**
 * Edits an existing image (optionally constrained by a mask) using the
 * OpenAI Images API.
 *
 * Only the parameters supported by the chosen model are forwarded, and
 * dall-e-2 is explicitly asked for base64 payloads.
 *
 * @param options - API key, source image, prompt, optional mask and tuning.
 * @returns Normalized images plus response metadata.
 */
export async function editImage(options: IImageEditOptions): Promise<IImageResponse> {
  const client = new plugins.OpenAI({ apiKey: options.apiKey });
  const model = options.model || 'gpt-image-1';
  // The raw buffers must be wrapped as uploadable PNG files for the SDK.
  const imageFile = await plugins.toFile(options.image, 'image.png', { type: 'image/png' });
  const requestParams: Record<string, unknown> = {
    model,
    image: imageFile,
    prompt: options.prompt,
    n: options.n || 1,
  };
  if (options.mask) {
    requestParams.mask = await plugins.toFile(options.mask, 'mask.png', { type: 'image/png' });
  }
  switch (model) {
    case 'gpt-image-1': {
      if (options.quality) requestParams.quality = options.quality;
      if (options.size) requestParams.size = options.size;
      if (options.background) requestParams.background = options.background;
      if (options.outputFormat) requestParams.output_format = options.outputFormat;
      if (options.outputCompression !== undefined) requestParams.output_compression = options.outputCompression;
      if (options.stream !== undefined) requestParams.stream = options.stream;
      if (options.partialImages !== undefined) requestParams.partial_images = options.partialImages;
      break;
    }
    case 'dall-e-2': {
      if (options.size) requestParams.size = options.size;
      requestParams.response_format = 'b64_json';
      break;
    }
  }
  const response: any = await client.images.edit(requestParams as any);
  const images = (response.data || []).map((entry: any) => ({
    b64_json: entry.b64_json,
    url: entry.url,
    revisedPrompt: entry.revised_prompt,
  }));
  return {
    images,
    metadata: {
      model,
      quality: response.quality,
      size: response.size,
      outputFormat: response.output_format,
      tokensUsed: response.usage?.total_tokens,
    },
  };
}

3
ts_image/plugins.ts Normal file
View File

@@ -0,0 +1,3 @@
import OpenAI from 'openai';
import { toFile } from 'openai';
export { OpenAI, toFile };

120
ts_research/index.ts Normal file
View File

@@ -0,0 +1,120 @@
import * as plugins from './plugins.js';
// Options for a web-search-backed research query (see research below).
export interface IResearchOptions {
// Anthropic API key used to authenticate the request.
apiKey: string;
// The research question to answer.
query: string;
// Requested depth; currently only echoed into response metadata, it does
// not change the request sent to the API.
searchDepth?: 'basic' | 'advanced' | 'deep';
// Upper bound on web searches (mapped to the search tool's max_uses).
maxSources?: number;
// Restrict searches to these domains; takes precedence over blockedDomains.
allowedDomains?: string[];
// Exclude these domains; ignored when allowedDomains is also set.
blockedDomains?: string[];
}
// Result of a research query.
export interface IResearchResponse {
// The model's synthesized answer text.
answer: string;
// Cited and discovered sources (search results are deduplicated by URL).
sources: Array<{ url: string; title: string; snippet: string }>;
// Queries the model issued to the web search tool, when any were captured.
searchQueries?: string[];
// Model name, requested depth, and usage details for the run.
metadata?: Record<string, unknown>;
}
/**
 * Answers a research query with Claude using the server-side web search
 * tool, returning the generated answer together with the sources and the
 * search queries the model issued.
 *
 * Domain filters: allowedDomains takes precedence over blockedDomains when
 * both are provided (only one filter is sent to the API).
 *
 * @param options - API key, query, and optional depth/source constraints.
 * @returns Answer text, collected sources, search queries, and metadata.
 */
export async function research(options: IResearchOptions): Promise<IResearchResponse> {
  const client = new plugins.Anthropic({ apiKey: options.apiKey });
  const systemMessage = `You are a research assistant with web search capabilities.
Provide comprehensive, well-researched answers with citations and sources.
When searching the web, be thorough and cite your sources accurately.`;
  // Configure the server-side web search tool.
  const webSearchTool: any = {
    type: 'web_search_20250305',
    name: 'web_search',
  };
  if (options.maxSources) {
    webSearchTool.max_uses = options.maxSources;
  }
  if (options.allowedDomains?.length) {
    webSearchTool.allowed_domains = options.allowedDomains;
  } else if (options.blockedDomains?.length) {
    webSearchTool.blocked_domains = options.blockedDomains;
  }
  const result = await client.messages.create({
    model: 'claude-sonnet-4-5-20250929',
    system: systemMessage,
    messages: [
      { role: 'user' as const, content: options.query },
    ],
    max_tokens: 20000,
    temperature: 0.7,
    tools: [webSearchTool],
  });
  // Walk the content blocks, accumulating answer text, cited sources, and
  // the queries that were sent to the search tool.
  let answer = '';
  const sources: Array<{ url: string; title: string; snippet: string }> = [];
  const searchQueries: string[] = [];
  for (const contentBlock of result.content) {
    const raw: any = contentBlock;
    if ('text' in raw) {
      answer += raw.text;
      // Collect any web-search citations attached to this text block.
      const citations = Array.isArray(raw.citations) ? raw.citations : [];
      for (const citation of citations) {
        if (citation.type !== 'web_search_result_location') continue;
        sources.push({
          title: citation.title || '',
          url: citation.url || '',
          snippet: citation.cited_text || '',
        });
      }
    } else if (raw.type === 'server_tool_use') {
      if (raw.name === 'web_search' && raw.input?.query) {
        searchQueries.push(raw.input.query);
      }
    } else if (raw.type === 'web_search_tool_result') {
      const items = Array.isArray(raw.content) ? raw.content : [];
      for (const entry of items) {
        if (entry.type !== 'web_search_result') continue;
        // Deduplicate raw search results by URL.
        if (sources.some(s => s.url === entry.url)) continue;
        sources.push({
          title: entry.title || '',
          url: entry.url || '',
          snippet: '',
        });
      }
    }
  }
  // Fallback: when no structured citations were found, mine markdown links
  // out of the answer text.
  if (sources.length === 0) {
    const urlRegex = /\[([^\]]+)\]\(([^)]+)\)/g;
    let match: RegExpExecArray | null;
    while ((match = urlRegex.exec(answer)) !== null) {
      sources.push({
        title: match[1],
        url: match[2],
        snippet: '',
      });
    }
  }
  const usage: any = result.usage;
  return {
    answer,
    sources,
    searchQueries: searchQueries.length > 0 ? searchQueries : undefined,
    metadata: {
      model: 'claude-sonnet-4-5-20250929',
      searchDepth: options.searchDepth || 'basic',
      tokensUsed: usage?.output_tokens,
      webSearchesPerformed: usage?.server_tool_use?.web_search_requests ?? 0,
    },
  };
}

2
ts_research/plugins.ts Normal file
View File

@@ -0,0 +1,2 @@
import Anthropic from '@anthropic-ai/sdk';
export { Anthropic };

29
ts_vision/index.ts Normal file
View File

@@ -0,0 +1,29 @@
import * as plugins from './plugins.js';
import type { LanguageModelV3 } from '@ai-sdk/provider';
// Options for single-image visual analysis.
export interface IVisionOptions {
// AI SDK language model handle; presumably must support image input —
// verify against the provider's capabilities.
model: LanguageModelV3;
// Raw image bytes to analyze.
image: Buffer | Uint8Array;
// Instruction or question about the image.
prompt: string;
// Image MIME type; defaults to 'image/jpeg' when omitted.
mediaType?: 'image/jpeg' | 'image/png' | 'image/webp' | 'image/gif';
}
/**
 * Sends one image plus a text prompt to a vision-capable model and returns
 * the model's textual answer.
 *
 * @param options - Model handle, image bytes, prompt, optional media type.
 * @returns The generated answer text.
 */
export async function analyzeImage(options: IVisionOptions): Promise<string> {
  // Build the multimodal user message: the prompt followed by the image.
  const userContent = [
    { type: 'text' as const, text: options.prompt },
    {
      type: 'image' as const,
      image: options.image,
      mediaType: options.mediaType ?? 'image/jpeg',
    },
  ];
  const result = await plugins.generateText({
    model: options.model,
    messages: [{ role: 'user', content: userContent }],
  });
  return result.text;
}

2
ts_vision/plugins.ts Normal file
View File

@@ -0,0 +1,2 @@
import { generateText } from 'ai';
export { generateText };

View File

@@ -6,9 +6,9 @@
"module": "NodeNext",
"moduleResolution": "NodeNext",
"esModuleInterop": true,
"verbatimModuleSyntax": true
"verbatimModuleSyntax": true,
"baseUrl": ".",
"paths": {}
},
"exclude": [
"dist_*/**/*.d.ts"
]
"exclude": ["dist_*/**/*.d.ts"]
}