feat(OllamaProvider): add model options, streaming support, and thinking tokens

- Add IOllamaModelOptions interface for runtime options (num_ctx, temperature, etc.)
- Extend IOllamaProviderOptions with defaultOptions and defaultTimeout
- Add IOllamaChatOptions for per-request overrides
- Add IOllamaStreamChunk and IOllamaChatResponse interfaces
- Add chatStreamResponse() for async iteration with options
- Add collectStreamResponse() for streaming with progress callback
- Add chatWithOptions() for non-streaming with full options
- Update chat() to use defaultOptions and defaultTimeout (new surfaces sketched below)
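
The sketch below restates the new option and streaming surfaces from the list above. Only the interface and method names, plus the fields called out explicitly (num_ctx, temperature, defaultOptions, defaultTimeout), come from this commit message; every other field name and signature is a hypothetical illustration, not the actual implementation.

// Hypothetical shapes for the new interfaces; fields not named in the
// commit message are guesses for illustration only.
interface IOllamaModelOptions {
  num_ctx?: number;       // context window size (named above)
  temperature?: number;   // sampling temperature (named above)
  [key: string]: unknown; // other Ollama runtime options ("etc.")
}

interface IOllamaProviderOptions {
  defaultOptions?: IOllamaModelOptions; // new: applied to every request
  defaultTimeout?: number;              // new: fallback timeout (assumed ms)
}

interface IOllamaChatOptions {
  options?: IOllamaModelOptions; // per-request override of defaultOptions
  timeout?: number;              // per-request override of defaultTimeout
}

interface IOllamaStreamChunk {
  content?: string;  // guessed: incremental response text
  thinking?: string; // guessed: incremental thinking tokens
  done?: boolean;    // guessed: end-of-stream marker
}

interface IOllamaChatResponse {
  message: string;   // guessed: final assembled response
  thinking?: string; // guessed: collected thinking tokens
}

// Guessed consumption pattern for the three new methods; only the method
// names are from the commit message, argument shapes are illustrative.
async function demo(provider: {
  chatStreamResponse(prompt: string, opts?: IOllamaChatOptions): AsyncIterable<IOllamaStreamChunk>;
  collectStreamResponse(prompt: string, onChunk: (c: IOllamaStreamChunk) => void, opts?: IOllamaChatOptions): Promise<IOllamaChatResponse>;
  chatWithOptions(prompt: string, opts?: IOllamaChatOptions): Promise<IOllamaChatResponse>;
}) {
  // Async iteration with a per-request option override.
  for await (const chunk of provider.chatStreamResponse('hello', { options: { temperature: 0 } })) {
    if (chunk.thinking) console.log('[thinking]', chunk.thinking);
    if (chunk.content) process.stdout.write(chunk.content);
  }
  // Streaming collected into one response, with a progress callback.
  const collected = await provider.collectStreamResponse('hello', (c) => console.log('progress', c));
  // Non-streaming call with full options.
  const full = await provider.chatWithOptions('hello', { timeout: 30_000 });
  console.log(collected.message, full.message);
}

If the implementation follows the usual pattern, per-request options shallow-merge over defaultOptions and timeout falls back to defaultTimeout, which would let the existing chat() pick up both without a signature change; that is an inference from the bullet list, not verified against the diff.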
commit 126e9b239b (parent a556053510)
Date: 2026-01-20 00:02:45 +00:00
12 changed files with 320 additions and 74 deletions

@@ -1,8 +1,9 @@
 import { expect, tap } from '@git.zone/tstest/tapbundle';
 import * as qenv from '@push.rocks/qenv';
-import * as smartfile from '@push.rocks/smartfile';
+import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
 const testQenv = new qenv.Qenv('./', './.nogit/');
+const smartfs = new SmartFs(new SmartFsProviderNode());
 import * as smartai from '../ts/index.js';
@@ -26,7 +27,7 @@ tap.test('OpenAI Audio: should create audio response', async () => {
     chunks.push(chunk as Uint8Array);
   }
   const audioBuffer = Buffer.concat(chunks);
-  await smartfile.fs.toFs(audioBuffer, './.nogit/testoutput.mp3');
+  await smartfs.file('./.nogit/testoutput.mp3').write(audioBuffer);
   console.log(`Audio Buffer length: ${audioBuffer.length}`);
   // Assert that the resulting buffer is not empty.
   expect(audioBuffer.length).toBeGreaterThan(0);
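
Aside from the Ollama changes, the test diff above migrates file output from @push.rocks/smartfile's smartfile.fs.toFs(buffer, path) helper to @push.rocks/smartfs, where a SmartFs instance is constructed over a SmartFsProviderNode backend and writes go through smartfs.file(path).write(buffer). The provider-based design presumably lets the same file API target different storage backends; that rationale is an inference, not something stated in the commit.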