feat(openai): add getModelSetup() and typed provider options for OpenAI reasoning settings

This commit is contained in:
2026-05-06 19:09:58 +00:00
parent 8ad0b90f95
commit 5c871242b0
10 changed files with 952 additions and 1178 deletions
+17
View File
@@ -1,5 +1,22 @@
# Changelog # Changelog
## 2026-05-06 - 2.2.0 - feat(openai)
add getModelSetup() and typed provider options for OpenAI reasoning settings
- export getModelSetup() to return both the model and request-time providerOptions for AI SDK calls
- add typed OpenAI provider options including reasoningEffort and textVerbosity support
- cover providerOptions passthrough for generateText() and streamText() with tests
- update documentation and dependency versions for the new OpenAI provider options workflow
## 2026-05-06 - 2.1.0 - feat(openai)
add first-class OpenAI provider options for request-time reasoning settings
- add `getModelSetup()` returning a model plus AI SDK `providerOptions`
- add typed OpenAI provider options including `reasoningEffort: 'xhigh'` and `textVerbosity`
- keep `getModel()` backward compatible for existing consumers
- document GPT-5.5 xhigh usage and update dependency versions
- add tests proving provider options pass through `generateText()` and `streamText()`
## 2026-04-30 - 2.0.1 - fix(build) ## 2026-04-30 - 2.0.1 - fix(build)
update toolchain configuration and test error handling for stricter TypeScript builds update toolchain configuration and test error handling for stricter TypeScript builds
+15 -15
View File
@@ -1,6 +1,6 @@
{ {
"name": "@push.rocks/smartai", "name": "@push.rocks/smartai",
"version": "2.0.1", "version": "2.1.0",
"private": false, "private": false,
"description": "Provider registry and capability utilities for ai-sdk (Vercel AI SDK). Core export returns LanguageModel; subpath exports provide vision, audio, image, document and research capabilities.", "description": "Provider registry and capability utilities for ai-sdk (Vercel AI SDK). Core export returns LanguageModel; subpath exports provide vision, audio, image, document and research capabilities.",
"main": "dist_ts/index.js", "main": "dist_ts/index.js",
@@ -42,29 +42,29 @@
}, },
"devDependencies": { "devDependencies": {
"@git.zone/tsbuild": "^4.4.0", "@git.zone/tsbuild": "^4.4.0",
"@git.zone/tsrun": "^2.0.2", "@git.zone/tsrun": "^2.0.3",
"@git.zone/tstest": "^3.6.3", "@git.zone/tstest": "^3.6.3",
"@push.rocks/qenv": "^6.1.3", "@push.rocks/qenv": "^6.1.4",
"@types/json-schema": "^7.0.15", "@types/json-schema": "^7.0.15",
"@types/lodash.clonedeep": "^4.5.9", "@types/lodash.clonedeep": "^4.5.9",
"@types/node": "^25.6.0", "@types/node": "^25.6.0",
"@types/pngjs": "^6.0.5", "@types/pngjs": "^6.0.5",
"typescript": "^6.0.3", "typescript": "^6.0.3",
"undici-types": "^8.1.0" "undici-types": "^8.2.0"
}, },
"dependencies": { "dependencies": {
"@ai-sdk/anthropic": "^3.0.72", "@ai-sdk/anthropic": "^3.0.75",
"@ai-sdk/google": "^3.0.65", "@ai-sdk/google": "^3.0.67",
"@ai-sdk/groq": "^3.0.36", "@ai-sdk/groq": "^3.0.38",
"@ai-sdk/mistral": "^3.0.31", "@ai-sdk/mistral": "^3.0.35",
"@ai-sdk/openai": "^3.0.54", "@ai-sdk/openai": "^3.0.62",
"@ai-sdk/perplexity": "^3.0.30", "@ai-sdk/perplexity": "^3.0.32",
"@ai-sdk/provider": "^3.0.9", "@ai-sdk/provider": "^3.0.10",
"@ai-sdk/xai": "^3.0.84", "@ai-sdk/xai": "^3.0.88",
"@anthropic-ai/sdk": "0.91.0", "@anthropic-ai/sdk": "0.95.0",
"@push.rocks/smartpdf": "^4.2.2", "@push.rocks/smartpdf": "^4.2.2",
"ai": "^6.0.170", "ai": "^6.0.175",
"openai": "^6.35.0" "openai": "^6.36.0"
}, },
"repository": { "repository": {
"type": "git", "type": "git",
+713 -1153
View File
File diff suppressed because it is too large Load Diff
+7 -4
View File
@@ -6,6 +6,7 @@ The package is a **provider registry** built on the Vercel AI SDK (`ai` v6). The
### Core Entry (`ts/`) ### Core Entry (`ts/`)
- `getModel(options)` → returns `LanguageModelV3` for any supported provider - `getModel(options)` → returns `LanguageModelV3` for any supported provider
- `getModelSetup(options)` → returns `{ model, providerOptions }` for request-time AI SDK provider options
- Providers: anthropic, openai, google, groq, mistral, xai, perplexity, ollama - Providers: anthropic, openai, google, groq, mistral, xai, perplexity, ollama
- Anthropic prompt caching via `wrapLanguageModel` middleware (enabled by default) - Anthropic prompt caching via `wrapLanguageModel` middleware (enabled by default)
- Custom Ollama provider implementing `LanguageModelV3` directly (for think, num_ctx support) - Custom Ollama provider implementing `LanguageModelV3` directly (for think, num_ctx support)
@@ -19,11 +20,11 @@ The package is a **provider registry** built on the Vercel AI SDK (`ai` v6). The
## Dependencies ## Dependencies
- `ai` ^6.0.116 — Vercel AI SDK core - `ai` ^6.0.175 — Vercel AI SDK core
- `@ai-sdk/*` — Provider packages (anthropic, openai, google, groq, mistral, xai, perplexity) - `@ai-sdk/*` — Provider packages (anthropic, openai, google, groq, mistral, xai, perplexity)
- `@ai-sdk/provider` ^3.0.8 — LanguageModelV3 types - `@ai-sdk/provider` ^3.0.10 — LanguageModelV3 types
- `@anthropic-ai/sdk` ^0.78.0 — Direct SDK for research (web search tool) - `@anthropic-ai/sdk` 0.95.0 — Direct SDK for research (web search tool)
- `openai` ^6.25.0 — Direct SDK for audio TTS and image generation/editing - `openai` ^6.36.0 — Direct SDK for audio TTS and image generation/editing
- `@push.rocks/smartpdf` ^4.1.3 — PDF to PNG conversion for document analysis - `@push.rocks/smartpdf` ^4.1.3 — PDF to PNG conversion for document analysis
## Build ## Build
@@ -40,6 +41,8 @@ The package is a **provider registry** built on the Vercel AI SDK (`ai` v6). The
- Ollama `think` param goes at request body top level, not inside `options` - Ollama `think` param goes at request body top level, not inside `options`
- Qwen models get default temperature 0.55 in the custom Ollama provider - Qwen models get default temperature 0.55 in the custom Ollama provider
- `qenv.getEnvVarOnDemand()` returns a Promise — must be awaited in tests - `qenv.getEnvVarOnDemand()` returns a Promise — must be awaited in tests
- OpenAI reasoning options belong in AI SDK `providerOptions`, not model construction options
- SmartAI accepts OpenAI model IDs as plain strings, including `gpt-5.5`
## Testing ## Testing
+33 -1
View File
@@ -70,11 +70,43 @@ const options: ISmartAiOptions = {
baseUrl: 'http://localhost:11434', baseUrl: 'http://localhost:11434',
// Ollama-only: model runtime options // Ollama-only: model runtime options
ollamaOptions: { think: true, num_ctx: 4096 }, ollamaOptions: { think: true, num_ctx: 4096 },
// OpenAI request-time options for generateText()/streamText()
providerOptions: { openai: { reasoningEffort: 'xhigh' } },
}; };
const model = getModel(options); const model = getModel(options);
``` ```
### `getModelSetup(options): { model, providerOptions }`
Use this when a provider needs request-time AI SDK `providerOptions`, such as GPT-5 reasoning settings for OpenAI. `getModel()` remains available for existing consumers; `getModelSetup()` returns the same model plus provider options to pass into `generateText()` or `streamText()`.
```typescript
import { getModelSetup, generateText } from '@push.rocks/smartai';
const setup = getModelSetup({
provider: 'openai',
apiKey: process.env.OPENAI_API_KEY,
model: 'gpt-5.5',
providerOptions: {
openai: {
reasoningEffort: 'xhigh',
textVerbosity: 'high',
},
},
});
const result = await generateText({
model: setup.model,
prompt: 'Draft a careful migration plan.',
providerOptions: setup.providerOptions,
});
console.log(result.text);
```
OpenAI `reasoningEffort` supports `'none'`, `'minimal'`, `'low'`, `'medium'`, `'high'`, and `'xhigh'`. Model IDs are accepted as strings, so new IDs like `'gpt-5.5'` can be used before upstream model unions are updated.
### Re-exported AI SDK Functions ### Re-exported AI SDK Functions
SmartAI re-exports the most commonly used functions from `ai` for convenience: SmartAI re-exports the most commonly used functions from `ai` for convenience:
@@ -101,7 +133,7 @@ import type {
| Provider | Package | Example Models | | Provider | Package | Example Models |
|----------|---------|----------------| |----------|---------|----------------|
| **Anthropic** | `@ai-sdk/anthropic` | `claude-sonnet-4-5-20250929`, `claude-opus-4-5-20250929` | | **Anthropic** | `@ai-sdk/anthropic` | `claude-sonnet-4-5-20250929`, `claude-opus-4-5-20250929` |
| **OpenAI** | `@ai-sdk/openai` | `gpt-4o`, `gpt-4o-mini`, `o3-mini` | | **OpenAI** | `@ai-sdk/openai` | `gpt-5.5`, `gpt-5`, `gpt-4o`, `o3-mini` |
| **Google** | `@ai-sdk/google` | `gemini-2.0-flash`, `gemini-2.5-pro` | | **Google** | `@ai-sdk/google` | `gemini-2.0-flash`, `gemini-2.5-pro` |
| **Groq** | `@ai-sdk/groq` | `llama-3.3-70b-versatile`, `mixtral-8x7b-32768` | | **Groq** | `@ai-sdk/groq` | `llama-3.3-70b-versatile`, `mixtral-8x7b-32768` |
| **Mistral** | `@ai-sdk/mistral` | `mistral-large-latest`, `mistral-small-latest` | | **Mistral** | `@ai-sdk/mistral` | `mistral-large-latest`, `mistral-small-latest` |
+98
View File
@@ -1,5 +1,7 @@
import { tap, expect } from '@git.zone/tstest/tapbundle'; import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as qenv from '@push.rocks/qenv'; import * as qenv from '@push.rocks/qenv';
import { simulateReadableStream } from 'ai';
import { MockLanguageModelV3 } from 'ai/test';
import * as smartai from '../ts/index.js'; import * as smartai from '../ts/index.js';
const testQenv = new qenv.Qenv('./', './.nogit/'); const testQenv = new qenv.Qenv('./', './.nogit/');
@@ -91,6 +93,102 @@ tap.test('getModel should return a LanguageModelV3 for openai', async () => {
expect(model).toHaveProperty('doStream'); expect(model).toHaveProperty('doStream');
}); });
tap.test('getModelSetup should return OpenAI providerOptions for GPT-5.5 xhigh', async () => {
  // Build the setup exactly as a consumer targeting GPT-5.5 xhigh reasoning would.
  const setup = smartai.getModelSetup({
    apiKey: 'test-key',
    model: 'gpt-5.5',
    provider: 'openai',
    providerOptions: {
      openai: { reasoningEffort: 'xhigh', textVerbosity: 'high' },
    },
  });

  // Both the model id and the request-time options must round-trip untouched.
  expect(setup.model.modelId).toEqual('gpt-5.5');
  const openaiOptions = setup.providerOptions?.openai;
  expect(openaiOptions?.reasoningEffort).toEqual('xhigh');
  expect(openaiOptions?.textVerbosity).toEqual('high');
});
tap.test('generateText should pass OpenAI providerOptions through AI SDK', async () => {
  // Request-time options produced by getModelSetup() for an OpenAI GPT-5.5 call.
  const setup = smartai.getModelSetup({
    apiKey: 'test-key',
    model: 'gpt-5.5',
    provider: 'openai',
    providerOptions: {
      openai: { reasoningEffort: 'xhigh', textVerbosity: 'high' },
    },
  });

  // Mock model records each doGenerate invocation so we can inspect what was forwarded.
  const mockModel = new MockLanguageModelV3({
    doGenerate: async () => {
      return {
        content: [{ type: 'text', text: 'ok' }],
        finishReason: { unified: 'stop', raw: undefined },
        usage: {
          inputTokens: { total: 1, noCache: 1, cacheRead: undefined, cacheWrite: undefined },
          outputTokens: { total: 1, text: 1, reasoning: undefined },
        },
        warnings: [],
      };
    },
  });

  await smartai.generateText({
    model: mockModel,
    prompt: 'test',
    providerOptions: setup.providerOptions,
  });

  // The options must arrive at the model unchanged.
  const forwarded = mockModel.doGenerateCalls[0].providerOptions?.openai;
  expect(forwarded?.reasoningEffort).toEqual('xhigh');
  expect(forwarded?.textVerbosity).toEqual('high');
});
tap.test('streamText should pass OpenAI providerOptions through AI SDK', async () => {
  const setup = smartai.getModelSetup({
    apiKey: 'test-key',
    model: 'gpt-5.5',
    provider: 'openai',
    providerOptions: {
      openai: { reasoningEffort: 'xhigh', textVerbosity: 'high' },
    },
  });

  // Mock model that replays a canned stream: one "ok" text part, then the finish event.
  const mockModel = new MockLanguageModelV3({
    doStream: async () => ({
      stream: simulateReadableStream({
        chunks: [
          { type: 'text-start', id: 'text-1' },
          { type: 'text-delta', id: 'text-1', delta: 'ok' },
          { type: 'text-end', id: 'text-1' },
          {
            type: 'finish',
            finishReason: { unified: 'stop', raw: undefined },
            logprobs: undefined,
            usage: {
              inputTokens: { total: 1, noCache: 1, cacheRead: undefined, cacheWrite: undefined },
              outputTokens: { total: 1, text: 1, reasoning: undefined },
            },
          },
        ],
      }),
    }),
  });

  const streamResult = smartai.streamText({
    model: mockModel,
    prompt: 'test',
    providerOptions: setup.providerOptions,
  });
  // Await the full text so the stream is fully consumed before asserting.
  await streamResult.text;

  const forwarded = mockModel.doStreamCalls[0].providerOptions?.openai;
  expect(forwarded?.reasoningEffort).toEqual('xhigh');
  expect(forwarded?.textVerbosity).toEqual('high');
});
tap.test('streamText with anthropic model', async () => { tap.test('streamText with anthropic model', async () => {
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN'); const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
if (!apiKey) { if (!apiKey) {
+1 -1
View File
@@ -3,6 +3,6 @@
*/ */
export const commitinfo = { export const commitinfo = {
name: '@push.rocks/smartai', name: '@push.rocks/smartai',
version: '2.0.1', version: '2.2.0',
description: 'Provider registry and capability utilities for ai-sdk (Vercel AI SDK). Core export returns LanguageModel; subpath exports provide vision, audio, image, document and research capabilities.' description: 'Provider registry and capability utilities for ai-sdk (Vercel AI SDK). Core export returns LanguageModel; subpath exports provide vision, audio, image, document and research capabilities.'
} }
+12 -2
View File
@@ -1,5 +1,15 @@
export { getModel } from './smartai.classes.smartai.js'; export { getModel, getModelSetup } from './smartai.classes.smartai.js';
export type { ISmartAiOptions, TProvider, IOllamaModelOptions, LanguageModelV3 } from './smartai.interfaces.js'; export type {
IOpenAiProviderOptions,
ISmartAiModelSetup,
ISmartAiOptions,
TOpenAiReasoningEffort,
TOpenAiTextVerbosity,
TProvider,
TSmartAiProviderOptions,
IOllamaModelOptions,
LanguageModelV3,
} from './smartai.interfaces.js';
export { createAnthropicCachingMiddleware } from './smartai.middleware.anthropic.js'; export { createAnthropicCachingMiddleware } from './smartai.middleware.anthropic.js';
export { createOllamaModel } from './smartai.provider.ollama.js'; export { createOllamaModel } from './smartai.provider.ollama.js';
+9 -1
View File
@@ -1,5 +1,5 @@
import * as plugins from './plugins.js'; import * as plugins from './plugins.js';
import type { ISmartAiOptions, LanguageModelV3 } from './smartai.interfaces.js'; import type { ISmartAiModelSetup, ISmartAiOptions, LanguageModelV3 } from './smartai.interfaces.js';
import { createOllamaModel } from './smartai.provider.ollama.js'; import { createOllamaModel } from './smartai.provider.ollama.js';
import { createAnthropicCachingMiddleware } from './smartai.middleware.anthropic.js'; import { createAnthropicCachingMiddleware } from './smartai.middleware.anthropic.js';
@@ -49,3 +49,11 @@ export function getModel(options: ISmartAiOptions): LanguageModelV3 {
throw new Error(`Unknown provider: ${(options as ISmartAiOptions).provider}`); throw new Error(`Unknown provider: ${(options as ISmartAiOptions).provider}`);
} }
} }
/**
 * Builds everything needed for an AI SDK call in one step.
 *
 * Resolves the model via getModel() and, when the caller supplied
 * provider-specific request options, forwards them so they can be passed
 * to generateText()/streamText() as `providerOptions`.
 *
 * @param options - provider, model id, credentials, and optional providerOptions
 * @returns the resolved model, plus providerOptions only when one was supplied
 */
export function getModelSetup(options: ISmartAiOptions): ISmartAiModelSetup {
  const setup: ISmartAiModelSetup = { model: getModel(options) };
  if (options.providerOptions) {
    setup.providerOptions = options.providerOptions;
  }
  return setup;
}
+47 -1
View File
@@ -1,4 +1,4 @@
import type { LanguageModelV3 } from '@ai-sdk/provider'; import type { JSONObject, JSONValue, LanguageModelV3 } from '@ai-sdk/provider';
export type TProvider = export type TProvider =
| 'anthropic' | 'anthropic'
@@ -10,10 +10,56 @@ export type TProvider =
| 'perplexity' | 'perplexity'
| 'ollama'; | 'ollama';
/** Reasoning effort levels accepted by OpenAI reasoning models, including the 'xhigh' tier. */
export type TOpenAiReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
/** Output verbosity hint for OpenAI text responses. */
export type TOpenAiTextVerbosity = 'low' | 'medium' | 'high';
/**
 * Request-time options forwarded to the OpenAI provider via AI SDK `providerOptions.openai`.
 * Extends JSONObject (with a trailing index signature) so the object stays JSON-serializable
 * and remains open for provider options not modeled here yet.
 * NOTE(review): field semantics mirror the @ai-sdk/openai provider options — confirm
 * individual fields against the installed provider version.
 */
export interface IOpenAiProviderOptions extends JSONObject {
  conversation?: string | null;
  include?: string[] | null;
  instructions?: string | null;
  logitBias?: Record<string, number>;
  logprobs?: boolean | number | null;
  maxCompletionTokens?: number;
  maxToolCalls?: number | null;
  metadata?: JSONObject | null;
  parallelToolCalls?: boolean | null;
  previousResponseId?: string | null;
  prediction?: JSONObject;
  promptCacheKey?: string | null;
  promptCacheRetention?: 'in_memory' | '24h' | null;
  // Reasoning effort for reasoning-capable models (e.g. GPT-5.5 'xhigh').
  reasoningEffort?: TOpenAiReasoningEffort | null;
  reasoningSummary?: string | null;
  safetyIdentifier?: string | null;
  serviceTier?: 'auto' | 'flex' | 'priority' | 'default' | null;
  store?: boolean | null;
  strictJsonSchema?: boolean | null;
  systemMessageMode?: 'remove' | 'system' | 'developer';
  // Verbosity of generated text output.
  textVerbosity?: TOpenAiTextVerbosity | null;
  truncation?: 'auto' | 'disabled' | null;
  user?: string | null;
  forceReasoning?: boolean;
  // Escape hatch for options not modeled above; values must stay JSON-compatible.
  [key: string]: JSONValue | undefined;
}
/**
 * Per-provider AI SDK providerOptions bag; only the `openai` key is strongly typed here.
 */
export type TSmartAiProviderOptions = Record<string, JSONObject> & {
  openai?: IOpenAiProviderOptions;
};
/**
 * Result of getModelSetup(): the resolved model plus optional request-time providerOptions
 * to pass alongside it into generateText()/streamText().
 */
export interface ISmartAiModelSetup {
  model: LanguageModelV3;
  providerOptions?: TSmartAiProviderOptions;
}
export interface ISmartAiOptions { export interface ISmartAiOptions {
provider: TProvider; provider: TProvider;
model: string; model: string;
apiKey?: string; apiKey?: string;
/**
* Provider-specific AI SDK generation options.
* Pass this to generateText()/streamText() alongside the model.
*/
providerOptions?: TSmartAiProviderOptions;
/** For Ollama: base URL of the local server. Default: http://localhost:11434 */ /** For Ollama: base URL of the local server. Default: http://localhost:11434 */
baseUrl?: string; baseUrl?: string;
/** /**