feat(openai): add getModelSetup() and typed provider options for OpenAI reasoning settings

@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@push.rocks/smartai',
-  version: '2.0.1',
+  version: '2.2.0',
   description: 'Provider registry and capability utilities for ai-sdk (Vercel AI SDK). Core export returns LanguageModel; subpath exports provide vision, audio, image, document and research capabilities.'
 }
@@ -1,5 +1,15 @@
-export { getModel } from './smartai.classes.smartai.js';
-export type { ISmartAiOptions, TProvider, IOllamaModelOptions, LanguageModelV3 } from './smartai.interfaces.js';
+export { getModel, getModelSetup } from './smartai.classes.smartai.js';
+export type {
+  IOpenAiProviderOptions,
+  ISmartAiModelSetup,
+  ISmartAiOptions,
+  TOpenAiReasoningEffort,
+  TOpenAiTextVerbosity,
+  TProvider,
+  TSmartAiProviderOptions,
+  IOllamaModelOptions,
+  LanguageModelV3,
+} from './smartai.interfaces.js';
 export { createAnthropicCachingMiddleware } from './smartai.middleware.anthropic.js';
 export { createOllamaModel } from './smartai.provider.ollama.js';
@@ -1,5 +1,5 @@
 import * as plugins from './plugins.js';
-import type { ISmartAiOptions, LanguageModelV3 } from './smartai.interfaces.js';
+import type { ISmartAiModelSetup, ISmartAiOptions, LanguageModelV3 } from './smartai.interfaces.js';
 import { createOllamaModel } from './smartai.provider.ollama.js';
 import { createAnthropicCachingMiddleware } from './smartai.middleware.anthropic.js';

@@ -49,3 +49,11 @@ export function getModel(options: ISmartAiOptions): LanguageModelV3 {
       throw new Error(`Unknown provider: ${(options as ISmartAiOptions).provider}`);
   }
 }
+
+/**
+ * Returns the model plus request-time providerOptions for AI SDK calls.
+ */
+export function getModelSetup(options: ISmartAiOptions): ISmartAiModelSetup {
+  const model = getModel(options);
+  return options.providerOptions ? { model, providerOptions: options.providerOptions } : { model };
+}
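Usage sketch for the new getModelSetup(), assuming the AI SDK's generateText from the 'ai' package (the diff's own doc comment says to pass providerOptions to generateText()/streamText() alongside the model); the model id, prompt, and option values here are illustrative placeholders:

import { generateText } from 'ai';
import { getModelSetup } from '@push.rocks/smartai';

// Build the model once, then spread the returned providerOptions into the call.
// 'gpt-5.1' and the option values are placeholders, not recommendations.
const { model, providerOptions } = getModelSetup({
  provider: 'openai',
  model: 'gpt-5.1',
  apiKey: process.env.OPENAI_API_KEY,
  providerOptions: {
    openai: { reasoningEffort: 'high', textVerbosity: 'low' },
  },
});

const result = await generateText({
  model,
  providerOptions, // request-time settings travel separately from the model
  prompt: 'Summarize this repository in one sentence.',
});
console.log(result.text);

Because getModelSetup() only echoes providerOptions back when present, callers that manage options themselves can keep using getModel() unchanged.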
@@ -1,4 +1,4 @@
-import type { LanguageModelV3 } from '@ai-sdk/provider';
+import type { JSONObject, JSONValue, LanguageModelV3 } from '@ai-sdk/provider';

 export type TProvider =
   | 'anthropic'

@@ -10,10 +10,56 @@ export type TProvider =
   | 'perplexity'
   | 'ollama';

+export type TOpenAiReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
+
+export type TOpenAiTextVerbosity = 'low' | 'medium' | 'high';
+
+export interface IOpenAiProviderOptions extends JSONObject {
+  conversation?: string | null;
+  include?: string[] | null;
+  instructions?: string | null;
+  logitBias?: Record<string, number>;
+  logprobs?: boolean | number | null;
+  maxCompletionTokens?: number;
+  maxToolCalls?: number | null;
+  metadata?: JSONObject | null;
+  parallelToolCalls?: boolean | null;
+  previousResponseId?: string | null;
+  prediction?: JSONObject;
+  promptCacheKey?: string | null;
+  promptCacheRetention?: 'in_memory' | '24h' | null;
+  reasoningEffort?: TOpenAiReasoningEffort | null;
+  reasoningSummary?: string | null;
+  safetyIdentifier?: string | null;
+  serviceTier?: 'auto' | 'flex' | 'priority' | 'default' | null;
+  store?: boolean | null;
+  strictJsonSchema?: boolean | null;
+  systemMessageMode?: 'remove' | 'system' | 'developer';
+  textVerbosity?: TOpenAiTextVerbosity | null;
+  truncation?: 'auto' | 'disabled' | null;
+  user?: string | null;
+  forceReasoning?: boolean;
+  [key: string]: JSONValue | undefined;
+}
+
+export type TSmartAiProviderOptions = Record<string, JSONObject> & {
+  openai?: IOpenAiProviderOptions;
+};
+
+export interface ISmartAiModelSetup {
+  model: LanguageModelV3;
+  providerOptions?: TSmartAiProviderOptions;
+}
+
 export interface ISmartAiOptions {
   provider: TProvider;
   model: string;
   apiKey?: string;
+  /**
+   * Provider-specific AI SDK generation options.
+   * Pass this to generateText()/streamText() alongside the model.
+   */
+  providerOptions?: TSmartAiProviderOptions;
   /** For Ollama: base URL of the local server. Default: http://localhost:11434 */
   baseUrl?: string;
   /**
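For reference, a short sketch of how the new types compose; every field is optional and the values below are illustrative:

import type {
  IOpenAiProviderOptions,
  TSmartAiProviderOptions,
} from '@push.rocks/smartai';

// Illustrative values; the union types keep reasoning settings checked at compile time.
const openai: IOpenAiProviderOptions = {
  reasoningEffort: 'medium',       // TOpenAiReasoningEffort
  textVerbosity: 'high',           // TOpenAiTextVerbosity
  serviceTier: 'flex',
  promptCacheKey: 'smartai-demo',  // placeholder cache key
};

// The Record index signature on TSmartAiProviderOptions leaves room for other
// providers, while the openai key is fully typed.
const providerOptions: TSmartAiProviderOptions = { openai };

A typo such as reasoningEffort: 'max' now fails to compile instead of being forwarded to the API unchecked.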