// smartai/ts/smartai.interfaces.ts
import type { JSONObject, JSONValue, LanguageModelV3 } from '@ai-sdk/provider';

export type TProvider =
  | 'anthropic'
  | 'openai'
  | 'google'
  | 'groq'
  | 'mistral'
  | 'xai'
  | 'perplexity'
  | 'ollama';

export type TOpenAiReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
export type TOpenAiTextVerbosity = 'low' | 'medium' | 'high';

export interface IOpenAiProviderOptions extends JSONObject {
  conversation?: string | null;
  include?: string[] | null;
  instructions?: string | null;
  logitBias?: Record<string, number>;
  logprobs?: boolean | number | null;
  maxCompletionTokens?: number;
  maxToolCalls?: number | null;
  metadata?: JSONObject | null;
  parallelToolCalls?: boolean | null;
  previousResponseId?: string | null;
  prediction?: JSONObject;
  promptCacheKey?: string | null;
  promptCacheRetention?: 'in_memory' | '24h' | null;
  reasoningEffort?: TOpenAiReasoningEffort | null;
  reasoningSummary?: string | null;
  safetyIdentifier?: string | null;
  serviceTier?: 'auto' | 'flex' | 'priority' | 'default' | null;
  store?: boolean | null;
  strictJsonSchema?: boolean | null;
  systemMessageMode?: 'remove' | 'system' | 'developer';
  textVerbosity?: TOpenAiTextVerbosity | null;
  truncation?: 'auto' | 'disabled' | null;
  user?: string | null;
  forceReasoning?: boolean;
  [key: string]: JSONValue | undefined;
}
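
// Illustrative options literal (not part of the original file; the values are
// assumptions chosen for demonstration, not recommended defaults), exercising
// the prompt-cache and conversation-state fields declared above.
export const exampleOpenAiProviderOptions: IOpenAiProviderOptions = {
  promptCacheKey: 'my-app/system-v1', // stable key so repeated prompt prefixes can hit the cache
  promptCacheRetention: '24h',
  store: true,                        // persist the response so previousResponseId can reference it
  reasoningEffort: 'medium',
};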

export type TSmartAiProviderOptions = Record<string, JSONObject> & {
  openai?: IOpenAiProviderOptions;
};

export interface ISmartAiModelSetup {
  model: LanguageModelV3;
  providerOptions?: TSmartAiProviderOptions;
}
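
// Usage sketch (an assumption, not part of the original file): consuming an
// ISmartAiModelSetup with the AI SDK. This assumes the 'ai' package is
// installed and that its generateText() accepts a LanguageModelV3; the dynamic
// import keeps this interfaces module free of top-level runtime imports.
export async function exampleGenerate(setup: ISmartAiModelSetup, prompt: string): Promise<string> {
  const { generateText } = await import('ai');
  // Forward the provider-specific options alongside the model, as the
  // ISmartAiOptions.providerOptions doc comment below describes.
  const { text } = await generateText({
    model: setup.model,
    prompt,
    providerOptions: setup.providerOptions,
  });
  return text;
}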

export interface ISmartAiOptions {
  provider: TProvider;
  model: string;
  apiKey?: string;
  /**
   * Provider-specific AI SDK generation options.
   * Pass this to generateText()/streamText() alongside the model.
   */
  providerOptions?: TSmartAiProviderOptions;
  /** For Ollama: base URL of the local server. Default: http://localhost:11434 */
  baseUrl?: string;
  /**
   * Ollama-specific model runtime options.
   * Only used when provider === 'ollama'.
   */
  ollamaOptions?: IOllamaModelOptions;
  /**
   * Enable Anthropic prompt caching on system + recent messages.
   * Only used when provider === 'anthropic'. Default: true.
   */
  promptCaching?: boolean;
}
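
// Illustrative sketch (not part of the original file; the model name and
// option values are placeholders, not recommendations): an ISmartAiOptions
// for OpenAI that routes provider-specific knobs through `providerOptions`,
// to be forwarded to generateText()/streamText() as the doc comment above
// describes.
export const exampleOpenAiSetup: ISmartAiOptions = {
  provider: 'openai',
  model: 'gpt-5',
  apiKey: process.env.OPENAI_API_KEY, // assumes a Node environment
  providerOptions: {
    openai: {
      reasoningEffort: 'high',
      textVerbosity: 'medium',
    },
  },
};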

/**
 * Ollama model runtime options passed in the request body `options` field.
 * @see https://github.com/ollama/ollama/blob/main/docs/modelfile.md
 */
export interface IOllamaModelOptions {
  /** Context window size in tokens. Default: 2048. */
  num_ctx?: number;
  /** Sampling temperature; 0 = deterministic. Default: 0.8. For Qwen models use 0.55. */
  temperature?: number;
  /** Top-k sampling: restrict sampling to the k most likely tokens. */
  top_k?: number;
  /** Nucleus (top-p) sampling threshold. */
  top_p?: number;
  /** Penalty applied to recently generated tokens to reduce repetition. */
  repeat_penalty?: number;
  /** Maximum number of tokens to generate. */
  num_predict?: number;
  /** Stop sequences that end generation when produced. */
  stop?: string[];
  /** Random seed for reproducible sampling. */
  seed?: number;
  /**
   * Enable thinking/reasoning mode (Qwen3, QwQ, DeepSeek-R1, etc.).
   * The custom Ollama provider handles this directly.
   */
  think?: boolean;
}
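
// Illustrative sketch (not part of the original file; the model name and
// values are assumptions): a full ISmartAiOptions for a local Qwen3 model
// served by Ollama, using the runtime options declared above.
export const exampleOllamaSetup: ISmartAiOptions = {
  provider: 'ollama',
  model: 'qwen3:8b',
  baseUrl: 'http://localhost:11434', // the documented default, shown explicitly
  ollamaOptions: {
    num_ctx: 8192,     // widen the 2048-token default context window
    temperature: 0.55, // the Qwen-specific value suggested above
    think: true,       // enable Qwen3 thinking mode via the custom provider
  },
};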

export type { LanguageModelV3 };