BREAKING CHANGE(vercel-ai-sdk): migrate to Vercel AI SDK v6 and introduce provider registry (getModel) returning LanguageModelV3
This commit adds one new file: ts/smartai.interfaces.ts (53 lines).
|
||||
import type { LanguageModelV3 } from '@ai-sdk/provider';
|
||||
|
||||
export type TProvider =
|
||||
| 'anthropic'
|
||||
| 'openai'
|
||||
| 'google'
|
||||
| 'groq'
|
||||
| 'mistral'
|
||||
| 'xai'
|
||||
| 'perplexity'
|
||||
| 'ollama';
|
||||
|
||||
export interface ISmartAiOptions {
|
||||
provider: TProvider;
|
||||
model: string;
|
||||
apiKey?: string;
|
||||
/** For Ollama: base URL of the local server. Default: http://localhost:11434 */
|
||||
baseUrl?: string;
|
||||
/**
|
||||
* Ollama-specific model runtime options.
|
||||
* Only used when provider === 'ollama'.
|
||||
*/
|
||||
ollamaOptions?: IOllamaModelOptions;
|
||||
/**
|
||||
* Enable Anthropic prompt caching on system + recent messages.
|
||||
* Only used when provider === 'anthropic'. Default: true.
|
||||
*/
|
||||
promptCaching?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Ollama model runtime options passed in the request body `options` field.
|
||||
* @see https://github.com/ollama/ollama/blob/main/docs/modelfile.md
|
||||
*/
|
||||
export interface IOllamaModelOptions {
|
||||
/** Context window size. Default: 2048. */
|
||||
num_ctx?: number;
|
||||
/** 0 = deterministic. Default: 0.8. For Qwen models use 0.55. */
|
||||
temperature?: number;
|
||||
top_k?: number;
|
||||
top_p?: number;
|
||||
repeat_penalty?: number;
|
||||
num_predict?: number;
|
||||
stop?: string[];
|
||||
seed?: number;
|
||||
/**
|
||||
* Enable thinking/reasoning mode (Qwen3, QwQ, DeepSeek-R1 etc.).
|
||||
* The custom Ollama provider handles this directly.
|
||||
*/
|
||||
think?: boolean;
|
||||
}
|
||||
|
||||
export type { LanguageModelV3 };
|
||||
Reference in New Issue
Block a user