feat(openai): add getModelSetup() and typed provider options for OpenAI reasoning settings
This commit is contained in:
@@ -1,5 +1,7 @@
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import * as qenv from '@push.rocks/qenv';
|
||||
import { simulateReadableStream } from 'ai';
|
||||
import { MockLanguageModelV3 } from 'ai/test';
|
||||
import * as smartai from '../ts/index.js';
|
||||
|
||||
// Shared env-var loader for live-API tests below (e.g. ANTHROPIC_TOKEN).
// NOTE(review): assumes qenv.Qenv(envDir, nogitDir) reads from './' with
// secrets persisted under './.nogit/' — confirm against @push.rocks/qenv docs.
const testQenv = new qenv.Qenv('./', './.nogit/');
|
||||
@@ -91,6 +93,102 @@ tap.test('getModel should return a LanguageModelV3 for openai', async () => {
|
||||
expect(model).toHaveProperty('doStream');
|
||||
});
|
||||
|
||||
tap.test('getModelSetup should return OpenAI providerOptions for GPT-5.5 xhigh', async () => {
  // OpenAI-specific reasoning settings we expect to round-trip untouched.
  const openaiOptions = {
    reasoningEffort: 'xhigh',
    textVerbosity: 'high',
  };

  const setup = smartai.getModelSetup({
    provider: 'openai',
    model: 'gpt-5.5',
    apiKey: 'test-key',
    providerOptions: { openai: openaiOptions },
  });

  // The requested model id and the typed provider options must both survive.
  expect(setup.model.modelId).toEqual('gpt-5.5');
  expect(setup.providerOptions?.openai?.reasoningEffort).toEqual('xhigh');
  expect(setup.providerOptions?.openai?.textVerbosity).toEqual('high');
});
|
||||
|
||||
tap.test('generateText should pass OpenAI providerOptions through AI SDK', async () => {
  const setup = smartai.getModelSetup({
    provider: 'openai',
    model: 'gpt-5.5',
    apiKey: 'test-key',
    providerOptions: {
      openai: { reasoningEffort: 'xhigh', textVerbosity: 'high' },
    },
  });

  // Mock model records each doGenerate invocation so we can inspect the
  // providerOptions that the AI SDK forwarded to the provider layer.
  const mockModel = new MockLanguageModelV3({
    doGenerate: async () => ({
      content: [{ type: 'text', text: 'ok' }],
      finishReason: { unified: 'stop', raw: undefined },
      usage: {
        inputTokens: { total: 1, noCache: 1, cacheRead: undefined, cacheWrite: undefined },
        outputTokens: { total: 1, text: 1, reasoning: undefined },
      },
      warnings: [],
    }),
  });

  await smartai.generateText({
    model: mockModel,
    prompt: 'test',
    providerOptions: setup.providerOptions,
  });

  // The recorded call must carry the exact options produced by getModelSetup.
  const [firstCall] = mockModel.doGenerateCalls;
  expect(firstCall.providerOptions?.openai?.reasoningEffort).toEqual('xhigh');
  expect(firstCall.providerOptions?.openai?.textVerbosity).toEqual('high');
});
|
||||
|
||||
tap.test('streamText should pass OpenAI providerOptions through AI SDK', async () => {
  const setup = smartai.getModelSetup({
    provider: 'openai',
    model: 'gpt-5.5',
    apiKey: 'test-key',
    providerOptions: {
      openai: { reasoningEffort: 'xhigh', textVerbosity: 'high' },
    },
  });

  // Simulated stream: a single text part ('ok') followed by a finish chunk.
  const streamChunks = [
    { type: 'text-start', id: 'text-1' },
    { type: 'text-delta', id: 'text-1', delta: 'ok' },
    { type: 'text-end', id: 'text-1' },
    {
      type: 'finish',
      finishReason: { unified: 'stop', raw: undefined },
      logprobs: undefined,
      usage: {
        inputTokens: { total: 1, noCache: 1, cacheRead: undefined, cacheWrite: undefined },
        outputTokens: { total: 1, text: 1, reasoning: undefined },
      },
    },
  ];

  // Mock model records each doStream invocation for later inspection.
  const mockModel = new MockLanguageModelV3({
    doStream: async () => ({
      stream: simulateReadableStream({ chunks: streamChunks }),
    }),
  });

  const result = smartai.streamText({
    model: mockModel,
    prompt: 'test',
    providerOptions: setup.providerOptions,
  });
  // Drain the stream to completion before inspecting the recorded call.
  await result.text;

  const [firstCall] = mockModel.doStreamCalls;
  expect(firstCall.providerOptions?.openai?.reasoningEffort).toEqual('xhigh');
  expect(firstCall.providerOptions?.openai?.textVerbosity).toEqual('high');
});
|
||||
|
||||
tap.test('streamText with anthropic model', async () => {
|
||||
const apiKey = await testQenv.getEnvVarOnDemand('ANTHROPIC_TOKEN');
|
||||
if (!apiKey) {
|
||||
|
||||
Reference in New Issue
Block a user