import { assertEquals } from 'jsr:@std/assert@^1.0.0';
import { ApiServer } from '../ts/api/server.ts';
Deno.test('ApiServer serves health metrics and authenticated model listings', async () => {
|
||
|
|
const port = 18100 + Math.floor(Math.random() * 1000);
|
||
|
|
const server = new ApiServer(
|
||
|
|
{
|
||
|
|
host: '127.0.0.1',
|
||
|
|
port,
|
||
|
|
apiKeys: ['valid-key'],
|
||
|
|
cors: false,
|
||
|
|
corsOrigins: [],
|
||
|
|
},
|
||
|
|
{
|
||
|
|
async getAllStatus() {
|
||
|
|
return new Map([
|
||
|
|
['vllm-1', { running: true, health: 'healthy' }],
|
||
|
|
]);
|
||
|
|
},
|
||
|
|
async getAllAvailableModels() {
|
||
|
|
return new Map([
|
||
|
|
['meta-llama/Llama-3.1-8B-Instruct', [{ type: 'vllm' }]],
|
||
|
|
]);
|
||
|
|
},
|
||
|
|
} as never,
|
||
|
|
{
|
||
|
|
async getAllModels() {
|
||
|
|
return [
|
||
|
|
{
|
||
|
|
id: 'meta-llama/Llama-3.1-8B-Instruct',
|
||
|
|
engine: 'vllm',
|
||
|
|
source: { repo: 'meta-llama/Llama-3.1-8B-Instruct' },
|
||
|
|
capabilities: { chat: true },
|
||
|
|
requirements: { minVramGb: 18 },
|
||
|
|
},
|
||
|
|
];
|
||
|
|
},
|
||
|
|
} as never,
|
||
|
|
{} as never,
|
||
|
|
{
|
||
|
|
getStatus() {
|
||
|
|
return {
|
||
|
|
localNode: null,
|
||
|
|
nodes: [],
|
||
|
|
models: {},
|
||
|
|
desiredDeployments: [],
|
||
|
|
};
|
||
|
|
},
|
||
|
|
} as never,
|
||
|
|
);
|
||
|
|
|
||
|
|
(server as unknown as {
|
||
|
|
gpuDetector: { detectGpus: () => Promise<unknown[]> };
|
||
|
|
}).gpuDetector = {
|
||
|
|
async detectGpus() {
|
||
|
|
return [{ id: 'nvidia-0' }];
|
||
|
|
},
|
||
|
|
};
|
||
|
|
|
||
|
|
await server.start();
|
||
|
|
|
||
|
|
try {
|
||
|
|
const healthResponse = await fetch(`http://127.0.0.1:${port}/health`);
|
||
|
|
const healthBody = await healthResponse.json();
|
||
|
|
assertEquals(healthResponse.status, 200);
|
||
|
|
assertEquals(healthBody.status, 'ok');
|
||
|
|
assertEquals(healthBody.models, 1);
|
||
|
|
|
||
|
|
const metricsResponse = await fetch(`http://127.0.0.1:${port}/metrics`);
|
||
|
|
const metricsBody = await metricsResponse.text();
|
||
|
|
assertEquals(metricsResponse.status, 200);
|
||
|
|
assertEquals(metricsBody.includes('modelgrid_uptime_seconds'), true);
|
||
|
|
assertEquals(metricsBody.includes('modelgrid_models_available 1'), true);
|
||
|
|
|
||
|
|
const unauthenticatedModels = await fetch(`http://127.0.0.1:${port}/v1/models`);
|
||
|
|
const unauthenticatedBody = await unauthenticatedModels.json();
|
||
|
|
assertEquals(unauthenticatedModels.status, 401);
|
||
|
|
assertEquals(unauthenticatedBody.error.type, 'authentication_error');
|
||
|
|
|
||
|
|
const authenticatedModels = await fetch(`http://127.0.0.1:${port}/v1/models`, {
|
||
|
|
headers: {
|
||
|
|
Authorization: 'Bearer valid-key',
|
||
|
|
},
|
||
|
|
});
|
||
|
|
const authenticatedBody = await authenticatedModels.json();
|
||
|
|
assertEquals(authenticatedModels.status, 200);
|
||
|
|
assertEquals(authenticatedBody.object, 'list');
|
||
|
|
assertEquals(authenticatedBody.data[0].id, 'meta-llama/Llama-3.1-8B-Instruct');
|
||
|
|
} finally {
|
||
|
|
await server.stop();
|
||
|
|
}
|
||
|
|
});