import { assert, assertEquals, assertExists } from 'jsr:@std/assert@^1.0.0';
import { shortId } from '../ts/helpers/shortid.ts';

// =============================================================================
// UNIT TESTS - ModelGrid Core Components
// =============================================================================

// -----------------------------------------------------------------------------
// shortId() Tests
// -----------------------------------------------------------------------------

Deno.test('shortId: generates 6-character string', () => {
  const id = shortId();
  assertEquals(id.length, 6);
});

Deno.test('shortId: contains only alphanumeric characters', () => {
  const id = shortId();
  const alphanumericRegex = /^[a-zA-Z0-9]+$/;
  assert(alphanumericRegex.test(id), `ID "${id}" contains non-alphanumeric characters`);
});

Deno.test('shortId: generates unique IDs', () => {
  const ids = new Set<string>();
  const count = 100;

  for (let i = 0; i < count; i++) {
    ids.add(shortId());
  }

  // All IDs should be unique (statistically extremely likely for 100 IDs)
  assertEquals(ids.size, count, 'Generated IDs should be unique');
});
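
// A rough sanity check on the "statistically extremely likely" comment above.
// Assuming shortId() draws uniformly from the 62-character alphanumeric
// alphabet (an assumption; only the output format is verified by these tests),
// the birthday-bound collision probability for 100 draws is at most
// n(n-1) / (2 * 62^6) ≈ 8.7e-8, so the uniqueness test is effectively deterministic.
Deno.test('shortId: collision probability bound is negligible (sketch)', () => {
  const alphabetSize = 62; // assumed charset: [a-zA-Z0-9]
  const idLength = 6;
  const draws = 100;

  const idSpace = Math.pow(alphabetSize, idLength); // ~5.68e10 possible IDs
  const collisionBound = (draws * (draws - 1)) / (2 * idSpace);

  assert(collisionBound < 1e-6, 'Collision probability should be negligible');
});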

// -----------------------------------------------------------------------------
// Interface Type Tests
// -----------------------------------------------------------------------------

Deno.test('IModelGridConfig: valid config structure', () => {
  const config = {
    version: '1.0',
    api: {
      port: 8080,
      host: '0.0.0.0',
      apiKeys: ['test-key'],
    },
    docker: {
      networkName: 'modelgrid',
      runtime: 'docker' as const,
    },
    gpus: {
      autoDetect: true,
      assignments: {},
    },
    containers: [],
    models: {
      greenlistUrl: 'https://example.com/greenlit.json',
      autoPull: true,
      defaultContainer: 'ollama',
      autoLoad: [],
    },
    checkInterval: 30000,
  };

  assertExists(config.version);
  assertExists(config.api);
  assertExists(config.docker);
  assertExists(config.gpus);
  assertExists(config.containers);
  assertExists(config.models);
  assertEquals(config.api.port, 8080);
  assertEquals(config.docker.runtime, 'docker');
});

Deno.test('IGpuInfo: valid GPU info structure', () => {
  const gpu = {
    id: 'gpu-0',
    vendor: 'nvidia' as const,
    model: 'RTX 4090',
    vram: 24576,
    driverVersion: '535.154.05',
    cudaVersion: '12.2',
    pciSlot: '0000:01:00.0',
  };

  assertExists(gpu.id);
  assertExists(gpu.vendor);
  assertExists(gpu.model);
  assert(gpu.vram > 0, 'VRAM should be positive');
  assert(['nvidia', 'amd', 'intel'].includes(gpu.vendor), 'Vendor should be valid');
});

Deno.test('IContainerConfig: valid container config structure', () => {
  const container = {
    id: 'ollama-1',
    type: 'ollama' as const,
    name: 'Ollama Container',
    image: 'ollama/ollama:latest',
    gpuIds: ['gpu-0'],
    port: 11434,
    models: ['llama3:8b'],
  };

  assertExists(container.id);
  assertExists(container.type);
  assertExists(container.name);
  assertExists(container.image);
  assert(container.gpuIds.length > 0, 'Should have at least one GPU');
  assert(container.port > 0, 'Port should be positive');
});
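
// The structural checks above can also be expressed as a reusable runtime
// guard. This is a hypothetical sketch, not part of the ModelGrid API; the
// field names simply mirror the object literals used in the tests above.
function isContainerConfigLike(value: unknown): boolean {
  if (typeof value !== 'object' || value === null) return false;
  const v = value as Record<string, unknown>;
  return typeof v.id === 'string' &&
    typeof v.type === 'string' &&
    typeof v.name === 'string' &&
    typeof v.image === 'string' &&
    Array.isArray(v.gpuIds) && v.gpuIds.length > 0 &&
    typeof v.port === 'number' && v.port > 0;
}

Deno.test('isContainerConfigLike (sketch): accepts the sample container config', () => {
  const container = {
    id: 'ollama-1',
    type: 'ollama',
    name: 'Ollama Container',
    image: 'ollama/ollama:latest',
    gpuIds: ['gpu-0'],
    port: 11434,
    models: ['llama3:8b'],
  };

  assert(isContainerConfigLike(container), 'Sample config should pass the guard');
  assert(!isContainerConfigLike({ id: 'x' }), 'Incomplete object should fail the guard');
});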

// -----------------------------------------------------------------------------
// Greenlit Model Tests
// -----------------------------------------------------------------------------

Deno.test('Greenlit model validation: valid model passes', () => {
  const greenlist = {
    version: '1.0',
    models: [
      { name: 'llama3:8b', container: 'ollama', minVram: 8 },
      { name: 'mistral:7b', container: 'ollama', minVram: 8 },
    ],
  };

  const requestedModel = 'llama3:8b';
  const availableVram = 24; // GB

  const model = greenlist.models.find((m) => m.name === requestedModel);
  assertExists(model, 'Model should be in greenlist');
  assert(availableVram >= model.minVram, 'Should have enough VRAM');
});

Deno.test('Greenlit model validation: insufficient VRAM fails', () => {
  const greenlist = {
    version: '1.0',
    models: [
      { name: 'llama3:70b', container: 'vllm', minVram: 48 },
    ],
  };

  const requestedModel = 'llama3:70b';
  const availableVram = 24; // GB

  const model = greenlist.models.find((m) => m.name === requestedModel);
  assertExists(model, 'Model should be in greenlist');
  assert(availableVram < model.minVram, 'Should NOT have enough VRAM');
});

Deno.test('Greenlit model validation: unlisted model rejected', () => {
  const greenlist = {
    version: '1.0',
    models: [
      { name: 'llama3:8b', container: 'ollama', minVram: 8 },
    ],
  };

  const requestedModel = 'some-random-model:latest';
  const model = greenlist.models.find((m) => m.name === requestedModel);
  assertEquals(model, undefined, 'Model should NOT be in greenlist');
});
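
// The three cases above share one decision: a model may be loaded only if it
// is greenlisted AND the GPU has enough VRAM. A hypothetical helper combining
// both checks might look like this (names are illustrative, not ModelGrid API).
Deno.test('Greenlit model validation: combined check (sketch)', () => {
  const greenlist = {
    version: '1.0',
    models: [
      { name: 'llama3:8b', container: 'ollama', minVram: 8 },
      { name: 'llama3:70b', container: 'vllm', minVram: 48 },
    ],
  };

  // Hypothetical combined check: greenlisted and within the VRAM budget.
  const canLoad = (name: string, availableVramGB: number): boolean => {
    const model = greenlist.models.find((m) => m.name === name);
    return model !== undefined && availableVramGB >= model.minVram;
  };

  assert(canLoad('llama3:8b', 24), 'Greenlisted model within VRAM budget should load');
  assert(!canLoad('llama3:70b', 24), 'Greenlisted model over VRAM budget should be rejected');
  assert(!canLoad('some-random-model:latest', 24), 'Unlisted model should be rejected');
});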

// -----------------------------------------------------------------------------
// API Request Validation Tests
// -----------------------------------------------------------------------------

Deno.test('Chat completion request: valid request passes', () => {
  const request = {
    model: 'llama3:8b',
    messages: [
      { role: 'user', content: 'Hello!' },
    ],
  };

  assertExists(request.model, 'Model should be specified');
  assert(request.messages.length > 0, 'Should have at least one message');
  assert(
    request.messages.every((m) => m.role && m.content),
    'All messages should have role and content',
  );
});

Deno.test('Chat completion request: missing model fails', () => {
  const request = {
    messages: [
      { role: 'user', content: 'Hello!' },
    ],
  };

  assertEquals((request as { model?: string }).model, undefined, 'Model should be missing');
});

Deno.test('Chat completion request: empty messages fails', () => {
  const request = {
    model: 'llama3:8b',
    messages: [],
  };

  assertEquals(request.messages.length, 0, 'Messages should be empty');
});

Deno.test('Embedding request: valid request passes', () => {
  const request = {
    model: 'llama3:8b',
    input: 'Hello, world!',
  };

  assertExists(request.model, 'Model should be specified');
  assertExists(request.input, 'Input should be specified');
});

Deno.test('Embedding request: array input passes', () => {
  const request = {
    model: 'llama3:8b',
    input: ['Hello', 'World'],
  };

  assertExists(request.model, 'Model should be specified');
  assert(Array.isArray(request.input), 'Input should be an array');
  assert(request.input.length > 0, 'Input should not be empty');
});
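
// The chat-completion cases above reduce to one predicate: a model is named
// and there is at least one message carrying both role and content. A
// hypothetical validator (illustrative only, not the ModelGrid API) and the
// expected results for the three request shapes used above:
Deno.test('Chat completion request: combined validation (sketch)', () => {
  type ChatMessage = { role?: string; content?: string };
  type ChatRequest = { model?: string; messages?: ChatMessage[] };

  const isValidChatRequest = (req: ChatRequest): boolean =>
    typeof req.model === 'string' && req.model.length > 0 &&
    Array.isArray(req.messages) && req.messages.length > 0 &&
    req.messages.every((m) => Boolean(m.role) && Boolean(m.content));

  assert(isValidChatRequest({ model: 'llama3:8b', messages: [{ role: 'user', content: 'Hi' }] }));
  assert(!isValidChatRequest({ messages: [{ role: 'user', content: 'Hi' }] }), 'Missing model');
  assert(!isValidChatRequest({ model: 'llama3:8b', messages: [] }), 'Empty messages');
});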

// -----------------------------------------------------------------------------
// Container Type Tests
// -----------------------------------------------------------------------------

Deno.test('Container types: ollama configuration', () => {
  const ollamaConfig = {
    type: 'ollama' as const,
    image: 'ollama/ollama:latest',
    defaultPort: 11434,
    apiPath: '/api',
  };

  assertEquals(ollamaConfig.type, 'ollama');
  assertEquals(ollamaConfig.defaultPort, 11434);
});

Deno.test('Container types: vllm configuration', () => {
  const vllmConfig = {
    type: 'vllm' as const,
    image: 'vllm/vllm-openai:latest',
    defaultPort: 8000,
    apiPath: '/v1',
  };

  assertEquals(vllmConfig.type, 'vllm');
  assertEquals(vllmConfig.defaultPort, 8000);
});

Deno.test('Container types: tgi configuration', () => {
  const tgiConfig = {
    type: 'tgi' as const,
    image: 'ghcr.io/huggingface/text-generation-inference:latest',
    defaultPort: 80,
    apiPath: '/generate',
  };

  assertEquals(tgiConfig.type, 'tgi');
  assertEquals(tgiConfig.defaultPort, 80);
});
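
// The three container profiles above can be collected into a single lookup
// table, which is one way a scheduler might resolve per-type defaults. The map
// is a sketch built from the literals above, not a ModelGrid export.
Deno.test('Container types: default lookup table (sketch)', () => {
  const containerDefaults = {
    ollama: { image: 'ollama/ollama:latest', defaultPort: 11434, apiPath: '/api' },
    vllm: { image: 'vllm/vllm-openai:latest', defaultPort: 8000, apiPath: '/v1' },
    tgi: {
      image: 'ghcr.io/huggingface/text-generation-inference:latest',
      defaultPort: 80,
      apiPath: '/generate',
    },
  } as const;

  assertEquals(containerDefaults.ollama.defaultPort, 11434);
  assertEquals(containerDefaults.vllm.apiPath, '/v1');
  assertEquals(Object.keys(containerDefaults).length, 3);
});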

// -----------------------------------------------------------------------------
// GPU Vendor Tests
// -----------------------------------------------------------------------------

Deno.test('GPU vendors: NVIDIA detection pattern', () => {
  const nvidiaPatterns = ['NVIDIA', 'GeForce', 'Quadro', 'Tesla', 'RTX', 'GTX'];
  const gpuName = 'NVIDIA GeForce RTX 4090';

  const isNvidia = nvidiaPatterns.some((pattern) =>
    gpuName.toUpperCase().includes(pattern.toUpperCase())
  );
  assert(isNvidia, 'Should detect NVIDIA GPU');
});

Deno.test('GPU vendors: AMD detection pattern', () => {
  const amdPatterns = ['AMD', 'Radeon', 'RX'];
  const gpuName = 'AMD Radeon RX 7900 XTX';

  const isAmd = amdPatterns.some((pattern) =>
    gpuName.toUpperCase().includes(pattern.toUpperCase())
  );
  assert(isAmd, 'Should detect AMD GPU');
});

Deno.test('GPU vendors: Intel detection pattern', () => {
  const intelPatterns = ['Intel', 'Arc', 'Iris', 'UHD'];
  const gpuName = 'Intel Arc A770';

  const isIntel = intelPatterns.some((pattern) =>
    gpuName.toUpperCase().includes(pattern.toUpperCase())
  );
  assert(isIntel, 'Should detect Intel GPU');
});
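
// The three vendor checks above can be folded into one detection helper that
// returns the first matching vendor, or undefined for unknown hardware. This is
// an illustrative sketch reusing the same pattern lists, not ModelGrid's detector.
Deno.test('GPU vendors: combined detection helper (sketch)', () => {
  const vendorPatterns: Record<string, string[]> = {
    nvidia: ['NVIDIA', 'GeForce', 'Quadro', 'Tesla', 'RTX', 'GTX'],
    amd: ['AMD', 'Radeon', 'RX'],
    intel: ['Intel', 'Arc', 'Iris', 'UHD'],
  };

  const detectVendor = (gpuName: string): string | undefined => {
    const upper = gpuName.toUpperCase();
    return Object.keys(vendorPatterns).find((vendor) =>
      vendorPatterns[vendor].some((pattern) => upper.includes(pattern.toUpperCase()))
    );
  };

  assertEquals(detectVendor('NVIDIA GeForce RTX 4090'), 'nvidia');
  assertEquals(detectVendor('AMD Radeon RX 7900 XTX'), 'amd');
  assertEquals(detectVendor('Intel Arc A770'), 'intel');
  assertEquals(detectVendor('Some Unknown Adapter'), undefined);
});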

// -----------------------------------------------------------------------------
// VRAM Calculation Tests
// -----------------------------------------------------------------------------

Deno.test('VRAM calculation: MB to GB conversion', () => {
  const vramMB = 24576; // 24 GB in MB
  const vramGB = vramMB / 1024;
  assertEquals(vramGB, 24);
});

Deno.test('VRAM calculation: model fits in available VRAM', () => {
  const availableVramGB = 24;
  const modelRequiredVramGB = 8;
  const overhead = 2; // GB for system overhead

  const fits = (modelRequiredVramGB + overhead) <= availableVramGB;
  assert(fits, 'Model should fit in available VRAM');
});

Deno.test('VRAM calculation: multiple models VRAM sum', () => {
  const models = [
    { name: 'llama3:8b', vram: 8 },
    { name: 'mistral:7b', vram: 8 },
  ];

  const totalVram = models.reduce((sum, m) => sum + m.vram, 0);
  assertEquals(totalVram, 16);
});
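
// Putting the three calculations above together: converting reported MB to GB,
// summing per-model requirements, and reserving system overhead gives a single
// fit check. The 2 GB overhead figure is the same assumption used above.
Deno.test('VRAM calculation: multiple models fit check (sketch)', () => {
  const gpuVramMB = 24576; // 24 GB reported in MB
  const availableVramGB = gpuVramMB / 1024;

  const models = [
    { name: 'llama3:8b', vram: 8 },
    { name: 'mistral:7b', vram: 8 },
  ];
  const overheadGB = 2;

  const requiredGB = models.reduce((sum, m) => sum + m.vram, 0) + overheadGB;
  assert(requiredGB <= availableVramGB, 'Both models plus overhead should fit in 24 GB');
});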