initial

.gitignore (vendored, Normal file, 32 lines)
@@ -0,0 +1,32 @@
# Dependencies
node_modules/

# Build outputs
dist/
dist_ts/

# IDE
.idea/
.vscode/
*.swp
*.swo

# OS
.DS_Store
Thumbs.db

# Logs
*.log
npm-debug.log*

# Environment
.env
.env.local
.env.*.local

# Test artifacts
.nogit/
coverage/

# Docker
*.tar

Dockerfile_minicpm45v (Normal file, 25 lines)
@@ -0,0 +1,25 @@
# MiniCPM-V 4.5 GPU Variant
# Vision-Language Model with NVIDIA GPU support
FROM ollama/ollama:latest

LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
LABEL description="MiniCPM-V 4.5 Vision-Language Model - GPU optimized"
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"

# Environment configuration
ENV MODEL_NAME="minicpm-v"
ENV OLLAMA_HOST="0.0.0.0"
ENV OLLAMA_ORIGINS="*"

# Copy and setup entrypoint
COPY image_support_files/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
RUN chmod +x /usr/local/bin/docker-entrypoint.sh

# Expose Ollama API port
EXPOSE 11434

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
    CMD curl -f http://localhost:11434/api/tags || exit 1

ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]

Dockerfile_minicpm45v_cpu (Normal file, 27 lines)
@@ -0,0 +1,27 @@
# MiniCPM-V 4.5 CPU Variant
# Vision-Language Model optimized for CPU-only inference
FROM ollama/ollama:latest

LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
LABEL description="MiniCPM-V 4.5 Vision-Language Model - CPU optimized (GGUF)"
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"

# Environment configuration for CPU-only mode
ENV MODEL_NAME="minicpm-v"
ENV OLLAMA_HOST="0.0.0.0"
ENV OLLAMA_ORIGINS="*"
# Disable GPU usage for CPU-only variant
ENV CUDA_VISIBLE_DEVICES=""

# Copy and setup entrypoint
COPY image_support_files/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
RUN chmod +x /usr/local/bin/docker-entrypoint.sh

# Expose Ollama API port
EXPOSE 11434

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=3 \
    CMD curl -f http://localhost:11434/api/tags || exit 1

ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]

build-images.sh (Executable file, 37 lines)
@@ -0,0 +1,37 @@
#!/bin/bash
set -e

# Configuration
REGISTRY="code.foss.global"
NAMESPACE="host.today"
IMAGE_NAME="ht-docker-ai"

# Colors for output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

echo -e "${BLUE}Building ht-docker-ai images...${NC}"

# Build GPU variant
echo -e "${GREEN}Building MiniCPM-V 4.5 GPU variant...${NC}"
docker build \
  -f Dockerfile_minicpm45v \
  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v \
  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-gpu \
  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest \
  .

# Build CPU variant
echo -e "${GREEN}Building MiniCPM-V 4.5 CPU variant...${NC}"
docker build \
  -f Dockerfile_minicpm45v_cpu \
  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-cpu \
  .

echo -e "${GREEN}All images built successfully!${NC}"
echo ""
echo "Available images:"
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v (GPU)"
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-cpu (CPU)"
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest (GPU)"
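
After a successful build, the tags can be pushed manually, as a sketch (assumes you are already logged in to `code.foss.global`; in CI this is handled by npmci, see readme.hints.md):

```bash
# Push every tag produced by build-images.sh
for tag in minicpm45v minicpm45v-gpu minicpm45v-cpu latest; do
  docker push code.foss.global/host.today/ht-docker-ai:${tag}
done
```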

image_support_files/docker-entrypoint.sh (Normal file, 28 lines)
@@ -0,0 +1,28 @@
#!/bin/bash
set -e

# Default model to pull
MODEL_NAME="${MODEL_NAME:-minicpm-v}"

# Start Ollama server in background
echo "Starting Ollama server..."
ollama serve &

# Wait for Ollama to be ready
echo "Waiting for Ollama server to start..."
sleep 5

# Check if model is already pulled
if ! ollama list | grep -q "${MODEL_NAME}"; then
  echo "Pulling model: ${MODEL_NAME}..."
  ollama pull "${MODEL_NAME}"
  echo "Model ${MODEL_NAME} pulled successfully."
else
  echo "Model ${MODEL_NAME} already available."
fi

echo "Ollama server ready with ${MODEL_NAME}"
echo "API available at http://0.0.0.0:11434"

# Keep container running
wait
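
The fixed `sleep 5` above is a heuristic: on a slow host the server may not be accepting connections yet when `ollama list` runs, and `set -e` would then abort the container. A more robust readiness poll that could be substituted (a sketch, not part of the committed script):

```bash
# Poll until the Ollama server responds, for up to 30 seconds
for i in $(seq 1 30); do
  if ollama list > /dev/null 2>&1; then
    break
  fi
  sleep 1
done
```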

npmextra.json (Normal file, 25 lines)
@@ -0,0 +1,25 @@
{
  "npmci": {
    "npmGlobalTools": [],
    "npmAccessLevel": "public"
  },
  "gitzone": {
    "projectType": "docker",
    "module": {
      "githost": "code.foss.global",
      "gitscope": "host.today",
      "gitrepo": "ht-docker-ai",
      "description": "Docker images for AI vision-language models including MiniCPM-V 4.5",
      "npmPackagename": "@host.today/ht-docker-ai",
      "license": "MIT",
      "keywords": [
        "docker",
        "ai",
        "vision-language",
        "minicpm",
        "ollama",
        "multimodal"
      ]
    }
  }
}

package.json (Normal file, 27 lines)
@@ -0,0 +1,27 @@
{
  "name": "@host.today/ht-docker-ai",
  "version": "1.0.0",
  "private": false,
  "description": "Docker images for AI vision-language models including MiniCPM-V 4.5",
  "main": "dist_ts/index.js",
  "typings": "dist_ts/index.d.ts",
  "author": "Task Venture Capital GmbH <hello@task.vc>",
  "license": "MIT",
  "scripts": {
    "build": "./build-images.sh",
    "test": "./test-images.sh"
  },
  "repository": {
    "type": "git",
    "url": "https://code.foss.global/host.today/ht-docker-ai.git"
  },
  "homepage": "https://code.foss.global/host.today/ht-docker-ai",
  "keywords": [
    "docker",
    "ai",
    "vision-language",
    "minicpm",
    "ollama",
    "multimodal"
  ]
}

readme.hints.md (Normal file, 125 lines)
@@ -0,0 +1,125 @@
# Technical Notes - ht-docker-ai

## Architecture

This project uses **Ollama** as the runtime framework for serving AI models, which provides:

- Automatic model download and caching
- Unified REST API (compatible with the OpenAI format)
- Built-in quantization support
- GPU/CPU auto-detection

## Model Details

### MiniCPM-V 4.5

- **Source**: OpenBMB (https://github.com/OpenBMB/MiniCPM-V)
- **Base Models**: Qwen3-8B + SigLIP2-400M
- **Total Parameters**: 8B
- **Ollama Model Name**: `minicpm-v`

### VRAM Usage

| Mode | VRAM Required |
|------|---------------|
| Full precision (bf16) | 18GB |
| int4 quantized | 9GB |
| GGUF (CPU) | 8GB RAM |

## Container Startup Flow

1. `docker-entrypoint.sh` starts the Ollama server in the background
2. Waits for the server to be ready
3. Checks whether the model already exists in the volume
4. Pulls the model if it is not present
5. Keeps the container running

## Volume Persistence

Mount `/root/.ollama` to persist downloaded models:

```bash
-v ollama-data:/root/.ollama
```

Without this volume, the model is re-downloaded on every container start (a ~5GB download).
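
As a quick sanity check that models really persist, you can measure what the named volume holds (a sketch; assumes the volume is named `ollama-data` as in the examples here):

```bash
# List the volume, then measure the model store inside it
docker volume ls | grep ollama-data
docker run --rm -v ollama-data:/data alpine du -sh /data
```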

## API Endpoints

All endpoints follow the Ollama API specification:

| Endpoint | Method | Description |
|----------|--------|-------------|
| `/api/tags` | GET | List available models |
| `/api/generate` | POST | Generate completion |
| `/api/chat` | POST | Chat completion |
| `/api/pull` | POST | Pull a model |
| `/api/show` | POST | Show model info |
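
For example, the pull and show endpoints can be exercised directly with curl (a sketch; assumes the container's port is mapped to `localhost:11434`):

```bash
# Pull a model explicitly through the API
curl http://localhost:11434/api/pull -d '{"model": "minicpm-v"}'

# Show metadata for a local model
curl http://localhost:11434/api/show -d '{"model": "minicpm-v"}'
```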

## GPU Detection

The GPU variant relies on Ollama's automatic GPU detection. For CPU-only mode, we set:

```dockerfile
ENV CUDA_VISIBLE_DEVICES=""
```

This forces Ollama to use CPU inference even if a GPU is available.
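
To confirm which backend a running container actually uses, `ollama ps` reports a processor column per loaded model (a sketch; assumes a container named `minicpm` as in the readme, and a model that has already served a request):

```bash
# The PROCESSOR column reads e.g. "100% GPU" or "100% CPU"
docker exec minicpm ollama ps
```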

## Health Checks

Both variants include Docker health checks:

```dockerfile
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
    CMD curl -f http://localhost:11434/api/tags || exit 1
```

The CPU variant uses a longer `start-period` (120s) to account for its slower startup.
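
The resulting health state can be read back through the Docker CLI (assumes a container named `minicpm`):

```bash
# Prints "starting", "healthy", or "unhealthy"
docker inspect --format '{{.State.Health.Status}}' minicpm
```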

## Adding New Models

To add a new model variant:

1. Create `Dockerfile_<modelname>`
2. Set the `MODEL_NAME` environment variable
3. Update `build-images.sh` with the new build target (see the sketch below)
4. Add documentation to `readme.md`
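
For instance, a hypothetical `llava` variant would add a block like this to `build-images.sh` (the model and file names are illustrative, not part of the current script):

```bash
# Hypothetical additional build target, reusing the script's variables
echo -e "${GREEN}Building LLaVA variant...${NC}"
docker build \
  -f Dockerfile_llava \
  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:llava \
  .
```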

## Troubleshooting

### Model download hangs

Check the container logs:

```bash
docker logs -f <container-name>
```

The model download is ~5GB and may take several minutes.

### Out of memory

- GPU: use the int4 quantized version or a GPU with more VRAM
- CPU: increase the container memory limit, e.g. `--memory=16g`

### API not responding

1. Check whether the container is healthy: `docker ps`
2. Check the logs for errors: `docker logs <container>`
3. Verify the port mapping: `curl localhost:11434/api/tags`

## CI/CD Integration

Build and push using npmci:

```bash
npmci docker login
npmci docker build
npmci docker push code.foss.global
```

## Related Resources

- [Ollama Documentation](https://ollama.ai/docs)
- [MiniCPM-V GitHub](https://github.com/OpenBMB/MiniCPM-V)
- [Ollama API Reference](https://github.com/ollama/ollama/blob/main/docs/api.md)

readme.md (Normal file, 147 lines)
@@ -0,0 +1,147 @@
# @host.today/ht-docker-ai

Docker images for AI vision-language models, starting with MiniCPM-V 4.5.

## Overview

This project provides ready-to-use Docker containers for running state-of-the-art AI vision-language models. It is built on Ollama for simplified model management and a consistent REST API.

## Available Images

| Tag | Description | Requirements |
|-----|-------------|--------------|
| `minicpm45v` | MiniCPM-V 4.5 with GPU support | NVIDIA GPU, 9-18GB VRAM |
| `minicpm45v-cpu` | MiniCPM-V 4.5 CPU-only | 8GB+ RAM |
| `latest` | Alias for `minicpm45v` | NVIDIA GPU |

## Quick Start

### GPU (Recommended)

```bash
docker run -d \
  --name minicpm \
  --gpus all \
  -p 11434:11434 \
  -v ollama-data:/root/.ollama \
  code.foss.global/host.today/ht-docker-ai:minicpm45v
```

### CPU Only

```bash
docker run -d \
  --name minicpm \
  -p 11434:11434 \
  -v ollama-data:/root/.ollama \
  code.foss.global/host.today/ht-docker-ai:minicpm45v-cpu
```

## API Usage

The container exposes the Ollama API on port 11434.

### List Available Models

```bash
curl http://localhost:11434/api/tags
```

### Generate Text from Image

```bash
curl http://localhost:11434/api/generate -d '{
  "model": "minicpm-v",
  "prompt": "What do you see in this image?",
  "images": ["<base64-encoded-image>"]
}'
```
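
To fill the `images` field from an actual file, encode it to base64 first. A sketch (GNU coreutils; `-w 0` disables line wrapping, on macOS use `base64 -i image.jpg` instead):

```bash
IMG_B64=$(base64 -w 0 image.jpg)
curl http://localhost:11434/api/generate -d "{
  \"model\": \"minicpm-v\",
  \"prompt\": \"What do you see in this image?\",
  \"images\": [\"${IMG_B64}\"]
}"
```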

### Chat with Vision

```bash
curl http://localhost:11434/api/chat -d '{
  "model": "minicpm-v",
  "messages": [
    {
      "role": "user",
      "content": "Describe this image in detail",
      "images": ["<base64-encoded-image>"]
    }
  ]
}'
```

## Environment Variables

| Variable | Default | Description |
|----------|---------|-------------|
| `MODEL_NAME` | `minicpm-v` | Model to pull on startup |
| `OLLAMA_HOST` | `0.0.0.0` | Host address for API |
| `OLLAMA_ORIGINS` | `*` | Allowed CORS origins |
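
Each variable can be overridden at run time. For example, to have the container pull a different model on startup (a sketch; `llava` stands in for any model available in the Ollama library):

```bash
docker run -d \
  --name ollama-llava \
  -p 11434:11434 \
  -e MODEL_NAME="llava" \
  -v ollama-data:/root/.ollama \
  code.foss.global/host.today/ht-docker-ai:minicpm45v-cpu
```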

## Hardware Requirements

### GPU Variant (`minicpm45v`)

- NVIDIA GPU with CUDA support
- Minimum 9GB VRAM (int4 quantized)
- Recommended 18GB VRAM (full precision)
- NVIDIA Container Toolkit installed (see the check below)

### CPU Variant (`minicpm45v-cpu`)

- Minimum 8GB RAM
- Recommended 16GB+ RAM for better performance
- No GPU required
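
A quick way to verify the toolkit before starting the GPU variant, as a sketch (the CUDA image tag is illustrative):

```bash
# Should print the host's GPU table; failure means containers cannot see the GPU
docker run --rm --gpus all nvidia/cuda:12.4.1-base-ubuntu22.04 nvidia-smi
```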

## Model Information

**MiniCPM-V 4.5** is a GPT-4o level multimodal large language model developed by OpenBMB.

- **Parameters**: 8B (Qwen3-8B + SigLIP2-400M)
- **Capabilities**: Image understanding, OCR, multi-image analysis
- **Languages**: 30+ languages including English, Chinese, French, Spanish

## Docker Compose Example

```yaml
version: '3.8'
services:
  minicpm:
    image: code.foss.global/host.today/ht-docker-ai:minicpm45v
    container_name: minicpm
    ports:
      - "11434:11434"
    volumes:
      - ollama-data:/root/.ollama
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    restart: unless-stopped

volumes:
  ollama-data:
```
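
Bring the stack up and smoke-test it (assumes the file above is saved as `docker-compose.yml` in the current directory):

```bash
docker compose up -d
# Give the first start time to pull the model, then confirm the API answers
curl http://localhost:11434/api/tags
```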

## Building Locally

```bash
# Clone the repository
git clone https://code.foss.global/host.today/ht-docker-ai.git
cd ht-docker-ai

# Build all images
./build-images.sh

# Run tests
./test-images.sh
```

## License

MIT - Task Venture Capital GmbH

test-images.sh (Executable file, 72 lines)
@@ -0,0 +1,72 @@
#!/bin/bash
set -e

# Configuration
REGISTRY="code.foss.global"
NAMESPACE="host.today"
IMAGE_NAME="ht-docker-ai"
TEST_PORT=11434

# Colors for output
GREEN='\033[0;32m'
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m'

cleanup() {
  echo -e "${BLUE}Cleaning up test containers...${NC}"
  docker rm -f test-minicpm-gpu 2>/dev/null || true
  docker rm -f test-minicpm-cpu 2>/dev/null || true
}

trap cleanup EXIT

test_image() {
  local tag=$1
  local container_name=$2
  local extra_args=$3

  echo -e "${BLUE}Testing ${tag}...${NC}"

  # Start container
  docker run -d \
    --name ${container_name} \
    -p ${TEST_PORT}:11434 \
    ${extra_args} \
    ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:${tag}

  # Wait for startup
  echo "Waiting for container to start..."
  sleep 10

  # Test API endpoint
  echo "Testing API endpoint..."
  if curl -s -f http://localhost:${TEST_PORT}/api/tags > /dev/null; then
    echo -e "${GREEN}API endpoint responding!${NC}"
  else
    echo -e "${RED}API endpoint not responding!${NC}"
    docker logs ${container_name}
    return 1
  fi

  # Cleanup this container
  docker rm -f ${container_name}

  echo -e "${GREEN}${tag} test passed!${NC}"
  echo ""
}

echo -e "${BLUE}=== Testing ht-docker-ai images ===${NC}"
echo ""

# Test CPU variant (doesn't require GPU)
test_image "minicpm45v-cpu" "test-minicpm-cpu" ""

# Test GPU variant only if NVIDIA runtime is available
if docker info 2>/dev/null | grep -q "nvidia"; then
  test_image "minicpm45v" "test-minicpm-gpu" "--gpus all"
else
  echo -e "${BLUE}Skipping GPU test (NVIDIA runtime not available)${NC}"
fi

echo -e "${GREEN}=== All tests passed! ===${NC}"