Files
ht-docker-ai/image_support_files/docker-entrypoint.sh
2026-01-16 01:51:57 +00:00

29 lines
644 B
Bash

#!/bin/bash
set -euo pipefail

# Entrypoint: start the Ollama server, ensure the requested model is
# available locally, then block for the server's lifetime.

# Model to ensure is present; override via the MODEL_NAME env var.
MODEL_NAME="${MODEL_NAME:-minicpm-v}"

echo "Starting Ollama server..."
ollama serve &
SERVER_PID=$!

# Poll until the server answers instead of a fixed sleep, so slow cold
# starts don't break the model check below (which would kill the
# container under `set -e`).
echo "Waiting for Ollama server to start..."
for (( i = 0; i < 30; i++ )); do
  if ollama list >/dev/null 2>&1; then
    break
  fi
  # Bail out early if the server process already died.
  if ! kill -0 "${SERVER_PID}" 2>/dev/null; then
    echo "Ollama server exited during startup." >&2
    exit 1
  fi
  sleep 1
done

# Pull the model only if it is not already present.  Anchor the match to
# the start of the line so e.g. "llama2" does not match "codellama2".
if ! ollama list | grep -q "^${MODEL_NAME}"; then
  echo "Pulling model: ${MODEL_NAME}..."
  ollama pull "${MODEL_NAME}"
  echo "Model ${MODEL_NAME} pulled successfully."
else
  echo "Model ${MODEL_NAME} already available."
fi

echo "Ollama server ready with ${MODEL_NAME}"
echo "API available at http://0.0.0.0:11434"

# Block on the server process; `wait "$PID"` propagates its exit status
# so the container stops (non-zero) if the server dies.
wait "${SERVER_PID}"