feat(docker-images): add vLLM-based Nanonets-OCR2-3B image, Qwen3-VL Ollama image and refactor build/docs/tests to use new runtime/layout
This commit is contained in:
25
Dockerfile_minicpm45v_ollama_gpu_VRAM9GB
Normal file
25
Dockerfile_minicpm45v_ollama_gpu_VRAM9GB
Normal file
@@ -0,0 +1,25 @@
|
||||
# syntax=docker/dockerfile:1
# MiniCPM-V 4.5 GPU Variant
# Vision-Language Model with NVIDIA GPU support

# Base tag is parameterized so CI can pin an exact Ollama release for
# reproducible builds; default stays "latest" for backward compatibility.
ARG OLLAMA_TAG=latest
FROM ollama/ollama:${OLLAMA_TAG}

LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
LABEL description="MiniCPM-V 4.5 Vision-Language Model - GPU optimized"
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"

# Environment configuration (grouped into one layer — related runtime settings):
#   MODEL_NAME     - model the entrypoint pulls/serves via Ollama
#   OLLAMA_HOST    - bind all interfaces so the API is reachable from outside
#   OLLAMA_ORIGINS - allow any CORS origin
ENV MODEL_NAME="minicpm-v" \
    OLLAMA_HOST="0.0.0.0" \
    OLLAMA_ORIGINS="*"

# Copy entrypoint with the execute bit set at copy time
# (avoids the extra RUN chmod layer, which duplicated the file's size).
COPY --chmod=755 image_support_files/minicpm45v_entrypoint.sh /usr/local/bin/docker-entrypoint.sh

# Expose Ollama API port (documentation only; publish with -p/-P at run time)
EXPOSE 11434

# Health check: probe the local API with the ollama CLI, which is guaranteed
# to exist in the base image (curl is presumably NOT installed in
# ollama/ollama, which would have made the previous curl-based check fail
# permanently — verify against the base image if in doubt).
# --start-period=60s gives the model time to load before failures count.
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
    CMD ollama list || exit 1

# NOTE(review): container runs as root, matching the upstream ollama image;
# adding a USER would require chowning the model store — left unchanged.
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
|
||||
Reference in New Issue
Block a user