#!/bin/bash
# Generic Services Manager
# Manages MongoDB and S3/MinIO containers for any project

# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
MAGENTA='\033[0;35m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Function to print colored messages
print_message() {
    echo -e "${2}${1}${NC}"
}

# Function to print a header
print_header() {
    echo
    print_message "═══════════════════════════════════════════════════════════════" "$CYAN"
    print_message " $1" "$CYAN"
    print_message "═══════════════════════════════════════════════════════════════" "$CYAN"
    echo
}

# Check that Docker is installed
check_docker() {
    if ! command -v docker &> /dev/null; then
        print_message "Error: Docker is not installed. Please install Docker first." "$RED"
        exit 1
    fi
}

# Get the project name from package.json or the directory name
get_project_name() {
    local name=""
    if [ -f "package.json" ]; then
        name=$(grep '"name"' package.json | head -1 | cut -d'"' -f4)
        # Sanitize: @fin.cx/skr → fin-cx-skr
        echo "$name" | sed 's/@//g' | sed 's/[\/\.]/-/g'
    else
        basename "$(pwd)"
    fi
}

# Generate a random available port between 20000 and 30000
get_random_port() {
    local port
    local max_attempts=100
    local attempts=0

    while [ $attempts -lt $max_attempts ]; do
        port=$((RANDOM % 10001 + 20000))
        # Check if the port is available
        if ! lsof -i:$port >/dev/null 2>&1 && ! nc -z localhost $port 2>/dev/null; then
            echo $port
            return 0
        fi
        attempts=$((attempts + 1))
    done

    # Fallback: let the system assign a port
    print_message "Warning: Could not find random port, using system-assigned port" "$YELLOW"
    echo "0"
}

# Add a missing field to a JSON file
add_json_field() {
    local file=$1
    local key=$2
    local value=$3

    if ! grep -q "\"$key\"" "$file" 2>/dev/null; then
        # Add the field before the last closing brace
        local temp_file="${file}.tmp"

        # Remove the last line (the closing })
        head -n -1 "$file" > "$temp_file"

        # Add a separating comma unless the previous line is blank or already ends with one
        local last_line=$(tail -n 1 "$temp_file")
        if [[ ! "$last_line" =~ ^[[:space:]]*$ ]] && [[ ! "$last_line" =~ ,$ ]]; then
            echo "," >> "$temp_file"
        fi

        # Add the new field and the closing brace
        echo " \"$key\": \"$value\"" >> "$temp_file"
        echo "}" >> "$temp_file"

        mv "$temp_file" "$file"
        return 0  # Field was added
    fi
    return 1  # Field already exists
}
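# Illustrative example (not part of the script's logic): starting from a minimal
#   {
#     "PROJECT_NAME": "my-app"
#   }
# a call such as
#   add_json_field ".nogit/env.json" "MONGODB_HOST" "localhost"
# rewrites the file to roughly
#   {
#     "PROJECT_NAME": "my-app"
#   ,
#    "MONGODB_HOST": "localhost"
#   }
# The lone comma is unusual formatting but still valid JSON, and existing keys are
# never touched, which is how user-edited values survive repeated runs.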
# Update or create env.json with defaults
update_or_create_env_json() {
    mkdir -p .nogit

    local project_name=$(get_project_name)
    local changes_made=false
    local fields_added=""

    if [ -f ".nogit/env.json" ]; then
        print_message "📋 Checking .nogit/env.json for missing values..." "$CYAN"

        # Check and add missing fields
        if add_json_field ".nogit/env.json" "PROJECT_NAME" "$project_name"; then
            fields_added="${fields_added}PROJECT_NAME, "
            changes_made=true
        fi
        if add_json_field ".nogit/env.json" "MONGODB_HOST" "localhost"; then
            fields_added="${fields_added}MONGODB_HOST, "
            changes_made=true
        fi
        if add_json_field ".nogit/env.json" "MONGODB_NAME" "$project_name"; then
            fields_added="${fields_added}MONGODB_NAME, "
            changes_made=true
        fi
        if ! grep -q "\"MONGODB_PORT\"" ".nogit/env.json" 2>/dev/null; then
            local mongo_port=$(get_random_port)
            add_json_field ".nogit/env.json" "MONGODB_PORT" "$mongo_port"
            fields_added="${fields_added}MONGODB_PORT($mongo_port), "
            changes_made=true
        fi
        if add_json_field ".nogit/env.json" "MONGODB_USER" "defaultadmin"; then
            fields_added="${fields_added}MONGODB_USER, "
            changes_made=true
        fi
        if add_json_field ".nogit/env.json" "MONGODB_PASS" "defaultpass"; then
            fields_added="${fields_added}MONGODB_PASS, "
            changes_made=true
        fi
        if add_json_field ".nogit/env.json" "S3_HOST" "localhost"; then
            fields_added="${fields_added}S3_HOST, "
            changes_made=true
        fi
        if ! grep -q "\"S3_PORT\"" ".nogit/env.json" 2>/dev/null; then
            local s3_port=$(get_random_port)
            add_json_field ".nogit/env.json" "S3_PORT" "$s3_port"
            fields_added="${fields_added}S3_PORT($s3_port), "
            changes_made=true
        fi

        # Get S3_PORT for the console port calculation
        local s3_port_value=$(grep '"S3_PORT"' .nogit/env.json | cut -d'"' -f4)
        if [ ! -z "$s3_port_value" ] && ! grep -q "\"S3_CONSOLE_PORT\"" ".nogit/env.json" 2>/dev/null; then
            local console_port=$((s3_port_value + 1))
            # Check if the console port is available
            while lsof -i:$console_port >/dev/null 2>&1 || nc -z localhost $console_port 2>/dev/null; do
                console_port=$((console_port + 1))
            done
            add_json_field ".nogit/env.json" "S3_CONSOLE_PORT" "$console_port"
            fields_added="${fields_added}S3_CONSOLE_PORT($console_port), "
            changes_made=true
        fi
        if add_json_field ".nogit/env.json" "S3_USER" "defaultadmin"; then
            fields_added="${fields_added}S3_USER, "
            changes_made=true
        fi
        if add_json_field ".nogit/env.json" "S3_PASS" "defaultpass"; then
            fields_added="${fields_added}S3_PASS, "
            changes_made=true
        fi
        if add_json_field ".nogit/env.json" "S3_BUCKET" "${project_name}-documents"; then
            fields_added="${fields_added}S3_BUCKET, "
            changes_made=true
        fi

        if [ "$changes_made" = true ]; then
            # Remove trailing comma and space
            fields_added=${fields_added%, }
            print_message "✅ Added missing fields: $fields_added" "$GREEN"
        else
            print_message "✅ Configuration complete" "$GREEN"
        fi
    else
        # Create a new env.json with random ports
        print_message "📋 Creating .nogit/env.json with default values..." "$YELLOW"

        local mongo_port=$(get_random_port)
        local s3_port=$(get_random_port)
        local s3_console_port=$((s3_port + 1))

        # Make sure the console port is also available
        while lsof -i:$s3_console_port >/dev/null 2>&1 || nc -z localhost $s3_console_port 2>/dev/null; do
            s3_console_port=$((s3_console_port + 1))
        done

        cat > .nogit/env.json << EOF
{
  "PROJECT_NAME": "$project_name",
  "MONGODB_HOST": "localhost",
  "MONGODB_NAME": "$project_name",
  "MONGODB_PORT": "$mongo_port",
  "MONGODB_USER": "defaultadmin",
  "MONGODB_PASS": "defaultpass",
  "S3_HOST": "localhost",
  "S3_PORT": "$s3_port",
  "S3_CONSOLE_PORT": "$s3_console_port",
  "S3_USER": "defaultadmin",
  "S3_PASS": "defaultpass",
  "S3_BUCKET": "${project_name}-documents"
}
EOF
    fi
}

# Load configuration from .nogit/env.json
load_config() {
    update_or_create_env_json

    if [ -f ".nogit/env.json" ]; then
        PROJECT_NAME=$(grep -o '"PROJECT_NAME"[[:space:]]*:[[:space:]]*"[^"]*"' .nogit/env.json 2>/dev/null | cut -d'"' -f4)
        MONGODB_HOST=$(grep -o '"MONGODB_HOST"[[:space:]]*:[[:space:]]*"[^"]*"' .nogit/env.json 2>/dev/null | cut -d'"' -f4)
        MONGODB_NAME=$(grep -o '"MONGODB_NAME"[[:space:]]*:[[:space:]]*"[^"]*"' .nogit/env.json 2>/dev/null | cut -d'"' -f4)
        MONGODB_PORT=$(grep -o '"MONGODB_PORT"[[:space:]]*:[[:space:]]*"[^"]*"' .nogit/env.json 2>/dev/null | cut -d'"' -f4)
        MONGODB_USER=$(grep -o '"MONGODB_USER"[[:space:]]*:[[:space:]]*"[^"]*"' .nogit/env.json 2>/dev/null | cut -d'"' -f4)
        MONGODB_PASS=$(grep -o '"MONGODB_PASS"[[:space:]]*:[[:space:]]*"[^"]*"' .nogit/env.json 2>/dev/null | cut -d'"' -f4)
        S3_HOST=$(grep -o '"S3_HOST"[[:space:]]*:[[:space:]]*"[^"]*"' .nogit/env.json 2>/dev/null | cut -d'"' -f4)
        S3_PORT=$(grep -o '"S3_PORT"[[:space:]]*:[[:space:]]*"[^"]*"' .nogit/env.json 2>/dev/null | cut -d'"' -f4)
        S3_CONSOLE_PORT=$(grep -o '"S3_CONSOLE_PORT"[[:space:]]*:[[:space:]]*"[^"]*"' .nogit/env.json 2>/dev/null | cut -d'"' -f4)
        S3_USER=$(grep -o '"S3_USER"[[:space:]]*:[[:space:]]*"[^"]*"' .nogit/env.json 2>/dev/null | cut -d'"' -f4)
        S3_PASS=$(grep -o '"S3_PASS"[[:space:]]*:[[:space:]]*"[^"]*"' .nogit/env.json 2>/dev/null | cut -d'"' -f4)
        S3_BUCKET=$(grep -o '"S3_BUCKET"[[:space:]]*:[[:space:]]*"[^"]*"' .nogit/env.json 2>/dev/null | cut -d'"' -f4)
    fi

    # Fallback to defaults if any value is missing (shouldn't happen after update_or_create_env_json)
    PROJECT_NAME=${PROJECT_NAME:-$(get_project_name)}
    MONGODB_HOST=${MONGODB_HOST:-"localhost"}
    MONGODB_NAME=${MONGODB_NAME:-"$PROJECT_NAME"}
    MONGODB_PORT=${MONGODB_PORT:-"27017"}
    MONGODB_USER=${MONGODB_USER:-"defaultadmin"}
    MONGODB_PASS=${MONGODB_PASS:-"defaultpass"}
    S3_HOST=${S3_HOST:-"localhost"}
    S3_PORT=${S3_PORT:-"9000"}
    S3_CONSOLE_PORT=${S3_CONSOLE_PORT:-"9001"}
    S3_USER=${S3_USER:-"defaultadmin"}
    S3_PASS=${S3_PASS:-"defaultpass"}
    S3_BUCKET=${S3_BUCKET:-"${PROJECT_NAME}-documents"}

    # Container names (project-specific to avoid conflicts)
    MONGO_CONTAINER="${PROJECT_NAME}-mongodb"
    MINIO_CONTAINER="${PROJECT_NAME}-minio"

    # Data directories
    MONGO_DATA_DIR="$(pwd)/.nogit/mongodata"
    MINIO_DATA_DIR="$(pwd)/.nogit/miniodata"

    print_message "📋 Project: $PROJECT_NAME" "$MAGENTA"
}

# Show current configuration
show_config() {
    print_header "Current Configuration"

    print_message "Project: $PROJECT_NAME" "$MAGENTA"
    echo
    print_message "MongoDB:" "$YELLOW"
    print_message "  Host: $MONGODB_HOST:$MONGODB_PORT" "$NC"
    print_message "  Database: $MONGODB_NAME" "$NC"
    print_message "  User: $MONGODB_USER" "$NC"
    print_message "  Password: ***" "$NC"
    print_message "  Container: $MONGO_CONTAINER" "$NC"
    print_message "  Data: $MONGO_DATA_DIR" "$NC"
    print_message "  Connection: mongodb://$MONGODB_USER:***@$MONGODB_HOST:$MONGODB_PORT/$MONGODB_NAME" "$BLUE"
    echo
    print_message "S3/MinIO:" "$YELLOW"
    print_message "  Host: $S3_HOST" "$NC"
    print_message "  API Port: $S3_PORT" "$NC"
    print_message "  Console Port: $S3_CONSOLE_PORT" "$NC"
    print_message "  User: $S3_USER" "$NC"
    print_message "  Password: ***" "$NC"
    print_message "  Bucket: $S3_BUCKET" "$NC"
    print_message "  Container: $MINIO_CONTAINER" "$NC"
    print_message "  Data: $MINIO_DATA_DIR" "$NC"
    print_message "  API URL: http://$S3_HOST:$S3_PORT" "$BLUE"
    print_message "  Console URL: http://$S3_HOST:$S3_CONSOLE_PORT" "$BLUE"
}

# Check container status
check_status() {
    local container=$1
    if docker ps --format '{{.Names}}' | grep -q "^${container}$"; then
        echo "running"
    elif docker ps -a --format '{{.Names}}' | grep -q "^${container}$"; then
        echo "stopped"
    else
        echo "not_exists"
    fi
}

# Start MongoDB
start_mongodb() {
    print_message "📦 MongoDB:" "$YELLOW"

    # Create the data directory if needed
    [ ! -d "$MONGO_DATA_DIR" ] && mkdir -p "$MONGO_DATA_DIR"

    local status=$(check_status "$MONGO_CONTAINER")

    case $status in
        "running")
            print_message "   Already running ✓" "$GREEN"
            ;;
        "stopped")
            docker start "$MONGO_CONTAINER" > /dev/null
            print_message "   Started ✓" "$GREEN"
            ;;
        "not_exists")
            print_message "   Creating container..." "$YELLOW"
            docker run -d \
                --name "$MONGO_CONTAINER" \
                -p "0.0.0.0:${MONGODB_PORT}:27017" \
                -v "$MONGO_DATA_DIR:/data/db" \
                -e MONGO_INITDB_ROOT_USERNAME="$MONGODB_USER" \
                -e MONGO_INITDB_ROOT_PASSWORD="$MONGODB_PASS" \
                -e MONGO_INITDB_DATABASE="$MONGODB_NAME" \
                --restart unless-stopped \
                mongo:7.0 > /dev/null
            print_message "   Created and started ✓" "$GREEN"
            ;;
    esac

    print_message "   Container: $MONGO_CONTAINER" "$CYAN"
    print_message "   Port: $MONGODB_PORT" "$CYAN"
    print_message "   Connection: mongodb://$MONGODB_USER:$MONGODB_PASS@$MONGODB_HOST:$MONGODB_PORT/$MONGODB_NAME?authSource=admin" "$BLUE"
}

# Start MinIO
start_minio() {
    print_message "📦 S3/MinIO:" "$YELLOW"

    # Create the data directory if needed
    [ ! -d "$MINIO_DATA_DIR" ] && mkdir -p "$MINIO_DATA_DIR"

    local status=$(check_status "$MINIO_CONTAINER")

    case $status in
        "running")
            print_message "   Already running ✓" "$GREEN"
            ;;
        "stopped")
            docker start "$MINIO_CONTAINER" > /dev/null
            print_message "   Started ✓" "$GREEN"
            ;;
        "not_exists")
            print_message "   Creating container..." "$YELLOW"
            docker run -d \
                --name "$MINIO_CONTAINER" \
                -p "${S3_PORT}:9000" \
                -p "${S3_CONSOLE_PORT}:9001" \
                -v "$MINIO_DATA_DIR:/data" \
                -e MINIO_ROOT_USER="$S3_USER" \
                -e MINIO_ROOT_PASSWORD="$S3_PASS" \
                --restart unless-stopped \
                minio/minio server /data --console-address ":9001" > /dev/null

            # Wait for MinIO to start, then create the default bucket
            sleep 3
            docker exec "$MINIO_CONTAINER" mc alias set local http://localhost:9000 "$S3_USER" "$S3_PASS" 2>/dev/null
            docker exec "$MINIO_CONTAINER" mc mb "local/$S3_BUCKET" 2>/dev/null || true
            print_message "   Created and started ✓" "$GREEN"
            print_message "   Bucket '$S3_BUCKET' created ✓" "$GREEN"
            ;;
    esac

    print_message "   Container: $MINIO_CONTAINER" "$CYAN"
    print_message "   Port: $S3_PORT" "$CYAN"
    print_message "   Bucket: $S3_BUCKET" "$CYAN"
    print_message "   API: http://$S3_HOST:$S3_PORT" "$BLUE"
    print_message "   Console: http://$S3_HOST:$S3_CONSOLE_PORT (login: $S3_USER/***)" "$BLUE"
}
# Stop MongoDB
stop_mongodb() {
    print_message "📦 MongoDB:" "$YELLOW"
    local status=$(check_status "$MONGO_CONTAINER")
    if [ "$status" = "running" ]; then
        docker stop "$MONGO_CONTAINER" > /dev/null
        print_message "   Stopped ✓" "$GREEN"
    else
        print_message "   Not running" "$YELLOW"
    fi
}

# Stop MinIO
stop_minio() {
    print_message "📦 S3/MinIO:" "$YELLOW"
    local status=$(check_status "$MINIO_CONTAINER")
    if [ "$status" = "running" ]; then
        docker stop "$MINIO_CONTAINER" > /dev/null
        print_message "   Stopped ✓" "$GREEN"
    else
        print_message "   Not running" "$YELLOW"
    fi
}

# Remove containers
remove_containers() {
    local removed=false

    if docker ps -a --format '{{.Names}}' | grep -q "^${MONGO_CONTAINER}$"; then
        docker rm -f "$MONGO_CONTAINER" > /dev/null 2>&1
        print_message "   MongoDB container removed ✓" "$GREEN"
        removed=true
    fi

    if docker ps -a --format '{{.Names}}' | grep -q "^${MINIO_CONTAINER}$"; then
        docker rm -f "$MINIO_CONTAINER" > /dev/null 2>&1
        print_message "   S3/MinIO container removed ✓" "$GREEN"
        removed=true
    fi

    if [ "$removed" = false ]; then
        print_message "   No containers to remove" "$YELLOW"
    fi
}

# Clean data
clean_data() {
    local cleaned=false

    if [ -d "$MONGO_DATA_DIR" ]; then
        rm -rf "$MONGO_DATA_DIR"
        print_message "   MongoDB data removed ✓" "$GREEN"
        cleaned=true
    fi

    if [ -d "$MINIO_DATA_DIR" ]; then
        rm -rf "$MINIO_DATA_DIR"
        print_message "   S3/MinIO data removed ✓" "$GREEN"
        cleaned=true
    fi

    if [ "$cleaned" = false ]; then
        print_message "   No data to clean" "$YELLOW"
    fi
}

# Show status
show_status() {
    print_header "Service Status"

    print_message "Project: $PROJECT_NAME" "$MAGENTA"
    echo

    # MongoDB status
    local mongo_status=$(check_status "$MONGO_CONTAINER")
    case $mongo_status in
        "running")
            print_message "📦 MongoDB: 🟢 Running" "$GREEN"
            print_message "   ├─ Container: $MONGO_CONTAINER" "$CYAN"
            print_message "   └─ mongodb://$MONGODB_USER:***@$MONGODB_HOST:$MONGODB_PORT/$MONGODB_NAME" "$CYAN"
            ;;
        "stopped")
            print_message "📦 MongoDB: 🟡 Stopped" "$YELLOW"
            print_message "   └─ Container: $MONGO_CONTAINER" "$CYAN"
            ;;
        "not_exists")
            print_message "📦 MongoDB: ⚪ Not installed" "$MAGENTA"
            ;;
    esac

    # MinIO status
    local minio_status=$(check_status "$MINIO_CONTAINER")
    case $minio_status in
        "running")
            print_message "📦 S3/MinIO: 🟢 Running" "$GREEN"
            print_message "   ├─ Container: $MINIO_CONTAINER" "$CYAN"
            print_message "   ├─ API: http://$S3_HOST:$S3_PORT" "$CYAN"
            print_message "   ├─ Console: http://$S3_HOST:$S3_CONSOLE_PORT" "$CYAN"
            print_message "   └─ Bucket: $S3_BUCKET" "$CYAN"
            ;;
        "stopped")
            print_message "📦 S3/MinIO: 🟡 Stopped" "$YELLOW"
            print_message "   └─ Container: $MINIO_CONTAINER" "$CYAN"
            ;;
        "not_exists")
            print_message "📦 S3/MinIO: ⚪ Not installed" "$MAGENTA"
            ;;
    esac
}

# Show logs
show_logs() {
    local service=$1
    local lines=${2:-20}

    case $service in
        "mongo"|"mongodb")
            if docker ps --format '{{.Names}}' | grep -q "^${MONGO_CONTAINER}$"; then
                print_header "MongoDB Logs (last $lines lines)"
                docker logs --tail "$lines" "$MONGO_CONTAINER"
            else
                print_message "MongoDB container is not running" "$YELLOW"
            fi
            ;;
        "minio"|"s3")
            if docker ps --format '{{.Names}}' | grep -q "^${MINIO_CONTAINER}$"; then
                print_header "S3/MinIO Logs (last $lines lines)"
                docker logs --tail "$lines" "$MINIO_CONTAINER"
            else
                print_message "S3/MinIO container is not running" "$YELLOW"
            fi
            ;;
        "all"|"")
            show_logs "mongo" "$lines"
            echo
            show_logs "minio" "$lines"
            ;;
        *)
            print_message "Usage: $0 logs [mongo|s3|all] [lines]" "$YELLOW"
            ;;
    esac
}
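# Typical non-interactive use in CI or a package.json script (a sketch; the path
# scripts/services.sh is only an assumption about where this file lives):
#
#   bash scripts/services.sh start      # boot MongoDB + MinIO on the configured ports
#   npm test                            # run the project's tests against them
#   bash scripts/services.sh stop       # shut the containers down again
#
# start/stop/status/logs run without prompts; only `remove` and `clean` ask for
# confirmation, so those are better run manually.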
# Main menu
show_help() {
    print_header "Generic Services Manager"

    print_message "Usage: $0 [command] [options]" "$GREEN"
    echo
    print_message "Commands:" "$YELLOW"
    print_message "  start [service]     Start services (mongo|s3|all)" "$NC"
    print_message "  stop [service]      Stop services (mongo|s3|all)" "$NC"
    print_message "  restart [service]   Restart services (mongo|s3|all)" "$NC"
    print_message "  status              Show service status" "$NC"
    print_message "  config              Show current configuration" "$NC"
    print_message "  logs [service]      Show logs (mongo|s3|all) [lines]" "$NC"
    print_message "  remove              Remove all containers" "$NC"
    print_message "  clean               Remove all containers and data ⚠️" "$NC"
    print_message "  help                Show this help message" "$NC"
    echo
    print_message "Features:" "$YELLOW"
    print_message "  • Auto-creates .nogit/env.json with smart defaults" "$NC"
    print_message "  • Random ports (20000-30000) to avoid conflicts" "$NC"
    print_message "  • Project-specific containers for multi-project support" "$NC"
    print_message "  • Preserves custom configuration values" "$NC"
    echo
    print_message "Examples:" "$YELLOW"
    print_message "  $0 start            # Start all services" "$NC"
    print_message "  $0 start mongo      # Start only MongoDB" "$NC"
    print_message "  $0 stop             # Stop all services" "$NC"
    print_message "  $0 status           # Check service status" "$NC"
    print_message "  $0 config           # Show configuration" "$NC"
    print_message "  $0 logs mongo 50    # Show last 50 lines of MongoDB logs" "$NC"
}

# Main script
check_docker
load_config

case ${1:-help} in
    start)
        print_header "Starting Services"
        case ${2:-all} in
            mongo|mongodb) start_mongodb ;;
            minio|s3) start_minio ;;
            all|"")
                start_mongodb
                echo
                start_minio
                ;;
            *)
                print_message "Unknown service: $2" "$RED"
                print_message "Use: mongo, s3, or all" "$YELLOW"
                ;;
        esac
        ;;
    stop)
        print_header "Stopping Services"
        case ${2:-all} in
            mongo|mongodb) stop_mongodb ;;
            minio|s3) stop_minio ;;
            all|"")
                stop_mongodb
                echo
                stop_minio
                ;;
            *)
                print_message "Unknown service: $2" "$RED"
                print_message "Use: mongo, s3, or all" "$YELLOW"
                ;;
        esac
        ;;
    restart)
        print_header "Restarting Services"
        case ${2:-all} in
            mongo|mongodb)
                stop_mongodb
                sleep 2
                start_mongodb
                ;;
            minio|s3)
                stop_minio
                sleep 2
                start_minio
                ;;
            all|"")
                stop_mongodb
                stop_minio
                sleep 2
                start_mongodb
                echo
                start_minio
                ;;
            *)
                print_message "Unknown service: $2" "$RED"
                ;;
        esac
        ;;
    status)
        show_status
        ;;
    config)
        show_config
        ;;
    logs)
        show_logs "${2:-all}" "${3:-20}"
        ;;
    remove)
        print_header "Removing Containers"
        print_message "⚠️ This will remove containers but preserve data" "$YELLOW"
        read -p "Continue? (y/N): " -n 1 -r
        echo
        if [[ $REPLY =~ ^[Yy]$ ]]; then
            remove_containers
        else
            print_message "Cancelled" "$YELLOW"
        fi
        ;;
    clean)
        print_header "Clean All"
        print_message "⚠️ WARNING: This will remove all containers and data!" "$RED"
        print_message "This action cannot be undone!" "$RED"
        read -p "Are you sure? Type 'yes' to confirm: " -r
        if [ "$REPLY" = "yes" ]; then
            remove_containers
            echo
            clean_data
            print_message "All cleaned ✓" "$GREEN"
        else
            print_message "Cancelled" "$YELLOW"
        fi
        ;;
    help|--help|-h)
        show_help
        ;;
    *)
        print_message "Unknown command: $1" "$RED"
        show_help
        exit 1
        ;;
esac

echo