#!/usr/bin/env bash
# 🛣️ setup-stack.sh
# Fully automated, GPU-aware AI stack installer.
# Includes: Ollama, LocalAI, OpenWebUI, OpenDiffusion + VSCode LSP, unified config, stateful tracking, port resolution, bind mounts.
set -euo pipefail
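# Strict mode: abort on the first failing command (-e), treat unset variables
# as errors (-u), and fail a pipeline if any stage in it fails (pipefail).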
# =========================
# 📁 Directory + Variables
# =========================
STACK_ROOT="/var/lib/localai"
MODELS_DIR="$STACK_ROOT/models"
LOG_DIR="/var/log/localai"
STATE_DIR="$STACK_ROOT/state"
CONFIG_DIR="$STACK_ROOT/config"
INSTALL_STATE_FILE="$STATE_DIR/install.state"
OPENWEBUI_DIR="$STACK_ROOT/openwebui"
OPENDIFFUSION_DIR="$STACK_ROOT/opendiffusion"
OLLAMA_DIR="$STACK_ROOT/ollama"
LOCALAI_CONFIG_FILE="$CONFIG_DIR/config.yaml"
mkdir -p "$MODELS_DIR" "$LOG_DIR" "$STATE_DIR" "$CONFIG_DIR" \
  "$OPENWEBUI_DIR" "$OPENDIFFUSION_DIR" "$OLLAMA_DIR"
chmod -R a+rwx "$STACK_ROOT" "$LOG_DIR"
# =========================
# ⚙️ Flags
# =========================
DEBUG=false
DRY_RUN=false
UNINSTALL=false
UPDATE=false
STATUS=false
RAW=false
for arg in "$@"; do
  case "$arg" in
    --debug) DEBUG=true ;;
    --dry-run) DRY_RUN=true ;;
    --uninstall) UNINSTALL=true ;;
    --update) UPDATE=true ;;
    --status|--summary) STATUS=true ;;
    --raw) RAW=true ;;
  esac
done
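# Example invocations:
#   ./setup-stack.sh --dry-run --debug   # show what would run, with debug logging
#   ./setup-stack.sh --status            # print the stack summary and exit
#   ./setup-stack.sh --update            # also pull newer container images
#   ./setup-stack.sh --uninstall         # remove containers, data, and logs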
log() { [[ "$DEBUG" == true ]] && echo "[DEBUG] $*" || true; }
# Echo the command under --dry-run; otherwise execute it with its arguments intact.
run() { if [[ "$DRY_RUN" == true ]]; then echo "+ $*"; else "$@"; fi; }
require_cmd() { command -v "$1" >/dev/null || { echo "❌ Missing required command: $1"; exit 1; }; }
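# Example: with --dry-run, `run docker ps` only prints "+ docker ps" and nothing
# executes; without it, the command runs exactly as passed.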
# =========================
# 🔧 Detection + Ports
# =========================
has_nvidia_gpu() { command -v nvidia-smi >/dev/null && nvidia-smi -L | grep -q GPU; }
has_amd_gpu() { command -v lspci >/dev/null && lspci | grep -i vga | grep -qi amd; }
get_total_ram() { awk '/MemTotal/ {print int($2/1024/1024)}' /proc/meminfo; }
get_free_disk() { df --output=avail -BG "$STACK_ROOT" | tail -1 | tr -dc '0-9'; }
CPU_MODE=true
if has_nvidia_gpu || has_amd_gpu; then CPU_MODE=false; fi
find_free_port() {
  local port=${1:-62000}
  while ss -tuln | grep -qE ":$port\b"; do ((port++)); done
  echo "$port"
}
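# Usage: find_free_port [start] — returns the first port >= start (default 62000)
# that nothing is listening on. The searches below are seeded past each previous
# result so the three services get distinct ports.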
# =========================
# 🔧 LocalAI Config
# =========================
generate_localai_config() {
  cat >"$LOCALAI_CONFIG_FILE" <<EOF
server:
  host: 0.0.0.0
  port: $LOCALAI_PORT
  workers: 4
  threads: 8
  timeout: 120
  debug: false
gpu:
  enabled: $(if [[ "$CPU_MODE" == false ]]; then echo true; else echo false; fi)
log_level: info
log_json: true
models-path: "$MODELS_DIR"
models:
  - name: mistral
    backend: llama
    path: "$MODELS_DIR/mistral"
    parameters:
      temperature: 0.7
  - name: llama3
    backend: llama
    path: "$MODELS_DIR/llama3"
    parameters:
      temperature: 0.7
EOF
  echo "localai_config_generated=true" >> "$INSTALL_STATE_FILE"
}
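# Once the LocalAI container is up, its OpenAI-compatible API can be exercised
# directly; the model name must match an entry in config.yaml above. For example:
#   curl "http://localhost:$LOCALAI_PORT/v1/chat/completions" \
#     -H 'Content-Type: application/json' \
#     -d '{"model":"mistral","messages":[{"role":"user","content":"hello"}]}'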
# =========================
# 🧠 Ollama + Models
# =========================
install_ollama() {
  if command -v ollama >/dev/null; then log "Ollama already installed"; echo "ollama_installed=true" >> "$INSTALL_STATE_FILE"; return; fi
  ARCH=$(uname -m); OS=$(uname | tr '[:upper:]' '[:lower:]')
  ARCH=${ARCH/x86_64/amd64}; ARCH=${ARCH/aarch64/arm64}
  TARBALL="ollama-$OS-$ARCH.tgz"
  TMP=$(mktemp -d)
  run curl -fsSL -o "$TMP/$TARBALL" "https://ollama.com/download/$TARBALL"
  run tar -xzf "$TMP/$TARBALL" -C "$TMP"
  run install -Dm755 "$TMP/ollama" /usr/local/bin/ollama
  rm -rf "$TMP"
  echo "ollama_installed=true" >> "$INSTALL_STATE_FILE"
}
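# Notes on the install above (assumptions, verify on your system):
# - Newer Ollama release tarballs unpack into bin/ and lib/ subdirectories; if
#   so, the binary lands at "$TMP/bin/ollama" rather than "$TMP/ollama".
# - `ollama pull` talks to a running Ollama server; start one first if needed
#   (ollama serve >/dev/null 2>&1 &) and confirm the install with: ollama --version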
select_models() {
  local ram=$(get_total_ram)
  local disk=$(get_free_disk)
  local models=("mistral" "llama3")
  if [[ "$CPU_MODE" == false ]]; then
    models+=("deepseek-coder")
    ((ram >= 32 && disk >= 30)) && models+=("codellama")
  fi
  echo "${models[@]}"
}
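# Resulting selection: CPU-only hosts get "mistral llama3"; GPU hosts also get
# deepseek-coder, plus codellama when there are at least 32 GB of RAM and 30 GB
# of free disk under $STACK_ROOT.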
prefetch_model() {
  local model="$1"
  run ollama pull "$model"
  echo "model_${model}_pulled=true" >> "$INSTALL_STATE_FILE"
}
# =========================
# 💻 VSCode Integration
# =========================
configure_vscode() {
  # Skip quietly when VSCode is absent; only then insist on jq for the merge.
  command -v code >/dev/null || return 0
  require_cmd jq
  log "⚙️ Configuring VSCode extensions"
  for ext in \
    continue.continue ms-python.python ms-toolsai.jupyter \
    streetsidesoftware.code-spell-checker ms-python.vscode-pylance \
    golang.go rust-lang.rust-analyzer dbaeumer.vscode-eslint \
    esbenp.prettier-vscode timonwong.shellcheck redhat.vscode-yaml \
    yzhang.markdown-all-in-one ms-azuretools.vscode-docker \
    be5invis.toml ms-vscode.cpptools redhat.java \
    ecmel.vscode-html-css aeschli.vscode-css-formatter \
    sumneko.lua editorconfig.editorconfig; do
    run code --install-extension "$ext" || true
  done
  local cfg="$HOME/.config/Code/User/settings.json"
  mkdir -p "$(dirname "$cfg")"
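  # The block below is merged into the existing user settings, with these values
  # taking precedence. The "continue.serverUrl" / "continue.backend" keys assume
  # a Continue extension version that reads its endpoint from VSCode settings;
  # newer releases configure this in their own config file instead.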
  # Merge through a temp file so jq never reads and truncates settings.json at once.
  local tmp_cfg
  tmp_cfg=$(mktemp)
  jq -s 'reduce .[] as $item ({}; . * $item)' \
    <(cat "$cfg" 2>/dev/null || echo '{}') \
    <(cat <<EOF
{
  "continue.serverUrl": "http://localhost:$LOCALAI_PORT",
  "continue.backend": "ollama",
  "cSpell.enabled": true,
  "cSpell.language": "en,technical",
  "python.languageServer": "Pylance",
  "python.formatting.provider": "black",
  "python.linting.enabled": true,
  "python.linting.pylintEnabled": true,
  "python.linting.flake8Enabled": true,
  "go.useLanguageServer": true,
  "go.formatTool": "gofmt",
  "go.lintTool": "golint",
  "rust-analyzer.cargo.allFeatures": true,
  "rust-analyzer.procMacro.enable": true,
  "shellcheck.enable": true,
  "yaml.validate": true,
  "yaml.schemas": {
    "https://json.schemastore.org/github-workflow.json": "/.github/workflows/*"
  },
  "html.format.enable": true,
  "css.validate": true
}
EOF
  ) > "$tmp_cfg" && mv "$tmp_cfg" "$cfg"
  echo "vscode_configured=true" >> "$INSTALL_STATE_FILE"
}
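# Optional sanity checks after configuration:
#   code --list-extensions | grep -i continue
#   jq . "$HOME/.config/Code/User/settings.json"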
# =========================
# 📊 Status Summary
# =========================
status_summary() {
  echo -e "\n📊 Stack Summary"
  echo "---------------------------"
  printf "CPU: %s\n" "$(lscpu | grep 'Model name' | cut -d ':' -f2 | xargs)"
  printf "RAM: %s GB\n" "$(get_total_ram)"
  printf "Free Disk: %s GB\n" "$(get_free_disk)"
  printf "NVIDIA GPU: %s\n" "$(has_nvidia_gpu && echo Yes || echo No)"
  printf "AMD GPU: %s\n" "$(has_amd_gpu && echo Yes || echo No)"
  printf "Models Dir: %s\n" "$MODELS_DIR"
  printf "Log Dir: %s\n" "$LOG_DIR"
  printf "VSCode: %s\n" "$(command -v code &>/dev/null && echo Yes || echo No)"
  printf "Ollama Installed: %s\n" "$(command -v ollama &>/dev/null && echo Yes || echo No)"
  printf "Install State: %s\n" "$INSTALL_STATE_FILE"
  # Ports are only assigned during an install run, so default them under set -u.
  echo "OpenWebUI Port: ${OPENWEBUI_PORT:-not assigned}"
  echo "OpenDiffusion Port: ${OPENDIFFUSION_PORT:-not assigned}"
  echo "LocalAI Port: ${LOCALAI_PORT:-not assigned}"
  echo "---------------------------"
}
# =========================
# 🧼 CLI Entrypoints
# =========================
[[ "$UNINSTALL" == true ]] && {
  echo "🧼 Uninstalling..."
  run docker rm -f openwebui open-diffusion localai || true
  run rm -rf "$STACK_ROOT" "$LOG_DIR"
  echo "✅ Removed"
  exit 0
}
[[ "$STATUS" == true ]] && { status_summary; exit 0; }
# =========================
# 🚀 Main Install Logic
# =========================
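# External tools the install path relies on; require_cmd exits with a clear
# message if one is missing.
require_cmd curl
require_cmd docker
require_cmd ss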
install_ollama
[[ "$UPDATE" == true ]] && {
  echo "⬆️ Updating container images..."
  run docker pull ghcr.io/open-webui/open-webui:latest
  run docker pull ghcr.io/opensd/open-diffusion:latest
  run docker pull quay.io/go-skynet/local-ai:latest
}
[[ "$CPU_MODE" == true ]] && echo "⚠️ CPU-only mode: expect slower inference."
echo "🧠 Prefetching models..."
for m in $(select_models); do prefetch_model "$m"; done
# Seed each search past the previous result so the three services get distinct ports.
OPENWEBUI_PORT=$(find_free_port 62000)
OPENDIFFUSION_PORT=$(find_free_port $((OPENWEBUI_PORT + 1)))
LOCALAI_PORT=$(find_free_port $((OPENDIFFUSION_PORT + 1)))
generate_localai_config
echo "🛑 Stopping old containers..."
for c in openwebui open-diffusion localai; do
  run docker rm -f "$c" || true
done
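# Note: GPU detection above only influences model selection and the generated
# LocalAI config. For the containers themselves to see an NVIDIA GPU, the host
# also needs the NVIDIA Container Toolkit and a flag such as `--gpus all` added
# to the `docker run` commands below; they are left CPU-only here.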
| echo "π¦ Launching containers..." | |
| run docker run -d --name openwebui --restart=always \ | |
| -p "$OPENWEBUI_PORT:3000" \ | |
| -v "$OPENWEBUI_DIR:/app/data" \ | |
| -v "$MODELS_DIR:/models" \ | |
| -v "$LOG_DIR/openwebui:/var/log/localai/openwebui" \ | |
| ghcr.io/open-webui/open-webui:latest | |
run docker run -d --name open-diffusion --restart=always \
  -p "$OPENDIFFUSION_PORT:7860" \
  -v "$OPENDIFFUSION_DIR:/data" \
  -v "$MODELS_DIR:/models" \
  -v "$LOG_DIR/opendiffusion:/var/log/localai/opendiffusion" \
  ghcr.io/opensd/open-diffusion:latest
run docker run -d --name localai --restart=always \
  -p "$LOCALAI_PORT:8080" \
  -v "$OLLAMA_DIR:/data" \
  -v "$MODELS_DIR:/models" \
  -v "$LOG_DIR/localai:/var/log/localai/localai" \
  -v "$LOCALAI_CONFIG_FILE:/etc/localai/config.yaml" \
  quay.io/go-skynet/local-ai:latest
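# Optional: wait until LocalAI answers before declaring the stack ready.
# /v1/models is its OpenAI-compatible model listing endpoint; the 60 s budget
# is an arbitrary choice.
if [[ "$DRY_RUN" == false ]]; then
  for _ in $(seq 1 30); do
    curl -fsS "http://localhost:$LOCALAI_PORT/v1/models" >/dev/null 2>&1 && break
    sleep 2
  done
fi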
configure_vscode
status_summary
echo -e "\n🎉 Stack ready! Access points:"
echo "- 🌐 OpenWebUI: http://localhost:$OPENWEBUI_PORT"
echo "- 🎨 Diffusion: http://localhost:$OPENDIFFUSION_PORT"
echo "- 🤖 LocalAI API: http://localhost:$LOCALAI_PORT"
echo "- 🧠 VSCode: Ready with Continue + LSP"
echo "- 📦 Models: $MODELS_DIR"
echo "- 🪵 Logs: $LOG_DIR"
echo ""
echo "🔧 To stop: docker stop openwebui open-diffusion localai"
echo "🧼 To uninstall: run this script with --uninstall"
echo "⬆️ To update: run this script with --update"