#!/usr/bin/env bash
# 🔧 Enhanced Ollama + VS Code Integration Setup Script
# Project: ollama-vscode-integration
# Purpose: Install Ollama system-wide, configure VS Code (user), and pull models
set -euo pipefail
trap 'echo "❌ An error occurred. Exiting." >&2' ERR
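### PREFLIGHT
# Optional check: every tool below is used later in this script (curl/jq for
# the GitHub API, lspci for GPU detection, yay for the AUR install). Fail
# early with a clear message instead of partway through.
for cmd in curl jq lspci tar yay; do
  command -v "$cmd" >/dev/null 2>&1 || { echo "❌ Missing required tool: $cmd" >&2; exit 1; }
done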
### CONFIGURATION
TMP_DIR="/tmp/ollama-install"
INSTALL_DIR="/usr/local/bin"
MODEL_DIR="/var/lib/ollama"
SERVICE_FILE="/etc/systemd/system/ollama.service"
SETTINGS="$HOME/.config/Code/User/settings.json"
EXTS=(
  Continue.continue
  streetsidesoftware.code-spell-checker
  ms-python.python
  golang.go
  rust-lang.rust-analyzer
)
### DETECTION
ARCH=$(uname -m)
RAM_GB=$(awk '/MemTotal/ {printf "%.0f", $2 / 1024 / 1024}' /proc/meminfo)
DISK_AVAIL_GB=$(df --output=avail -BG "$HOME" | tail -1 | tr -dc '0-9')
GPU_VENDOR_AMD=$(lspci | grep -i 'vga\|3d' | grep -i amd || true)
# The grouped fallback echoes "nvidia" only when lspci finds no NVIDIA device
# but nvidia-smi is installed; without the grouping, a positive lspci match
# would also trigger the extra "nvidia" echo.
GPU_VENDOR_NVIDIA=$( (lspci | grep -i 'vga\|3d' | grep -i nvidia) || { command -v nvidia-smi >/dev/null && echo "nvidia"; } || true)
### MODEL SELECTION
RECOMMENDED_MODELS=("mistral") # Always include
[[ $RAM_GB -ge 8 && $DISK_AVAIL_GB -ge 16 ]] && RECOMMENDED_MODELS+=("llama3")
command -v code >/dev/null && [[ $DISK_AVAIL_GB -ge 8 ]] && RECOMMENDED_MODELS+=("codellama")
[[ $RAM_GB -ge 16 && $DISK_AVAIL_GB -ge 20 ]] && RECOMMENDED_MODELS+=("deepseek-coder:6.7b")
# Note: there is no "llama3:rocm" model tag to pull; ROCm acceleration comes
# from the ROCm build of the Ollama binary selected in Step 2, and a failed
# pull would abort the whole script under "set -e".
if [[ -n "$GPU_VENDOR_NVIDIA" && $RAM_GB -ge 24 && $DISK_AVAIL_GB -ge 32 ]]; then
  RECOMMENDED_MODELS+=("deepseek-coder:33b")
fi
# Use a name distinct from the OLLAMA_MODELS environment variable, which the
# Ollama daemon reads as its model storage directory.
MODELS_TO_PULL=("${RECOMMENDED_MODELS[@]}")
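# The RAM/disk thresholds above are rough heuristics: a quantized 7B-class
# model wants roughly 4-8 GB of RAM, and 33B-class models considerably more.
# Adjust RECOMMENDED_MODELS to taste.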
### GPU PACKAGE INSTALL
if [[ -n "$GPU_VENDOR_NVIDIA" ]]; then
  echo "🔧 NVIDIA GPU detected → installing NVIDIA drivers and tools..."
  sudo pacman -S --needed --noconfirm nvidia nvidia-utils
elif [[ -n "$GPU_VENDOR_AMD" && -z "$GPU_VENDOR_NVIDIA" ]]; then
  echo "🔧 AMD GPU detected → installing ROCm packages..."
  # rocminfo is packaged separately on Arch and is needed by the verification
  # step below.
  sudo pacman -S --needed --noconfirm rocm-opencl-runtime rocm-core rocminfo
fi
### GPU VERIFICATION
# Note: a freshly installed NVIDIA kernel module typically requires a reboot
# (or module reload) before nvidia-smi reports the GPU.
if [[ -n "$GPU_VENDOR_NVIDIA" ]]; then
  if ! command -v nvidia-smi >/dev/null || ! nvidia-smi >/dev/null 2>&1; then
    echo "❌ NVIDIA drivers installed but not functioning properly. Check nvidia-smi output." >&2
    exit 1
  else
    echo "✅ NVIDIA GPU is available and functional."
  fi
fi
if [[ -n "$GPU_VENDOR_AMD" && -z "$GPU_VENDOR_NVIDIA" ]]; then
  if ! command -v rocminfo >/dev/null || ! rocminfo >/dev/null 2>&1; then
    echo "❌ ROCm tools installed but not functioning properly. Check rocminfo output." >&2
    exit 1
  else
    echo "✅ ROCm runtime appears functional."
  fi
fi
### STEP 1: Install VS Code
echo -e "\n📦 Installing VS Code (AUR)..."
yay -S --noconfirm visual-studio-code-bin
### STEP 2: Install latest Ollama
echo -e "\n📥 Downloading Ollama..."
LATEST=$(curl -s https://api.github.com/repos/ollama/ollama/releases/latest)
VERSION=$(jq -r .tag_name <<<"$LATEST")
ARCH_NAME=$( [[ "$ARCH" == "x86_64" ]] && echo "amd64" || echo "arm64" )
if [[ "$ARCH_NAME" == "amd64" && -n "$GPU_VENDOR_AMD" && -z "$GPU_VENDOR_NVIDIA" ]]; then
  FILE_PATTERN="^ollama-linux-${ARCH_NAME}-rocm\\.tgz$"
else
  FILE_PATTERN="^ollama-linux-${ARCH_NAME}(?!.*rocm).*\\.tgz$"
fi
# head -n1 guards against the pattern matching more than one asset; fail early
# if no matching asset exists.
DL_URL=$(jq -r --arg pattern "$FILE_PATTERN" '.assets[] | select(.name | test($pattern)) | .browser_download_url' <<<"$LATEST" | head -n1)
[[ -n "$DL_URL" ]] || { echo "❌ No release asset matched $FILE_PATTERN for $VERSION" >&2; exit 1; }
mkdir -p "$TMP_DIR"
cd "$TMP_DIR"
curl -L "$DL_URL" -o ollama.tgz
# Recent Ollama release tarballs ship bin/ and lib/ subtrees rather than a
# bare binary, so extract into the /usr/local prefix (yielding
# $INSTALL_DIR/ollama) instead of moving a single file.
sudo tar -xzf ollama.tgz -C "$(dirname "$INSTALL_DIR")"
sudo chmod +x "$INSTALL_DIR/ollama"
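# Optional sanity check: make sure the installed binary actually executes
# before wiring up the service.
"$INSTALL_DIR/ollama" --version >/dev/null 2>&1 || { echo "❌ Installed ollama binary failed to run" >&2; exit 1; }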
### STEP 3: Create model store
echo -e "\n📁 Setting up $MODEL_DIR..."
sudo mkdir -p "$MODEL_DIR"
sudo chown root:root "$MODEL_DIR"
sudo chmod 755 "$MODEL_DIR"
### STEP 4: Systemd service
echo -e "\n⚙️ Writing systemd service..."
sudo tee "$SERVICE_FILE" >/dev/null <<EOF
[Unit]
Description=Ollama AI Daemon (System-wide)
After=network.target

[Service]
ExecStart=$INSTALL_DIR/ollama serve
Environment=OLLAMA_MODELS=$MODEL_DIR
Restart=on-failure
User=root

[Install]
WantedBy=multi-user.target
EOF
# daemon-reload is sufficient to pick up the new unit file; daemon-reexec is
# only needed after upgrading systemd itself.
sudo systemctl daemon-reload
sudo systemctl enable --now ollama.service
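# Useful checks once the service is up (standard systemd tooling):
#   systemctl status ollama.service
#   journalctl -u ollama.service -f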
### STEP 5: Pull models
echo -e "\n📥 Pulling models: ${MODELS_TO_PULL[*]}"
for model in "${MODELS_TO_PULL[@]}"; do
  # "ollama pull" downloads the model without opening a session; "ollama run"
  # would drop into an interactive REPL and stall the script.
  sudo OLLAMA_MODELS="$MODEL_DIR" "$INSTALL_DIR/ollama" pull "$model"
done
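# Optional verification: /api/tags is part of Ollama's HTTP API and lists the
# models the daemon can serve.
curl -s http://localhost:11434/api/tags | jq -r '.models[].name' || true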
### STEP 6: Install VS Code extensions
echo -e "\n🔌 Installing VS Code extensions..."
for ext in "${EXTS[@]}"; do
  code --install-extension "$ext" || echo "⚠️ Failed to install: $ext"
done
### STEP 7: Configure settings.json
echo -e "\n🛠️ Configuring VS Code LLM settings..."
mkdir -p "$(dirname "$SETTINGS")"
cp "$SETTINGS" "$SETTINGS.bak" 2>/dev/null || true
# Seed an empty object when no settings file exists yet, so the same jq merge
# applies in both cases and a fresh install still gets the settings below.
# Caveat: jq cannot parse JSONC, so a settings.json containing comments will
# make this step fail; the .bak copy above preserves the original file.
[[ -f "$SETTINGS" ]] || echo '{}' > "$SETTINGS"
jq '. + {
  "continue.serverProvider": "ollama",
  "continue.ollama": {
    "url": "http://localhost:11434",
    "model": "llama3"
  },
  "cSpell.enabled": true,
  "cSpell.language": "en,en-US",
  "editor.quickSuggestions": true,
  "editor.suggestOnTriggerCharacters": true,
  "editor.codeActionsOnSave": {
    "source.organizeImports": true,
    "source.fixAll": true
  },
  "editor.accessibilitySupport": "on"
}' "$SETTINGS" > "${SETTINGS}.tmp"
mv "${SETTINGS}.tmp" "$SETTINGS"
### DONE
echo -e "\n✅ Setup complete!"
"$INSTALL_DIR/ollama" --version || echo "⚠️ Could not verify Ollama version"
echo "🌐 Ollama running at http://localhost:11434"
echo "📦 Models stored in $MODEL_DIR"
echo "📝 VS Code settings written to $SETTINGS"