Ollama + Qwen2.5-Coder setup for a privacy-friendly pivot pipeline. Companion to https://ceaksan.com/en/pivot-table-data-analytics-and-ai-integration
#!/usr/bin/env bash
#
# Ollama + Qwen2.5-Coder setup script for a privacy-friendly pivot pipeline.
# Source: https://ceaksan.com/en/pivot-table-data-analytics-and-ai-integration
#
# What it does:
#   1. Installs the Ollama daemon (macOS / Linux)
#   2. Pulls Qwen2.5-Coder (size picked from available memory: 7B / 14B / 32B)
#   3. Creates a Python venv with minimal deps
#   4. Smoke test: produces a one-sentence insight from a tiny pivot summary
#
# Usage:
#   chmod +x ollama-setup.sh && ./ollama-setup.sh
#   ./ollama-setup.sh --model 14b    # force the 14B model
#   ./ollama-setup.sh --skip-test    # skip the smoke test
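
# Fail fast: exit on any error (-e), on unset variables (-u), and on failures anywhere in a pipeline (pipefail)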
set -euo pipefail
| MODEL_OVERRIDE="" | |
| SKIP_TEST=0 | |
| while [[ $# -gt 0 ]]; do | |
| case "$1" in | |
| --model) MODEL_OVERRIDE="$2"; shift 2 ;; | |
| --skip-test) SKIP_TEST=1; shift ;; | |
    -h|--help)
      # Print the contiguous header comment as help text; the original grep dumped
      # every comment in the file (including the shebang), so stop at the first non-comment line
      awk 'NR>1 && /^#/ {sub(/^# ?/, ""); print} NR>1 && !/^#/ {exit}' "$0"
      exit 0 ;;
| *) echo "Unknown argument: $1" >&2; exit 1 ;; | |
| esac | |
| done | |
log()  { printf '\033[1;36m==>\033[0m %s\n' "$*"; }
warn() { printf '\033[1;33m!! \033[0m%s\n' "$*" >&2; }
die()  { printf '\033[1;31mxx \033[0m%s\n' "$*" >&2; exit 1; }

# 1. OS detect
OS="$(uname -s)"
case "$OS" in
  Darwin) PLATFORM="macos" ;;
  Linux)  PLATFORM="linux" ;;
  *)      die "Unsupported OS: $OS (macOS and Linux only)" ;;
esac
log "Platform: $PLATFORM"

# 2. Ollama daemon
if ! command -v ollama >/dev/null 2>&1; then
  log "Installing Ollama..."
  if [[ "$PLATFORM" == "macos" ]]; then
    if command -v brew >/dev/null 2>&1; then
      brew install ollama
    else
      die "brew not found. Install manually from https://ollama.com"
    fi
  else
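    # Official installer from ollama.com; review the script first if your environment requires it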
    curl -fsSL https://ollama.com/install.sh | sh
  fi
else
  log "Ollama already installed: $(ollama --version 2>&1 | head -1)"
fi

# 3. Start daemon
if ! pgrep -f "ollama serve" >/dev/null 2>&1; then
  log "Starting Ollama daemon (background)..."
  if [[ "$PLATFORM" == "macos" ]]; then
    # Braces ensure only the fallback 'ollama serve' is backgrounded, not the whole || list
    open -a Ollama 2>/dev/null || { ollama serve >/tmp/ollama.log 2>&1 & }
  else
    nohup ollama serve >/tmp/ollama.log 2>&1 &
  fi
  for _ in 1 2 3 4 5 6 7 8 9 10; do  # wait up to 10s for the API (default port 11434)
    curl -fsS http://localhost:11434/api/version >/dev/null 2>&1 && break
    sleep 1
  done
fi

# 4. Detect memory -> recommend model
detect_memory_gb() {
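  # macOS reports bytes via hw.memsize; Linux reports kB in /proc/meminfo -- both converted to whole GB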
| if [[ "$PLATFORM" == "macos" ]]; then | |
| echo $(( $(sysctl -n hw.memsize) / 1073741824 )) | |
| else | |
| awk '/MemTotal/ {printf "%d\n", $2/1024/1024}' /proc/meminfo | |
| fi | |
| } | |
| MEM_GB=$(detect_memory_gb) | |
| log "Memory: ${MEM_GB}GB" | |
| if [[ -n "$MODEL_OVERRIDE" ]]; then | |
| MODEL_TAG="qwen2.5-coder:${MODEL_OVERRIDE}" | |
| elif (( MEM_GB >= 32 )); then | |
| MODEL_TAG="qwen2.5-coder:32b" | |
| elif (( MEM_GB >= 16 )); then | |
| MODEL_TAG="qwen2.5-coder:14b" | |
| else | |
| MODEL_TAG="qwen2.5-coder:7b" | |
| warn "Less than 16GB RAM detected, picking 7B. Override with --model 14b if you want to try larger." | |
| fi | |
| log "Selected model: $MODEL_TAG" | |
# 5. Pull model
# Match the full tag, not just the model family, so asking for a different size still triggers a pull
if ! ollama list 2>/dev/null | grep -qF "$MODEL_TAG"; then
  log "Pulling model (4-20GB depending on size)..."
  ollama pull "$MODEL_TAG"
else
  log "Model already present: $MODEL_TAG"
fi
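# Tip: 'ollama show "$MODEL_TAG"' prints the pulled model's metadata if you want to verify it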

# 6. Python venv (uv preferred, fallback to python -m venv)
VENV_DIR=".venv-pivot"
if [[ ! -d "$VENV_DIR" ]]; then
  log "Creating Python venv: $VENV_DIR"
  if command -v uv >/dev/null 2>&1; then
    uv venv "$VENV_DIR"
  else
    python3 -m venv "$VENV_DIR"
  fi
fi

# shellcheck disable=SC1091
source "$VENV_DIR/bin/activate"

log "Installing Python dependencies..."
if command -v uv >/dev/null 2>&1; then
  uv pip install --quiet polars pandas pyarrow numpy ollama
else
  pip install --quiet polars pandas pyarrow numpy ollama
fi

# 7. Smoke test
if (( SKIP_TEST == 0 )); then
  log "Running smoke test..."
| python3 - <<PY | |
| import polars as pl | |
| import ollama | |
| df = pl.DataFrame({ | |
| "region": ["Marmara", "Aegean", "Mediterranean", "Central Anatolia"], | |
| "revenue": [142_300, 58_900, 41_200, 38_750], | |
| "order_count": [4_820, 1_710, 1_290, 1_140], | |
| }) | |
| prompt = ( | |
| "Below is a regional e-commerce summary. In one sentence, " | |
| "identify the underperforming region and hypothesize a cause:\n\n" | |
| f"{df}\n" | |
| ) | |
| resp = ollama.chat( | |
| model="${MODEL_TAG}", | |
| messages=[{"role": "user", "content": prompt}], | |
| options={"temperature": 0.2}, | |
| ) | |
| print("\nLLM response:") | |
| print(resp["message"]["content"]) | |
| PY | |
fi

log "Done."
echo
| echo "Usage:" | |
| echo " source $VENV_DIR/bin/activate" | |
| echo " jupyter lab pivot-benchmark.ipynb" | |
echo
echo "Switch model:"
echo "  ollama pull qwen2.5-coder:32b"
echo
| echo "Privacy note: all data stays local. Ollama does not call external APIs." |