#!/usr/bin/env bash
# ─────────────────────────────────────────────────────────────────────
# Samantha Installer — ZenonAI
# https://zenon.ai/install.sh
#
# Usage:
#   curl -fsSL https://zenon.ai/install.sh | bash
#
# NOTE: this script requires bash — it uses `set -o pipefail`, $'…'
# ANSI-C quoting and `&>` redirection, none of which plain POSIX sh
# (dash on Debian/Ubuntu) supports. Pipe it to `bash`, not `sh`.
#
# What this script does:
#   1. Checks system requirements (Python 3.9+, git, curl)
#   2. Installs Ollama if not already present
#   3. Clones or updates the Samantha repo from GitHub to ~/samantha
#   4. Creates a Python virtual environment and installs dependencies
#   5. Installs the Playwright Chromium browser
#   6. Creates a default .env config if one does not exist
# ─────────────────────────────────────────────────────────────────────

set -euo pipefail

readonly SAMANTHA_VERSION="2.0.0"
readonly SAMANTHA_REPO="https://github.com/hellowaste344/samantha"
readonly SAMANTHA_BRANCH="main"
readonly INSTALL_DIR="${HOME}/samantha"

# $'\033[...]' makes bash substitute the literal ESC byte at parse time,
# so colours render with plain `echo` (no non-portable -e flag needed).
readonly RED=$'\033[0;31m'
readonly GREEN=$'\033[0;32m'
readonly CYAN=$'\033[0;36m'
readonly PINK=$'\033[0;35m'
readonly BOLD=$'\033[1m'
readonly RESET=$'\033[0m'

echo ""
echo " ${PINK}${BOLD}╔══════════════════════════════════╗${RESET}"
echo " ${PINK}${BOLD}║ SAMANTHA v${SAMANTHA_VERSION} ║${RESET}"
echo " ${PINK}${BOLD}║ ZenonAI — Local AI Agent ║${RESET}"
echo " ${PINK}${BOLD}╚══════════════════════════════════╝${RESET}"
echo ""

OS="$(uname -s)"
ARCH="$(uname -m)"
echo " ${CYAN}-> Detected: ${OS} / ${ARCH}${RESET}"

# ── 1. Check required tools ───────────────────────────────────────────
# Fail fast with a clear message rather than dying mid-install.
for cmd in python3 git curl; do
  if ! command -v "${cmd}" &>/dev/null; then
    echo " ${RED}✗ Missing: ${cmd} — please install it first.${RESET}"
    exit 1
  fi
done

# Compare the version tuple in Python itself — parsing `python3 --version`
# output with shell string ops is fragile across distros.
PY_OK="$(python3 -c 'import sys; print(sys.version_info >= (3,9))')"
if [[ "${PY_OK}" != "True" ]]; then
  echo " ${RED}✗ Python 3.9+ required. Detected: $(python3 --version)${RESET}"
  exit 1
fi
echo " ${GREEN}✓ Python 3.9+ found${RESET}"
# ── 2. Install Ollama if missing ──────────────────────────────────────
if ! command -v ollama &>/dev/null; then
  echo " ${PINK}-> Ollama not found — installing...${RESET}"
  curl -fsSL https://ollama.com/install.sh | sh
  # The vendor installer can exit 0 without `ollama` being reachable on
  # PATH in this shell session — verify instead of assuming success.
  if command -v ollama &>/dev/null; then
    echo " ${GREEN}✓ Ollama installed${RESET}"
  else
    echo " ${RED}✗ Ollama installer ran but 'ollama' is not on PATH — open a new shell and re-run this script.${RESET}"
    exit 1
  fi
else
  echo " ${GREEN}✓ Ollama found${RESET}"
fi

# ── 3. Clone or update the Samantha repo ─────────────────────────────
if [ -d "${INSTALL_DIR}/.git" ]; then
  echo " ${CYAN}-> Updating existing installation in ${INSTALL_DIR}...${RESET}"
  cd "${INSTALL_DIR}"
  git fetch origin "${SAMANTHA_BRANCH}" --quiet
  # Hard reset: the install dir is installer-managed, local edits are
  # deliberately discarded in favour of the upstream branch.
  git reset --hard "origin/${SAMANTHA_BRANCH}" --quiet
  echo " ${GREEN}✓ Updated to latest${RESET}"
else
  echo " ${CYAN}-> Cloning into ${INSTALL_DIR}...${RESET}"
  git clone --branch "${SAMANTHA_BRANCH}" "${SAMANTHA_REPO}" "${INSTALL_DIR}"
  cd "${INSTALL_DIR}"
  echo " ${GREEN}✓ Cloned successfully${RESET}"
fi

# Always ensure we are inside the install directory from this point on
cd "${INSTALL_DIR}"

# ── 4. Python virtual environment ─────────────────────────────────────
# Only create the venv on a fresh install.
# Skipping on updates preserves the pip cache and avoids re-downloading.
if [ ! -d ".venv" ]; then
  echo " ${CYAN}-> Creating virtual environment...${RESET}"
  python3 -m venv .venv
else
  echo " ${GREEN}✓ Virtual environment already exists${RESET}"
fi

# shellcheck disable=SC1091
source .venv/bin/activate

echo " ${CYAN}-> Installing Python dependencies...${RESET}"
# `python3 -m pip` guarantees the venv's pip is used even if activation
# quirks leave a system `pip` earlier on PATH (pip's own recommendation).
python3 -m pip install --quiet --upgrade pip
python3 -m pip install --quiet -r requirements.txt
echo " ${GREEN}✓ Python dependencies installed${RESET}"
# ── 5. Playwright Chromium ────────────────────────────────────────────
# The previous probe ran `playwright install --dry-run` and treated a
# non-zero exit as "needs install", but --dry-run exits 0 whether or not
# the browser is present (it only prints what WOULD be installed), so
# the install branch was unreachable. `playwright install` is itself
# idempotent — it skips browsers already downloaded — so run it directly.
echo " ${CYAN}-> Checking Playwright Chromium...${RESET}"
python3 -m playwright install chromium
echo " ${GREEN}✓ Playwright Chromium installed${RESET}"

# ── 6. Default .env config ──────────────────────────────────────────
# Never overwrite a user-edited config; only seed it on first install.
if [ ! -f ".env" ]; then
  echo " ${CYAN}-> Creating default .env config...${RESET}"
  # Quoted delimiter ('ENVEOF') keeps the heredoc literal — no $ expansion.
  cat > .env << 'ENVEOF'
# Samantha .env — created by installer
# Edit this file to customise your setup.
# See .env.example for all available options.

# LLM — Ollama connection and model
OLLAMA_HOST=http://localhost:11434
OLLAMA_MODEL=mistral

# STT — Whisper model size (tiny | base | small | medium | large)
WHISPER_MODEL=base

# TTS — voice output engine
#   edge    → neural voices, requires internet (recommended)
#   pyttsx3 → system voice, fully offline
#   none    → disable all speech (text only)
TTS_ENGINE=edge
TTS_EDGE_VOICE=aria
ENVEOF
  echo " ${GREEN}✓ Created .env (edit to customise)${RESET}"
else
  echo " ${GREEN}✓ Existing .env preserved${RESET}"
fi

# ── Done ────────────────────────────────────────────────────────────
echo ""
echo " ${GREEN}${BOLD}✓ Samantha installed successfully!${RESET}"
echo ""
echo " ${BOLD}Get started:${RESET}"
echo "   cd ~/samantha"
echo "   source .venv/bin/activate"
echo "   ollama pull mistral"
echo "   python main.py"
echo ""
echo " ${CYAN}Voice mode (default) — microphone + neural TTS.${RESET}"
echo " ${CYAN}Text-only mode — python main.py --text${RESET}"
echo ""
echo " ${CYAN}Docs: https://zenon.ai/help${RESET}"
echo ""