Ollama 방식과 vllm-mlx(MLX) 방식 두 가지 셋업 스크립트 및 가이드 포함. transformers fast image processor 호환성 패치 자동 적용. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
118 lines
3.4 KiB
Bash
Executable File
#!/bin/bash
# One-click setup: Qwen3.5 + Open WebUI via Ollama.
# Environment: macOS / Docker Desktop / Homebrew.
#
# Hardened strict mode: exit on error (-e), error on unset variables (-u),
# and fail a pipeline if any stage fails (pipefail) — the original only had -e.
set -euo pipefail

#====================================================================
# Qwen3.5 + Open WebUI (Ollama) one-click setup
# Environment: Mac / Docker Desktop / Homebrew
#====================================================================

# Absolute directory containing this script (assignment kept separate from
# readonly so a failing command substitution is not masked under set -e).
PROJECT_DIR="$(cd "$(dirname "$0")" && pwd)"
readonly PROJECT_DIR

readonly MODEL="qwen3.5:35b"   # Ollama model tag to pull and serve
readonly WEBUI_PORT=3000       # host port mapped to Open WebUI's internal 8080

echo "============================================"
echo " Qwen3.5 + Open WebUI (Ollama) 셋업"
echo "============================================"
echo ""
#--------------------------------------------------------------------
# 1. Prerequisite checks
#--------------------------------------------------------------------
echo "[1/5] 사전 요구사항 확인..."

# Homebrew is required later to install Ollama; print the official
# installer one-liner when it is missing.
command -v brew >/dev/null 2>&1 || {
  echo "❌ Homebrew가 설치되어 있지 않습니다."
  echo "   /bin/bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\""
  exit 1
}
echo " ✓ Homebrew"

# The docker CLI must exist AND the daemon (Docker Desktop) must answer.
command -v docker >/dev/null 2>&1 || {
  echo "❌ Docker가 설치되어 있지 않습니다."
  exit 1
}
docker info >/dev/null 2>&1 || {
  echo "❌ Docker Desktop이 실행 중이 아닙니다."
  exit 1
}
echo " ✓ Docker"

echo ""
#--------------------------------------------------------------------
# 2. Install and start Ollama
#--------------------------------------------------------------------
echo "[2/5] Ollama 설치..."

# Install the Ollama runtime via Homebrew unless it is already on PATH.
if ! command -v ollama &>/dev/null; then
  brew install ollama
  echo " ✓ Ollama 설치 완료"
else
  echo " ✓ Ollama 이미 설치됨"
fi

# Start the background service unless it is already running.
# FIX: anchor the match to the service-name column — a bare
# 'grep ollama' would also match any other service whose name or
# status line merely contains the substring "ollama".
if ! brew services list | grep -E '^ollama[[:space:]]' | grep -q started; then
  brew services start ollama
  sleep 3   # brief grace period for the server process to come up
  echo " ✓ Ollama 서비스 시작"
else
  echo " ✓ Ollama 서비스 실행 중"
fi

echo ""
#--------------------------------------------------------------------
# 3. Download the model
#--------------------------------------------------------------------
# Pull the model image through Ollama's registry; this is the slow step.
printf '%s\n' "[3/5] 모델 다운로드 ($MODEL)..."
printf '%s\n' " (네트워크 속도에 따라 시간이 걸릴 수 있습니다)"

ollama pull "$MODEL"
printf '%s\n' " ✓ 모델 다운로드 완료"

printf '\n'
#--------------------------------------------------------------------
# 4. Generate docker-compose.yml and launch Open WebUI
#--------------------------------------------------------------------
echo "[4/5] Open WebUI 실행..."

# Generate the compose file only once so user edits are never clobbered.
if [ ! -f "$PROJECT_DIR/docker-compose.yml" ]; then
  # Unquoted EOF on purpose: ${WEBUI_PORT} must expand at generation time.
  cat > "$PROJECT_DIR/docker-compose.yml" << EOF
services:
  open-webui:
    image: ghcr.io/open-webui/open-webui:main
    container_name: open-webui
    ports:
      - "${WEBUI_PORT}:8080"
    environment:
      - OLLAMA_BASE_URL=http://host.docker.internal:11434
    volumes:
      - open-webui-data:/app/backend/data
    extra_hosts:
      - "host.docker.internal:host-gateway"
    restart: unless-stopped

volumes:
  open-webui-data:
EOF
  echo " ✓ docker-compose.yml 생성"
fi

cd "$PROJECT_DIR" || exit 1

# FIX: the original 'docker compose up -d 2>&1 | grep -v "^$"' had two bugs:
#   1. the pipeline's exit status was grep's, so a compose failure was
#      silently swallowed;
#   2. when compose produced no (non-blank) output, grep exited 1 and
#      killed the script under 'set -e' even though compose SUCCEEDED.
# Capture the output, propagate compose's own status, then filter blank
# lines best-effort for display.
compose_output="$(docker compose up -d 2>&1)" || {
  printf '%s\n' "$compose_output"
  exit 1
}
printf '%s\n' "$compose_output" | grep -v "^$" || true
echo " ✓ Open WebUI 실행 중"

echo ""
#--------------------------------------------------------------------
# 5. Done
#--------------------------------------------------------------------
# Same final banner as before, emitted as one printf call:
# each argument becomes its own output line via the repeated format.
printf '%s\n' \
  "[5/5] 셋업 완료!" \
  "" \
  "============================================" \
  " 브라우저에서 http://localhost:${WEBUI_PORT} 접속" \
  " (첫 접속 시 회원가입 → 첫 계정이 admin)" \
  " 모델 선택: $MODEL" \
  "============================================"