Files
hyungi_document_server/docker-compose.yml
Hyungi Ahn e0f928f429 feat(deploy): Phase 1.3 reranker (TEI bge-reranker-v2-m3) 서비스 추가
docker-compose.yml에 reranker 서비스 추가:
- image: ghcr.io/huggingface/text-embeddings-inference:1.5
- MODEL_ID=BAAI/bge-reranker-v2-m3
- MAX_BATCH_TOKENS=8192, MAX_CONCURRENT_REQUESTS=4
- GPU 1개 할당 (RTX 4070 Ti Super, CUDA 13.0)
- expose 80만 (host 노출 X, internal network 전용)
- reranker_cache volume으로 모델 영속화
- fastapi가 depends_on 안 함 → 단독 시작 가능, reranker 없어도 fastapi 동작
  (rerank_service가 RRF fallback)

다음 단계:
- GPU에서 docker pull로 호환성 확인
- docker compose up -d reranker → warmup
- config.yaml의 rerank.endpoint를 http://reranker:80/rerank로 갱신 (GPU 직접)
- fastapi rebuild + 평가셋 측정 (rerank=true)
2026-04-08 13:16:37 +09:00

129 lines
3.2 KiB
YAML

services:
  # PostgreSQL 16 with the pgvector extension; SQL files in ./migrations
  # run once on first initialization of the data volume.
  postgres:
    image: pgvector/pgvector:pg16
    volumes:
      - pgdata:/var/lib/postgresql/data
      - ./migrations:/docker-entrypoint-initdb.d
    environment:
      POSTGRES_DB: pkm
      POSTGRES_USER: pkm
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
    ports:
      # Non-default host port to avoid clashing with a local PostgreSQL on 5432.
      - "15432:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U pkm"]
      interval: 5s
      timeout: 5s
      retries: 5
    restart: unless-stopped
kordoc-service:
build: ./services/kordoc
ports:
- "3100:3100"
volumes:
- ${NAS_NFS_PATH:-/mnt/nas/Document_Server}:/documents:ro
healthcheck:
test: ["CMD", "node", "-e", "fetch('http://localhost:3100/health').then(r=>{process.exit(r.ok?0:1)}).catch(()=>process.exit(1))"]
interval: 10s
timeout: 5s
retries: 3
restart: unless-stopped
ollama:
image: ollama/ollama
volumes:
- ollama_data:/root/.ollama
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
ports:
- "127.0.0.1:11434:11434"
restart: unless-stopped
# Phase 1.3: bge-reranker-v2-m3 (TEI) — internal only, fastapi에서 reranker:80으로 호출
# fastapi가 depends_on 안 함 → 단독 시작 가능, 없어도 fastapi 동작 (rerank=false fallback)
reranker:
image: ghcr.io/huggingface/text-embeddings-inference:1.5
container_name: hyungi_document_server-reranker-1
expose:
- "80"
environment:
- MODEL_ID=BAAI/bge-reranker-v2-m3
- MAX_BATCH_TOKENS=8192
- MAX_CONCURRENT_REQUESTS=4
volumes:
- reranker_cache:/data
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
restart: unless-stopped
ai-gateway:
build: ./gpu-server/services/ai-gateway
ports:
- "127.0.0.1:8081:8080"
environment:
- PRIMARY_ENDPOINT=http://100.76.254.116:8801/v1/chat/completions
- FALLBACK_ENDPOINT=http://ollama:11434/v1/chat/completions
- CLAUDE_API_KEY=${CLAUDE_API_KEY:-}
- DAILY_BUDGET_USD=${DAILY_BUDGET_USD:-5.00}
depends_on:
- ollama
restart: unless-stopped
fastapi:
build: ./app
ports:
- "8000:8000"
volumes:
- ${NAS_NFS_PATH:-/mnt/nas/Document_Server}:/documents
- ./config.yaml:/app/config.yaml:ro
- ./scripts:/app/scripts:ro
- ./logs:/app/logs
depends_on:
postgres:
condition: service_healthy
kordoc-service:
condition: service_healthy
env_file:
- credentials.env
environment:
- DATABASE_URL=postgresql+asyncpg://pkm:${POSTGRES_PASSWORD}@postgres:5432/pkm
- KORDOC_ENDPOINT=http://kordoc-service:3100
restart: unless-stopped
frontend:
build: ./frontend
ports:
- "3000:3000"
depends_on:
- fastapi
restart: unless-stopped
caddy:
image: caddy:2
ports:
- "8080:80"
volumes:
- ./Caddyfile:/etc/caddy/Caddyfile
- caddy_data:/data
depends_on:
- fastapi
- frontend
restart: unless-stopped
# Named volumes (default local driver).
volumes:
  pgdata:          # PostgreSQL data directory
  caddy_data:      # Caddy state (certificates, etc.)
  ollama_data:     # Ollama model store
  reranker_cache:  # TEI model cache for the reranker