refactor: switch the AI service to run primarily on the custom-built PC's Ollama
- Change backend order to Ollama primary → MLX fallback (drop the previous MLX-first priority)
- Change OLLAMA_BASE_URL to gpu.hyungi.net (compatible with the Docker network)
- Update OLLAMA_TEXT_MODEL from qwen3:8b to qwen3.5:9b-q8_0
- health endpoint: return the model field directly, removing the double nesting
- Shorten the health check timeout from 120 s to 5 s
- Add think: false to Ollama API calls (prevents thinking tokens)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
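A quick way to sanity-check the switch is to confirm the new model tag exists on the target Ollama instance; a minimal sketch (URL and tag taken from this commit, the /api/tags response shape matches the health check in the diff below):

import httpx

# List the models served by the custom-built PC's Ollama and check for the new tag.
resp = httpx.get("https://gpu.hyungi.net/api/tags", timeout=5.0)
resp.raise_for_status()
names = [m["name"] for m in resp.json().get("models", [])]
print("qwen3.5:9b-q8_0" in names, names)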
@@ -2,8 +2,8 @@ from pydantic_settings import BaseSettings
 
 class Settings(BaseSettings):
-    OLLAMA_BASE_URL: str = "http://100.111.160.84:11434"
-    OLLAMA_TEXT_MODEL: str = "qwen3:8b"
+    OLLAMA_BASE_URL: str = "https://gpu.hyungi.net"
+    OLLAMA_TEXT_MODEL: str = "qwen3.5:9b-q8_0"
     OLLAMA_EMBED_MODEL: str = "bge-m3"
     OLLAMA_TIMEOUT: int = 120
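For reference, a minimal sketch of how these pydantic-settings fields behave after the change; only the four fields from this hunk are shown, the rest of the class is omitted:

from pydantic_settings import BaseSettings

class Settings(BaseSettings):
    OLLAMA_BASE_URL: str = "https://gpu.hyungi.net"
    OLLAMA_TEXT_MODEL: str = "qwen3.5:9b-q8_0"
    OLLAMA_EMBED_MODEL: str = "bge-m3"
    OLLAMA_TIMEOUT: int = 120

# Environment variables with the same names (e.g. the docker-compose entries
# further down) override these Ollama-first defaults.
settings = Settings()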
@@ -7,12 +7,24 @@ router = APIRouter(tags=["health"])
 
 @router.get("/health")
 async def health_check():
-    ollama_status = await ollama_client.check_health()
+    backends = await ollama_client.check_health()
+    stats = vector_store.stats()
+
+    # Determine the primary text model name (Ollama primary, MLX fallback)
+    model_name = None
+    ollama_models = backends.get("ollama", {}).get("models", [])
+    if ollama_models:
+        model_name = ollama_models[0]
+    if not model_name and backends.get("mlx", {}).get("status") == "connected":
+        model_name = backends["mlx"].get("model")
+
     return {
         "status": "ok",
         "service": "tk-ai-service",
-        "ollama": ollama_status,
-        "embeddings": vector_store.stats(),
+        "model": model_name,
+        "ollama": backends.get("ollama", {}),
+        "mlx": backends.get("mlx", {}),
+        "embeddings": stats,
     }
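With the double nesting removed, the /health payload now exposes the resolved model plus both backends at the top level. An illustrative call and response (values are examples only; the host port comes from the compose mapping further down):

import httpx

resp = httpx.get("http://localhost:30400/health", timeout=10.0)
print(resp.json())
# Example shape:
# {
#   "status": "ok",
#   "service": "tk-ai-service",
#   "model": "qwen3.5:9b-q8_0",
#   "ollama": {"status": "connected", "models": ["qwen3.5:9b-q8_0", "bge-m3"]},
#   "mlx": {"status": "disconnected"},
#   "embeddings": {...}
# }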
@@ -43,7 +43,21 @@ class OllamaClient:
             messages.append({"role": "system", "content": system})
         messages.append({"role": "user", "content": prompt})
         client = await self._get_client()
+        # Custom-built PC Ollama is primary, MLX is the fallback
         try:
+            response = await client.post(
+                f"{self.base_url}/api/chat",
+                json={
+                    "model": settings.OLLAMA_TEXT_MODEL,
+                    "messages": messages,
+                    "stream": False,
+                    "think": False,
+                    "options": {"temperature": 0.3, "num_predict": 2048},
+                },
+            )
+            response.raise_for_status()
+            return response.json()["message"]["content"]
+        except Exception:
             response = await client.post(
                 f"{settings.MLX_BASE_URL}/chat/completions",
                 json={
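The new think field asks Ollama to skip thinking/reasoning tokens for models that support them; the same payload sent directly, as a standalone sketch (base URL and model taken from this commit):

import httpx

payload = {
    "model": "qwen3.5:9b-q8_0",
    "messages": [{"role": "user", "content": "ping"}],
    "stream": False,
    "think": False,  # added in this commit to prevent thinking tokens
    "options": {"temperature": 0.3, "num_predict": 2048},
}
resp = httpx.post("https://gpu.hyungi.net/api/chat", json=payload, timeout=120.0)
resp.raise_for_status()
print(resp.json()["message"]["content"])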
@@ -55,31 +69,20 @@ class OllamaClient:
             )
             response.raise_for_status()
             return response.json()["choices"][0]["message"]["content"]
-        except Exception:
-            response = await client.post(
-                f"{self.base_url}/api/chat",
-                json={
-                    "model": settings.OLLAMA_TEXT_MODEL,
-                    "messages": messages,
-                    "stream": False,
-                    "options": {"temperature": 0.3, "num_predict": 2048},
-                },
-            )
-            response.raise_for_status()
-            return response.json()["message"]["content"]
 
     async def check_health(self) -> dict:
         result = {}
+        short_timeout = httpx.Timeout(5.0, connect=3.0)
         try:
-            client = await self._get_client()
-            response = await client.get(f"{self.base_url}/api/tags")
+            async with httpx.AsyncClient(timeout=short_timeout) as c:
+                response = await c.get(f"{self.base_url}/api/tags")
             models = response.json().get("models", [])
             result["ollama"] = {"status": "connected", "models": [m["name"] for m in models]}
         except Exception:
             result["ollama"] = {"status": "disconnected"}
         try:
-            client = await self._get_client()
-            response = await client.get(f"{settings.MLX_BASE_URL}/health")
+            async with httpx.AsyncClient(timeout=short_timeout) as c:
+                response = await c.get(f"{settings.MLX_BASE_URL}/health")
             result["mlx"] = {"status": "connected", "model": settings.MLX_TEXT_MODEL}
         except Exception:
             result["mlx"] = {"status": "disconnected"}
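httpx.Timeout(5.0, connect=3.0) means 3 seconds to establish a connection and 5 seconds for the remaining phases, so a dead backend no longer blocks /health for the full 120-second request timeout. A minimal sketch of the same probe pattern in isolation:

import asyncio
import httpx

async def probe(url: str) -> bool:
    # 3 s connect limit, 5 s for read/write/pool (mirrors the diff above)
    short_timeout = httpx.Timeout(5.0, connect=3.0)
    try:
        async with httpx.AsyncClient(timeout=short_timeout) as c:
            r = await c.get(url)
            return r.status_code == 200
    except Exception:
        return False

# e.g. asyncio.run(probe("https://gpu.hyungi.net/api/tags"))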
@@ -298,8 +298,8 @@ services:
     ports:
       - "30400:8000"
     environment:
-      - OLLAMA_BASE_URL=${OLLAMA_BASE_URL:-http://100.111.160.84:11434}
-      - OLLAMA_TEXT_MODEL=${OLLAMA_TEXT_MODEL:-qwen3:8b}
+      - OLLAMA_BASE_URL=${OLLAMA_BASE_URL:-https://gpu.hyungi.net}
+      - OLLAMA_TEXT_MODEL=${OLLAMA_TEXT_MODEL:-qwen3.5:9b-q8_0}
       - OLLAMA_EMBED_MODEL=${OLLAMA_EMBED_MODEL:-bge-m3}
       - OLLAMA_TIMEOUT=${OLLAMA_TIMEOUT:-120}
       - MLX_BASE_URL=${MLX_BASE_URL:-https://llm.hyungi.net}
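The ${VAR:-default} entries fall back to the new Ollama-first values when the variable is unset or empty; expressed in Python for clarity (variable names from the compose hunk above):

import os

# Same resolution docker-compose performs for ${OLLAMA_BASE_URL:-https://gpu.hyungi.net}
ollama_base_url = os.environ.get("OLLAMA_BASE_URL") or "https://gpu.hyungi.net"
ollama_text_model = os.environ.get("OLLAMA_TEXT_MODEL") or "qwen3.5:9b-q8_0"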