feat: MLX dual backend and model optimization for the AI service

- MLX first (Mac mini, 27B) → fallback to Ollama (homebuilt PC, 9B); see the sketch before the diff below
- Migrate config to pydantic-settings
- Add MLX status to the health check
- Switch the text model from qwen3:8b to qwen3.5:9b-q8_0

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Author: Hyungi Ahn
Date: 2026-03-06 23:17:50 +09:00
parent cad662473b
commit 2f7e083db0
14 changed files with 231 additions and 140 deletions
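The diff below only shows the prompt-loading refactor; the MLX/Ollama wiring itself lives in the other changed files. As a rough sketch of what the message describes, assuming an OpenAI-compatible MLX server (as served by mlx_lm.server) and Ollama's stock REST API — every name here (the Settings fields, endpoint URLs, generate and health_status helpers, the MLX model placeholder) is an illustrative assumption, not the repository's actual code:

import httpx
from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    # pydantic-settings populates these from env vars / .env (field names assumed)
    model_config = SettingsConfigDict(env_file=".env")

    mlx_base_url: str = "http://mac-mini.local:8080"  # MLX server on the Mac mini (27B)
    ollama_base_url: str = "http://localhost:11434"   # Ollama on the homebuilt PC (9B)
    text_model: str = "qwen3.5:9b-q8_0"               # the model named in the commit message
    mlx_model: str = "<27b-mlx-model>"                # placeholder; the commit doesn't name it


settings = Settings()


async def generate(prompt: str) -> str:
    # MLX (Mac mini, 27B) first; fall back to Ollama (9B) on any HTTP/connection error.
    async with httpx.AsyncClient(timeout=120.0) as client:
        try:
            resp = await client.post(
                f"{settings.mlx_base_url}/v1/completions",  # OpenAI-compatible route
                json={"model": settings.mlx_model, "prompt": prompt},
            )
            resp.raise_for_status()
            return resp.json()["choices"][0]["text"]
        except httpx.HTTPError:
            resp = await client.post(
                f"{settings.ollama_base_url}/api/generate",  # Ollama's generate endpoint
                json={"model": settings.text_model, "prompt": prompt, "stream": False},
            )
            resp.raise_for_status()
            return resp.json()["response"]


async def health_status() -> dict:
    # The health check now reports MLX reachability alongside the base status.
    try:
        async with httpx.AsyncClient(timeout=2.0) as client:
            r = await client.get(f"{settings.mlx_base_url}/v1/models")
            mlx_up = r.status_code == 200
    except httpx.HTTPError:
        mlx_up = False
    return {"status": "ok", "mlx": "up" if mlx_up else "down"}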


@@ -1,5 +1,6 @@
 import json
 from services.ollama_client import ollama_client
+from services.utils import load_prompt, parse_json_response
 from config import settings
@@ -7,13 +8,8 @@ CLASSIFY_PROMPT_PATH = "prompts/classify_issue.txt"
 SUMMARIZE_PROMPT_PATH = "prompts/summarize_issue.txt"

-def _load_prompt(path: str) -> str:
-    with open(path, "r", encoding="utf-8") as f:
-        return f.read()

 async def classify_issue(description: str, detail_notes: str = "") -> dict:
-    template = _load_prompt(CLASSIFY_PROMPT_PATH)
+    template = load_prompt(CLASSIFY_PROMPT_PATH)
     prompt = template.format(
         description=description or "",
         detail_notes=detail_notes or "",
@@ -32,7 +28,7 @@ async def classify_issue(description: str, detail_notes: str = "") -> dict:
 async def summarize_issue(
     description: str, detail_notes: str = "", solution: str = ""
 ) -> dict:
-    template = _load_prompt(SUMMARIZE_PROMPT_PATH)
+    template = load_prompt(SUMMARIZE_PROMPT_PATH)
     prompt = template.format(
         description=description or "",
         detail_notes=detail_notes or "",
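
The hunks above replace each module-local _load_prompt with helpers imported from services.utils, a module this view doesn't show. A minimal sketch of what it plausibly contains — load_prompt mirrors the removed helper, while parse_json_response's body is purely an assumption:

# services/utils.py -- hypothetical reconstruction; only the import is visible in this commit.
import json
import re


def load_prompt(path: str) -> str:
    # Mirrors the _load_prompt helper removed from the module above.
    with open(path, "r", encoding="utf-8") as f:
        return f.read()


def parse_json_response(text: str) -> dict:
    # LLM replies often wrap JSON in prose or code fences; pull out the first JSON object.
    match = re.search(r"\{.*\}", text, re.DOTALL)
    if match is None:
        raise ValueError("no JSON object found in model response")
    return json.loads(match.group(0))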