tk-factory-services/ai-service/services/classification_service.py
Hyungi Ahn 2f7e083db0 feat: AI service MLX dual backend and model optimization
- MLX (Mac mini, 27B) first → Ollama (custom-built PC, 9B) fallback structure
- Switched config to pydantic-settings
- Added MLX status to the health check
- Changed text model qwen3:8b → qwen3.5:9b-q8_0

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-06 23:17:50 +09:00
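The MLX-first / Ollama-fallback structure mentioned in the commit message is not visible in this file, which only imports the Ollama client. Below is a minimal sketch of how such a fallback call might look; the services.mlx_client module, the mlx_client object, and its generate_text method are assumptions for illustration, not code confirmed by the repository.

# Hypothetical sketch; mlx_client and its module path are assumptions, not code from this repo.
import logging

from services.ollama_client import ollama_client

logger = logging.getLogger(__name__)


async def generate_with_fallback(prompt: str) -> str:
    """Try the MLX backend (Mac mini, 27B model) first, then fall back to Ollama (9B model)."""
    try:
        # Assumed client; the real MLX integration is not shown in this file.
        from services.mlx_client import mlx_client
        return await mlx_client.generate_text(prompt)
    except Exception as exc:
        # Any MLX failure (import error, connection error, timeout) falls back to Ollama.
        logger.warning("MLX backend unavailable, falling back to Ollama: %s", exc)
        return await ollama_client.generate_text(prompt)

Catching a broad exception around the MLX call keeps the service answering on the 9B Ollama model whenever the Mac mini backend is down or unreachable.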


"""Issue classification and summarization backed by the Ollama text-generation client."""

import json

from services.ollama_client import ollama_client
from services.utils import load_prompt, parse_json_response
from config import settings

CLASSIFY_PROMPT_PATH = "prompts/classify_issue.txt"
SUMMARIZE_PROMPT_PATH = "prompts/summarize_issue.txt"


async def classify_issue(description: str, detail_notes: str = "") -> dict:
    """Classify an issue by filling the classification prompt and parsing the model's JSON reply."""
    template = load_prompt(CLASSIFY_PROMPT_PATH)
    prompt = template.format(
        description=description or "",
        detail_notes=detail_notes or "",
    )
    raw = await ollama_client.generate_text(prompt)
    try:
        # Extract the first top-level JSON object embedded in the raw model output.
        start = raw.find("{")
        end = raw.rfind("}") + 1
        if start >= 0 and end > start:
            return json.loads(raw[start:end])
    except json.JSONDecodeError:
        pass
    # Parsing failed: hand the raw text back to the caller with an error flag.
    return {"raw_response": raw, "parse_error": True}


async def summarize_issue(
    description: str, detail_notes: str = "", solution: str = ""
) -> dict:
    """Summarize an issue (and optional solution) via the summarization prompt."""
    template = load_prompt(SUMMARIZE_PROMPT_PATH)
    prompt = template.format(
        description=description or "",
        detail_notes=detail_notes or "",
        solution=solution or "",
    )
    raw = await ollama_client.generate_text(prompt)
    try:
        # Same JSON extraction as classify_issue: take the outermost {...} span.
        start = raw.find("{")
        end = raw.rfind("}") + 1
        if start >= 0 and end > start:
            return json.loads(raw[start:end])
    except json.JSONDecodeError:
        pass
    # If no JSON could be parsed, treat the whole response as the summary text.
    return {"summary": raw.strip()}


async def classify_and_summarize(
    description: str, detail_notes: str = ""
) -> dict:
    """Run classification and summarization for one issue and merge the results."""
    classification = await classify_issue(description, detail_notes)
    summary_result = await summarize_issue(description, detail_notes)
    return {
        "classification": classification,
        "summary": summary_result.get("summary", ""),
    }
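A minimal usage sketch, assuming this module is importable as services.classification_service and a text-generation backend is reachable; the issue text below is placeholder data for illustration only.

import asyncio

from services.classification_service import classify_and_summarize


async def main() -> None:
    # Placeholder issue text, for illustration only.
    result = await classify_and_summarize(
        description="Conveyor on line 3 stops intermittently",
        detail_notes="HMI shows error code E-42 right before each stop",
    )
    print(result["classification"])  # parsed classification dict, or raw_response/parse_error on failure
    print(result["summary"])         # summary string ("" if the parsed JSON had no "summary" key)


if __name__ == "__main__":
    asyncio.run(main())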