feat: RAG 임베딩 자동 동기화 + AI 서비스 개선

- 부적합 라이프사이클 전 과정에서 Qdrant 임베딩 자동 동기화
  - 관리함 5개 저장 함수 + 수신함 상태 변경 시 fire-and-forget sync
  - 30분 주기 전체 재동기화 안전망 (FastAPI lifespan 백그라운드 태스크)
  - build_document_text에 카테고리(final_category/category) 포함
- RAG 질의에 DB 통계 집계 지원 (카테고리별/부서별 건수)
- Qdrant client.search → query_points API 마이그레이션
- AI 어시스턴트 페이지 권한 추가 (tkuser)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Hyungi Ahn
2026-03-12 13:05:32 +09:00
parent 65db787f92
commit 5b1b89254c
11 changed files with 154 additions and 8 deletions

View File

@@ -82,6 +82,38 @@ def get_daily_qc_stats(date_str: str) -> dict:
return dict(row._mapping) if row else {}
def get_category_stats() -> list[dict]:
    """Aggregate nonconformance counts per category.

    Uses COALESCE so a reviewed ``final_category`` takes precedence over the
    original ``category``; rows come back ordered by descending count.

    Returns:
        A list of ``{"category": ..., "count": ...}`` dicts.
    """
    query = text(
        "SELECT COALESCE(final_category, category) AS category, "
        "COUNT(*) AS count "
        "FROM qc_issues "
        "GROUP BY COALESCE(final_category, category) "
        "ORDER BY count DESC"
    )
    with engine.connect() as conn:
        rows = conn.execute(query)
        return [dict(r._mapping) for r in rows]
def get_department_stats() -> list[dict]:
    """Aggregate nonconformance counts per responsible department.

    Rows with a NULL or empty ``responsible_department`` are excluded;
    results are ordered by descending count.

    Returns:
        A list of ``{"department": ..., "count": ...}`` dicts.
    """
    query = text(
        "SELECT responsible_department AS department, "
        "COUNT(*) AS count "
        "FROM qc_issues "
        "WHERE responsible_department IS NOT NULL "
        "AND responsible_department != '' "
        "GROUP BY responsible_department "
        "ORDER BY count DESC"
    )
    with engine.connect() as conn:
        rows = conn.execute(query)
        return [dict(r._mapping) for r in rows]
def get_issues_for_date(date_str: str) -> list[dict]:
with engine.connect() as conn:
result = conn.execute(

View File

@@ -1,11 +1,18 @@
import logging
from services.ollama_client import ollama_client
from db.vector_store import vector_store
from db.metadata_store import metadata_store
from services.db_client import get_all_issues, get_issue_by_id, get_issues_since
logger = logging.getLogger(__name__)
def build_document_text(issue: dict) -> str:
parts = []
cat = issue.get("final_category") or issue.get("category")
if cat:
parts.append(f"분류: {cat}")
if issue.get("description"):
parts.append(issue["description"])
if issue.get("final_description"):
@@ -84,6 +91,7 @@ async def sync_all_issues() -> dict:
async def sync_single_issue(issue_id: int) -> dict:
logger.info(f"Sync single issue: {issue_id}")
issue = get_issue_by_id(issue_id)
if not issue:
return {"status": "not_found"}

View File

@@ -1,8 +1,54 @@
import logging
import time
from services.ollama_client import ollama_client
from services.embedding_service import search_similar_by_text, build_document_text
from services.db_client import get_issue_by_id
from services.db_client import get_issue_by_id, get_category_stats, get_department_stats
from services.utils import load_prompt
logger = logging.getLogger(__name__)

# Module-level cache for the rendered stats summary: "data" holds the last
# rendered text ("" when unset), "expires" the epoch time after which it is
# considered stale and must be rebuilt.
_stats_cache = {"data": "", "expires": 0}
STATS_CACHE_TTL = 300  # 5 minutes
# Substrings (Korean) that flag a question as statistics-oriented, e.g.
# frequency, trend, ratio, counts, per-category / per-department breakdowns.
STATS_KEYWORDS = {"많이", "빈도", "추이", "비율", "통계", "몇 건", "자주", "빈번", "유형별", "부서별"}
def _needs_stats(question: str) -> bool:
    """Return True when the question contains any statistics-related keyword.

    Simple substring matching against STATS_KEYWORDS — no tokenization.
    """
    for keyword in STATS_KEYWORDS:
        if keyword in question:
            return True
    return False
def _build_stats_summary() -> str:
    """Render a DB aggregate-statistics block for prompt injection.

    The rendered text is cached in ``_stats_cache`` for STATS_CACHE_TTL
    seconds. Returns "" when no aggregate data exists or when aggregation
    fails — failures are logged, never raised, so the caller degrades
    gracefully.
    """
    now = time.time()
    cached = _stats_cache["data"]
    if cached and now < _stats_cache["expires"]:
        return cached
    try:
        summary_lines = ["[전체 통계 요약]"]

        categories = get_category_stats()
        if categories:
            # Grouped COUNT(*) rows are always >= 1, so total > 0 here.
            total = sum(item["count"] for item in categories)
            summary_lines.append(f"총 부적합 건수: {total}")
            summary_lines.append("카테고리별:")
            for item in categories[:10]:
                pct = round(item["count"] / total * 100, 1)
                summary_lines.append(f" - {item['category']}: {item['count']}건 ({pct}%)")

        departments = get_department_stats()
        if departments:
            summary_lines.append("부서별:")
            for dept in departments[:10]:
                summary_lines.append(f" - {dept['department']}: {dept['count']}")

        if len(summary_lines) <= 1:
            # Only the header was produced — no data, so emit nothing
            # (and deliberately skip caching the empty result).
            return ""

        rendered = "\n".join(summary_lines)
        _stats_cache["data"] = rendered
        _stats_cache["expires"] = now + STATS_CACHE_TTL
        return rendered
    except Exception as e:
        logger.warning(f"Stats summary failed: {e}")
        return ""
def _format_retrieved_issues(results: list[dict]) -> str:
if not results:
@@ -81,11 +127,16 @@ async def rag_ask(question: str, project_id: int = None) -> dict:
results = await search_similar_by_text(
question, n_results=7, filters=None
)
logger.info(f"RAG ask: question='{question[:50]}', results={len(results)}")
context = _format_retrieved_issues(results)
# 통계성 질문일 때만 DB 집계 포함 (토큰 절약)
stats = _build_stats_summary() if _needs_stats(question) else ""
template = load_prompt("prompts/rag_qa.txt")
prompt = template.format(
question=question,
stats_summary=stats,
retrieved_cases=context,
)