From b3012b83208bfe94935a42062824675a5e63a29b Mon Sep 17 00:00:00 2001 From: Hyungi Ahn Date: Fri, 6 Mar 2026 09:38:30 +0900 Subject: [PATCH] =?UTF-8?q?feat:=20AI=20=EC=84=9C=EB=B9=84=EC=8A=A4=20?= =?UTF-8?q?=EB=B0=8F=20AI=20=EC=96=B4=EC=8B=9C=EC=8A=A4=ED=84=B4=ED=8A=B8?= =?UTF-8?q?=20=EC=A0=84=EC=9A=A9=20=ED=8E=98=EC=9D=B4=EC=A7=80=20=EC=B6=94?= =?UTF-8?q?=EA=B0=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - ai-service: Ollama 기반 AI 서비스 (분류, 시맨틱 검색, RAG Q&A, 패턴 분석) - AI 어시스턴트 페이지: 채팅형 Q&A, 시맨틱 검색, 패턴 분석, 분류 테스트 - 권한 시스템에 ai_assistant 페이지 등록 (기본 비활성) - 기존 페이지에 AI 기능 통합 (대시보드, 수신함, 관리함) - docker-compose, gateway, nginx 설정 업데이트 Co-Authored-By: Claude Opus 4.6 --- .env.example | 8 + ai-service/Dockerfile | 9 + ai-service/config.py | 24 + ai-service/db/__init__.py | 0 ai-service/db/metadata_store.py | 39 ++ ai-service/db/vector_store.py | 76 +++ ai-service/main.py | 41 ++ ai-service/prompts/classify_issue.txt | 18 + ai-service/prompts/daily_report.txt | 22 + ai-service/prompts/rag_classify.txt | 23 + ai-service/prompts/rag_pattern.txt | 16 + ai-service/prompts/rag_qa.txt | 14 + ai-service/prompts/rag_suggest_solution.txt | 18 + ai-service/prompts/summarize_issue.txt | 17 + ai-service/requirements.txt | 10 + ai-service/routers/__init__.py | 0 ai-service/routers/classification.py | 47 ++ ai-service/routers/daily_report.py | 33 + ai-service/routers/embeddings.py | 77 +++ ai-service/routers/health.py | 21 + ai-service/routers/rag.py | 57 ++ ai-service/services/__init__.py | 0 ai-service/services/classification_service.py | 60 ++ ai-service/services/db_client.py | 97 +++ ai-service/services/embedding_service.py | 144 +++++ ai-service/services/ollama_client.py | 57 ++ ai-service/services/rag_service.py | 164 +++++ ai-service/services/report_service.py | 122 ++++ docker-compose.yml | 35 ++ gateway/nginx.conf | 12 + .../api/routers/page_permissions.py | 1 + system3-nonconformance/web/ai-assistant.html | 284 +++++++++ 
.../web/issues-dashboard.html | 83 ++- system3-nonconformance/web/issues-inbox.html | 37 +- .../web/issues-management.html | 55 +- system3-nonconformance/web/nginx.conf | 12 + .../web/static/css/ai-assistant.css | 162 +++++ system3-nonconformance/web/static/js/api.js | 155 +++++ .../web/static/js/components/common-header.js | 9 + .../web/static/js/core/permissions.js | 3 +- .../web/static/js/pages/ai-assistant.js | 584 ++++++++++++++++++ .../web/static/js/pages/issues-dashboard.js | 132 +++- .../web/static/js/pages/issues-inbox.js | 89 ++- .../web/static/js/pages/issues-management.js | 100 ++- 44 files changed, 2914 insertions(+), 53 deletions(-) create mode 100644 ai-service/Dockerfile create mode 100644 ai-service/config.py create mode 100644 ai-service/db/__init__.py create mode 100644 ai-service/db/metadata_store.py create mode 100644 ai-service/db/vector_store.py create mode 100644 ai-service/main.py create mode 100644 ai-service/prompts/classify_issue.txt create mode 100644 ai-service/prompts/daily_report.txt create mode 100644 ai-service/prompts/rag_classify.txt create mode 100644 ai-service/prompts/rag_pattern.txt create mode 100644 ai-service/prompts/rag_qa.txt create mode 100644 ai-service/prompts/rag_suggest_solution.txt create mode 100644 ai-service/prompts/summarize_issue.txt create mode 100644 ai-service/requirements.txt create mode 100644 ai-service/routers/__init__.py create mode 100644 ai-service/routers/classification.py create mode 100644 ai-service/routers/daily_report.py create mode 100644 ai-service/routers/embeddings.py create mode 100644 ai-service/routers/health.py create mode 100644 ai-service/routers/rag.py create mode 100644 ai-service/services/__init__.py create mode 100644 ai-service/services/classification_service.py create mode 100644 ai-service/services/db_client.py create mode 100644 ai-service/services/embedding_service.py create mode 100644 ai-service/services/ollama_client.py create mode 100644 ai-service/services/rag_service.py 
create mode 100644 ai-service/services/report_service.py create mode 100644 system3-nonconformance/web/ai-assistant.html create mode 100644 system3-nonconformance/web/static/css/ai-assistant.css create mode 100644 system3-nonconformance/web/static/js/pages/ai-assistant.js diff --git a/.env.example b/.env.example index 1509262..40f26cb 100644 --- a/.env.example +++ b/.env.example @@ -84,6 +84,14 @@ PMA_USER=root PMA_PASSWORD=change_this_root_password_min_12_chars UPLOAD_LIMIT=50M +# ------------------------------------------------------------------- +# AI Service +# ------------------------------------------------------------------- +OLLAMA_BASE_URL=http://your-ollama-server:11434 +OLLAMA_TEXT_MODEL=qwen2.5:14b-instruct-q4_K_M +OLLAMA_EMBED_MODEL=bge-m3 +OLLAMA_TIMEOUT=120 + # ------------------------------------------------------------------- # Cloudflare Tunnel # ------------------------------------------------------------------- diff --git a/ai-service/Dockerfile b/ai-service/Dockerfile new file mode 100644 index 0000000..0e51481 --- /dev/null +++ b/ai-service/Dockerfile @@ -0,0 +1,9 @@ +FROM python:3.11-slim +WORKDIR /app +RUN apt-get update && apt-get install -y gcc build-essential && rm -rf /var/lib/apt/lists/* +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt +COPY . . 
+RUN mkdir -p /app/data/chroma +EXPOSE 8000 +CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"] diff --git a/ai-service/config.py b/ai-service/config.py new file mode 100644 index 0000000..2670d70 --- /dev/null +++ b/ai-service/config.py @@ -0,0 +1,24 @@ +import os + + +class Settings: + OLLAMA_BASE_URL: str = os.getenv("OLLAMA_BASE_URL", "http://100.111.160.84:11434") + OLLAMA_TEXT_MODEL: str = os.getenv("OLLAMA_TEXT_MODEL", "qwen2.5:14b-instruct-q4_K_M") + OLLAMA_EMBED_MODEL: str = os.getenv("OLLAMA_EMBED_MODEL", "bge-m3") + OLLAMA_TIMEOUT: int = int(os.getenv("OLLAMA_TIMEOUT", "120")) + + DB_HOST: str = os.getenv("DB_HOST", "mariadb") + DB_PORT: int = int(os.getenv("DB_PORT", "3306")) + DB_USER: str = os.getenv("DB_USER", "hyungi_user") + DB_PASSWORD: str = os.getenv("DB_PASSWORD", "") + DB_NAME: str = os.getenv("DB_NAME", "hyungi") + + SECRET_KEY: str = os.getenv("SECRET_KEY", "") + ALGORITHM: str = "HS256" + + SYSTEM1_API_URL: str = os.getenv("SYSTEM1_API_URL", "http://system1-api:3005") + CHROMA_PERSIST_DIR: str = os.getenv("CHROMA_PERSIST_DIR", "/app/data/chroma") + METADATA_DB_PATH: str = os.getenv("METADATA_DB_PATH", "/app/data/metadata.db") + + +settings = Settings() diff --git a/ai-service/db/__init__.py b/ai-service/db/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ai-service/db/metadata_store.py b/ai-service/db/metadata_store.py new file mode 100644 index 0000000..67538b0 --- /dev/null +++ b/ai-service/db/metadata_store.py @@ -0,0 +1,39 @@ +import sqlite3 +from config import settings + + +class MetadataStore: + def __init__(self): + self.db_path = settings.METADATA_DB_PATH + + def initialize(self): + conn = sqlite3.connect(self.db_path) + conn.execute( + "CREATE TABLE IF NOT EXISTS sync_state (" + " key TEXT PRIMARY KEY," + " value TEXT" + ")" + ) + conn.commit() + conn.close() + + def get_last_synced_id(self) -> int: + conn = sqlite3.connect(self.db_path) + cur = conn.execute( + "SELECT value FROM sync_state WHERE 
key = 'last_synced_id'" + ) + row = cur.fetchone() + conn.close() + return int(row[0]) if row else 0 + + def set_last_synced_id(self, issue_id: int): + conn = sqlite3.connect(self.db_path) + conn.execute( + "INSERT OR REPLACE INTO sync_state (key, value) VALUES ('last_synced_id', ?)", + (str(issue_id),), + ) + conn.commit() + conn.close() + + +metadata_store = MetadataStore() diff --git a/ai-service/db/vector_store.py b/ai-service/db/vector_store.py new file mode 100644 index 0000000..23868f3 --- /dev/null +++ b/ai-service/db/vector_store.py @@ -0,0 +1,76 @@ +import chromadb +from config import settings + + +class VectorStore: + def __init__(self): + self.client = None + self.collection = None + + def initialize(self): + self.client = chromadb.PersistentClient(path=settings.CHROMA_PERSIST_DIR) + self.collection = self.client.get_or_create_collection( + name="qc_issues", + metadata={"hnsw:space": "cosine"}, + ) + + def upsert( + self, + doc_id: str, + document: str, + embedding: list[float], + metadata: dict = None, + ): + self.collection.upsert( + ids=[doc_id], + documents=[document], + embeddings=[embedding], + metadatas=[metadata] if metadata else None, + ) + + def query( + self, + embedding: list[float], + n_results: int = 5, + where: dict = None, + ) -> list[dict]: + kwargs = { + "query_embeddings": [embedding], + "n_results": n_results, + "include": ["documents", "metadatas", "distances"], + } + if where: + kwargs["where"] = where + try: + results = self.collection.query(**kwargs) + except Exception: + return [] + + items = [] + if results and results["ids"] and results["ids"][0]: + for i, doc_id in enumerate(results["ids"][0]): + item = { + "id": doc_id, + "document": results["documents"][0][i] if results["documents"] else "", + "distance": results["distances"][0][i] if results["distances"] else 0, + "metadata": results["metadatas"][0][i] if results["metadatas"] else {}, + } + # cosine distance → similarity + item["similarity"] = round(1 - item["distance"], 
4) + items.append(item) + return items + + def delete(self, doc_id: str): + self.collection.delete(ids=[doc_id]) + + def count(self) -> int: + return self.collection.count() + + def stats(self) -> dict: + return { + "total_documents": self.count(), + "collection_name": "qc_issues", + } + + +vector_store = VectorStore() diff --git a/ai-service/main.py b/ai-service/main.py new file mode 100644 index 0000000..556729a --- /dev/null +++ b/ai-service/main.py @@ -0,0 +1,41 @@ +from contextlib import asynccontextmanager +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware + +from routers import health, embeddings, classification, daily_report, rag +from db.vector_store import vector_store +from db.metadata_store import metadata_store + + +@asynccontextmanager +async def lifespan(app: FastAPI): + vector_store.initialize() + metadata_store.initialize() + yield + + +app = FastAPI( + title="TK AI Service", + description="AI 서비스 (유사 검색, 분류, 보고서)", + version="1.0.0", + lifespan=lifespan, +) + +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=False, + allow_methods=["*"], + allow_headers=["*"], +) + +app.include_router(health.router, prefix="/api/ai") +app.include_router(embeddings.router, prefix="/api/ai") +app.include_router(classification.router, prefix="/api/ai") +app.include_router(daily_report.router, prefix="/api/ai") +app.include_router(rag.router, prefix="/api/ai") + + +@app.get("/") +async def root(): + return {"message": "TK AI Service", "version": "1.0.0"} diff --git a/ai-service/prompts/classify_issue.txt b/ai-service/prompts/classify_issue.txt new file mode 100644 index 0000000..ab32068 --- /dev/null +++ b/ai-service/prompts/classify_issue.txt @@ -0,0 +1,18 @@ +당신은 공장 품질관리(QC) 전문가입니다. 아래 부적합 신고 내용을 분석하여 판별하세요. 
+ +부적합 내용: +{description} + +상세 내용: +{detail_notes} + +다음 JSON 형식으로만 응답하세요: +{{ + "category": "material_missing|design_error|incoming_defect|inspection_miss|기타", + "category_confidence": 0.0~1.0, + "responsible_department": "production|quality|purchasing|design|sales", + "department_confidence": 0.0~1.0, + "severity": "low|medium|high|critical", + "summary": "한줄 요약 (30자 이내)", + "reasoning": "판단 근거 (2-3문장)" +}} \ No newline at end of file diff --git a/ai-service/prompts/daily_report.txt b/ai-service/prompts/daily_report.txt new file mode 100644 index 0000000..dc1152d --- /dev/null +++ b/ai-service/prompts/daily_report.txt @@ -0,0 +1,22 @@ +당신은 공장 관리 보고서 작성자입니다. 아래 데이터를 바탕으로 일일 브리핑을 작성하세요. + +날짜: {date} + +[근태 현황] +{attendance_data} + +[작업 현황] +{work_report_data} + +[부적합 현황] +{qc_issue_data} + +[순회점검 현황] +{patrol_data} + +다음 형식으로 작성하세요: + +1. 오늘의 요약 (2-3문장) +2. 주요 이슈 및 관심사항 +3. 부적합 현황 (신규/진행/지연) +4. 내일 주의사항 \ No newline at end of file diff --git a/ai-service/prompts/rag_classify.txt b/ai-service/prompts/rag_classify.txt new file mode 100644 index 0000000..646a398 --- /dev/null +++ b/ai-service/prompts/rag_classify.txt @@ -0,0 +1,23 @@ +당신은 공장 품질관리(QC) 전문가입니다. 아래 부적합 신고를 분류하세요. + +[신고 내용] +{description} + +[상세 내용] +{detail_notes} + +[참고: 과거 유사 사례] +{retrieved_cases} + +위 과거 사례의 분류 패턴을 참고하여, 현재 부적합을 판별하세요. + +다음 JSON 형식으로만 응답하세요: +{{ + "category": "material_missing|design_error|incoming_defect|inspection_miss|기타", + "category_confidence": 0.0~1.0, + "responsible_department": "production|quality|purchasing|design|sales", + "department_confidence": 0.0~1.0, + "severity": "low|medium|high|critical", + "summary": "한줄 요약 (30자 이내)", + "reasoning": "판단 근거 — 과거 사례 참고 내용 포함 (2-3문장)" +}} \ No newline at end of file diff --git a/ai-service/prompts/rag_pattern.txt b/ai-service/prompts/rag_pattern.txt new file mode 100644 index 0000000..4305c52 --- /dev/null +++ b/ai-service/prompts/rag_pattern.txt @@ -0,0 +1,16 @@ +당신은 공장 품질관리(QC) 데이터 분석가입니다. 아래 부적합에 대해 패턴을 분석하세요. 
+ +[분석 대상] +{description} + +[유사 부적합 {total_similar}건] +{retrieved_cases} + +다음을 분석하세요: + +1. **반복 여부**: 이 문제가 과거에도 발생했는지, 반복 빈도는 어느 정도인지 +2. **공통 패턴**: 유사 사례들의 공통 원인, 공통 부서, 공통 시기 등 +3. **근본 원인 추정**: 반복되는 원인이 있다면 근본 원인은 무엇인지 +4. **개선 제안**: 재발 방지를 위한 구조적 개선 방안 + +데이터 기반으로 객관적으로 분석하세요. \ No newline at end of file diff --git a/ai-service/prompts/rag_qa.txt b/ai-service/prompts/rag_qa.txt new file mode 100644 index 0000000..a84dcd5 --- /dev/null +++ b/ai-service/prompts/rag_qa.txt @@ -0,0 +1,14 @@ +당신은 공장 품질관리(QC) 데이터 분석가입니다. 아래 질문에 대해 과거 부적합 데이터를 기반으로 답변하세요. + +[질문] +{question} + +[관련 부적합 데이터] +{retrieved_cases} + +위 데이터를 근거로 질문에 답변하세요. +- 제공된 데이터를 적극적으로 활용하여 답변하세요 +- 관련 사례를 구체적으로 인용하며 분석하세요 +- 패턴이나 공통점이 있다면 정리하세요 +- 숫자나 통계가 있다면 포함하세요 +- 간결하되 유용한 답변을 하세요 \ No newline at end of file diff --git a/ai-service/prompts/rag_suggest_solution.txt b/ai-service/prompts/rag_suggest_solution.txt new file mode 100644 index 0000000..6bf1fd5 --- /dev/null +++ b/ai-service/prompts/rag_suggest_solution.txt @@ -0,0 +1,18 @@ +당신은 공장 품질관리(QC) 전문가입니다. 아래 부적합 이슈에 대한 해결방안을 제안하세요. + +[현재 부적합] +분류: {category} +내용: {description} +상세: {detail_notes} + +[과거 유사 사례] +{retrieved_cases} + +위 과거 사례들을 참고하여 다음을 제안하세요: + +1. **권장 해결방안**: 과거 유사 사례에서 효과적이었던 해결 방법을 기반으로 구체적인 조치를 제안 +2. **예상 원인**: 유사 사례에서 확인된 원인 패턴을 바탕으로 가능한 원인 분석 +3. **담당 부서**: 어느 부서에서 처리해야 하는지 +4. **주의사항**: 과거 사례에서 배운 교훈이나 주의할 점 + +간결하고 실용적으로 작성하세요. 과거 사례가 없는 부분은 일반적인 QC 지식으로 보완하세요. \ No newline at end of file diff --git a/ai-service/prompts/summarize_issue.txt b/ai-service/prompts/summarize_issue.txt new file mode 100644 index 0000000..943b72b --- /dev/null +++ b/ai-service/prompts/summarize_issue.txt @@ -0,0 +1,17 @@ +당신은 공장 품질관리(QC) 전문가입니다. 아래 부적합 이슈를 간결하게 요약하세요. 
+ +부적합 내용: +{description} + +상세 내용: +{detail_notes} + +해결 방법: +{solution} + +다음 JSON 형식으로만 응답하세요: +{{ + "summary": "핵심 요약 (50자 이내)", + "key_points": ["요점1", "요점2", "요점3"], + "suggested_action": "권장 조치사항 (선택)" +}} \ No newline at end of file diff --git a/ai-service/requirements.txt b/ai-service/requirements.txt new file mode 100644 index 0000000..e54eaab --- /dev/null +++ b/ai-service/requirements.txt @@ -0,0 +1,10 @@ +fastapi==0.104.1 +uvicorn[standard]==0.24.0 +httpx==0.27.0 +chromadb==0.4.22 +numpy==1.26.2 +pydantic==2.5.0 +pydantic-settings==2.1.0 +python-jose[cryptography]==3.3.0 +pymysql==1.1.0 +sqlalchemy==2.0.23 diff --git a/ai-service/routers/__init__.py b/ai-service/routers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ai-service/routers/classification.py b/ai-service/routers/classification.py new file mode 100644 index 0000000..58e811c --- /dev/null +++ b/ai-service/routers/classification.py @@ -0,0 +1,47 @@ +from fastapi import APIRouter +from pydantic import BaseModel +from services.classification_service import ( + classify_issue, + summarize_issue, + classify_and_summarize, +) + +router = APIRouter(tags=["classification"]) + + +class ClassifyRequest(BaseModel): + description: str + detail_notes: str = "" + + +class SummarizeRequest(BaseModel): + description: str + detail_notes: str = "" + solution: str = "" + + +@router.post("/classify") +async def classify(req: ClassifyRequest): + try: + result = await classify_issue(req.description, req.detail_notes) + return {"available": True, **result} + except Exception as e: + return {"available": False, "error": str(e)} + + +@router.post("/summarize") +async def summarize(req: SummarizeRequest): + try: + result = await summarize_issue(req.description, req.detail_notes, req.solution) + return {"available": True, **result} + except Exception as e: + return {"available": False, "error": str(e)} + + +@router.post("/classify-and-summarize") +async def classify_and_summarize_endpoint(req: 
from fastapi import APIRouter, Request
from pydantic import BaseModel
from services.report_service import generate_daily_report
from datetime import date

router = APIRouter(tags=["daily_report"])


class DailyReportRequest(BaseModel):
    # ISO date string (YYYY-MM-DD); None means "today" (server local date)
    date: str | None = None
    # Optional project filter passed through to the report service
    project_id: int | None = None


def _bearer_token(request: Request) -> str:
    """Extract the bearer token from the Authorization header ('' if absent).

    Uses removeprefix so only a leading 'Bearer ' is stripped — str.replace
    would also corrupt tokens that happen to contain the substring.
    """
    return request.headers.get("authorization", "").removeprefix("Bearer ")


@router.post("/report/daily")
async def daily_report(req: DailyReportRequest, request: Request):
    """Generate the daily briefing for the requested (or current) date."""
    report_date = req.date or date.today().isoformat()
    try:
        result = await generate_daily_report(report_date, req.project_id, _bearer_token(request))
        return {"available": True, **result}
    except Exception as e:
        # Degrade gracefully: callers treat available=False as "AI offline".
        return {"available": False, "error": str(e)}


@router.post("/report/preview")
async def report_preview(req: DailyReportRequest, request: Request):
    """Same as /report/daily but flagged as a preview for the UI."""
    report_date = req.date or date.today().isoformat()
    try:
        result = await generate_daily_report(report_date, req.project_id, _bearer_token(request))
        return {"available": True, "preview": True, **result}
    except Exception as e:
        return {"available": False, "error": str(e)}
search_similar_by_text, +) +from db.vector_store import vector_store + +router = APIRouter(tags=["embeddings"]) + + +class SyncSingleRequest(BaseModel): + issue_id: int + + +class SearchRequest(BaseModel): + query: str + n_results: int = 5 + project_id: int | None = None + category: str | None = None + + +@router.post("/embeddings/sync") +async def sync_embeddings(background_tasks: BackgroundTasks): + background_tasks.add_task(sync_all_issues) + return {"status": "sync_started", "message": "전체 임베딩 동기화가 시작되었습니다"} + + +@router.post("/embeddings/sync-full") +async def sync_embeddings_full(): + result = await sync_all_issues() + return {"status": "completed", **result} + + +@router.post("/embeddings/sync-single") +async def sync_single(req: SyncSingleRequest): + result = await sync_single_issue(req.issue_id) + return result + + +@router.post("/embeddings/sync-incremental") +async def sync_incr(): + result = await sync_incremental() + return result + + +@router.get("/similar/{issue_id}") +async def get_similar(issue_id: int, n_results: int = Query(default=5, le=20)): + try: + results = await search_similar_by_id(issue_id, n_results) + return {"available": True, "results": results, "query_issue_id": issue_id} + except Exception as e: + return {"available": False, "results": [], "error": str(e)} + + +@router.post("/similar/search") +async def search_similar(req: SearchRequest): + filters = {} + if req.project_id is not None: + filters["project_id"] = str(req.project_id) + if req.category: + filters["category"] = req.category + try: + results = await search_similar_by_text( + req.query, req.n_results, filters or None + ) + return {"available": True, "results": results} + except Exception as e: + return {"available": False, "results": [], "error": str(e)} + + +@router.get("/embeddings/stats") +async def embedding_stats(): + return vector_store.stats() diff --git a/ai-service/routers/health.py b/ai-service/routers/health.py new file mode 100644 index 0000000..99b4201 --- 
/dev/null +++ b/ai-service/routers/health.py @@ -0,0 +1,21 @@ +from fastapi import APIRouter +from services.ollama_client import ollama_client +from db.vector_store import vector_store + +router = APIRouter(tags=["health"]) + + +@router.get("/health") +async def health_check(): + ollama_status = await ollama_client.check_health() + return { + "status": "ok", + "service": "tk-ai-service", + "ollama": ollama_status, + "embeddings": vector_store.stats(), + } + + +@router.get("/models") +async def list_models(): + return await ollama_client.check_health() diff --git a/ai-service/routers/rag.py b/ai-service/routers/rag.py new file mode 100644 index 0000000..2857948 --- /dev/null +++ b/ai-service/routers/rag.py @@ -0,0 +1,57 @@ +from fastapi import APIRouter +from pydantic import BaseModel +from services.rag_service import ( + rag_suggest_solution, + rag_ask, + rag_analyze_pattern, + rag_classify_with_context, +) + +router = APIRouter(tags=["rag"]) + + +class AskRequest(BaseModel): + question: str + project_id: int | None = None + + +class PatternRequest(BaseModel): + description: str + n_results: int = 10 + + +class ClassifyRequest(BaseModel): + description: str + detail_notes: str = "" + + +@router.post("/rag/suggest-solution/{issue_id}") +async def suggest_solution(issue_id: int): + try: + return await rag_suggest_solution(issue_id) + except Exception as e: + return {"available": False, "error": str(e)} + + +@router.post("/rag/ask") +async def ask_question(req: AskRequest): + try: + return await rag_ask(req.question, req.project_id) + except Exception as e: + return {"available": False, "error": str(e)} + + +@router.post("/rag/pattern") +async def analyze_pattern(req: PatternRequest): + try: + return await rag_analyze_pattern(req.description, req.n_results) + except Exception as e: + return {"available": False, "error": str(e)} + + +@router.post("/rag/classify") +async def classify_with_rag(req: ClassifyRequest): + try: + return await 
rag_classify_with_context(req.description, req.detail_notes) + except Exception as e: + return {"available": False, "error": str(e)} diff --git a/ai-service/services/__init__.py b/ai-service/services/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ai-service/services/classification_service.py b/ai-service/services/classification_service.py new file mode 100644 index 0000000..56a0823 --- /dev/null +++ b/ai-service/services/classification_service.py @@ -0,0 +1,60 @@ +import json +from services.ollama_client import ollama_client +from config import settings + + +CLASSIFY_PROMPT_PATH = "prompts/classify_issue.txt" +SUMMARIZE_PROMPT_PATH = "prompts/summarize_issue.txt" + + +def _load_prompt(path: str) -> str: + with open(path, "r", encoding="utf-8") as f: + return f.read() + + +async def classify_issue(description: str, detail_notes: str = "") -> dict: + template = _load_prompt(CLASSIFY_PROMPT_PATH) + prompt = template.format( + description=description or "", + detail_notes=detail_notes or "", + ) + raw = await ollama_client.generate_text(prompt) + try: + start = raw.find("{") + end = raw.rfind("}") + 1 + if start >= 0 and end > start: + return json.loads(raw[start:end]) + except json.JSONDecodeError: + pass + return {"raw_response": raw, "parse_error": True} + + +async def summarize_issue( + description: str, detail_notes: str = "", solution: str = "" +) -> dict: + template = _load_prompt(SUMMARIZE_PROMPT_PATH) + prompt = template.format( + description=description or "", + detail_notes=detail_notes or "", + solution=solution or "", + ) + raw = await ollama_client.generate_text(prompt) + try: + start = raw.find("{") + end = raw.rfind("}") + 1 + if start >= 0 and end > start: + return json.loads(raw[start:end]) + except json.JSONDecodeError: + pass + return {"summary": raw.strip()} + + +async def classify_and_summarize( + description: str, detail_notes: str = "" +) -> dict: + classification = await classify_issue(description, detail_notes) + 
summary_result = await summarize_issue(description, detail_notes) + return { + "classification": classification, + "summary": summary_result.get("summary", ""), + } diff --git a/ai-service/services/db_client.py b/ai-service/services/db_client.py new file mode 100644 index 0000000..495fa59 --- /dev/null +++ b/ai-service/services/db_client.py @@ -0,0 +1,97 @@ +from urllib.parse import quote_plus + +from sqlalchemy import create_engine, text +from config import settings + + +def get_engine(): + password = quote_plus(settings.DB_PASSWORD) + url = ( + f"mysql+pymysql://{settings.DB_USER}:{password}" + f"@{settings.DB_HOST}:{settings.DB_PORT}/{settings.DB_NAME}" + ) + return create_engine(url, pool_pre_ping=True, pool_size=5) + + +engine = get_engine() + + +def get_all_issues() -> list[dict]: + with engine.connect() as conn: + result = conn.execute( + text( + "SELECT id, category, description, detail_notes, " + "final_description, final_category, solution, " + "management_comment, cause_detail, project_id, " + "review_status, report_date, responsible_department, " + "location_info " + "FROM qc_issues ORDER BY id" + ) + ) + return [dict(row._mapping) for row in result] + + +def get_issue_by_id(issue_id: int) -> dict | None: + with engine.connect() as conn: + result = conn.execute( + text( + "SELECT id, category, description, detail_notes, " + "final_description, final_category, solution, " + "management_comment, cause_detail, project_id, " + "review_status, report_date, responsible_department, " + "location_info " + "FROM qc_issues WHERE id = :id" + ), + {"id": issue_id}, + ) + row = result.fetchone() + return dict(row._mapping) if row else None + + +def get_issues_since(last_id: int) -> list[dict]: + with engine.connect() as conn: + result = conn.execute( + text( + "SELECT id, category, description, detail_notes, " + "final_description, final_category, solution, " + "management_comment, cause_detail, project_id, " + "review_status, report_date, responsible_department, " 
+ "location_info " + "FROM qc_issues WHERE id > :last_id ORDER BY id" + ), + {"last_id": last_id}, + ) + return [dict(row._mapping) for row in result] + + +def get_daily_qc_stats(date_str: str) -> dict: + with engine.connect() as conn: + result = conn.execute( + text( + "SELECT " + " COUNT(*) as total, " + " SUM(CASE WHEN DATE(report_date) = :d THEN 1 ELSE 0 END) as new_today, " + " SUM(CASE WHEN review_status = 'in_progress' THEN 1 ELSE 0 END) as in_progress, " + " SUM(CASE WHEN review_status = 'completed' THEN 1 ELSE 0 END) as completed, " + " SUM(CASE WHEN review_status = 'pending_review' THEN 1 ELSE 0 END) as pending " + "FROM qc_issues" + ), + {"d": date_str}, + ) + row = result.fetchone() + return dict(row._mapping) if row else {} + + +def get_issues_for_date(date_str: str) -> list[dict]: + with engine.connect() as conn: + result = conn.execute( + text( + "SELECT id, category, description, detail_notes, " + "review_status, responsible_department, solution " + "FROM qc_issues " + "WHERE DATE(report_date) = :d " + "ORDER BY id" + ), + {"d": date_str}, + ) + return [dict(row._mapping) for row in result] diff --git a/ai-service/services/embedding_service.py b/ai-service/services/embedding_service.py new file mode 100644 index 0000000..c5a7602 --- /dev/null +++ b/ai-service/services/embedding_service.py @@ -0,0 +1,144 @@ +from services.ollama_client import ollama_client +from db.vector_store import vector_store +from db.metadata_store import metadata_store +from services.db_client import get_all_issues, get_issue_by_id, get_issues_since + + +def build_document_text(issue: dict) -> str: + parts = [] + if issue.get("description"): + parts.append(issue["description"]) + if issue.get("final_description"): + parts.append(issue["final_description"]) + if issue.get("detail_notes"): + parts.append(issue["detail_notes"]) + if issue.get("solution"): + parts.append(f"해결: {issue['solution']}") + if issue.get("management_comment"): + parts.append(f"의견: 
{issue['management_comment']}") + if issue.get("cause_detail"): + parts.append(f"원인: {issue['cause_detail']}") + return " ".join(parts) + + +def build_metadata(issue: dict) -> dict: + meta = {"issue_id": issue["id"]} + for key in [ + "category", "project_id", "review_status", + "responsible_department", "location_info", + ]: + val = issue.get(key) + if val is not None: + meta[key] = str(val) + rd = issue.get("report_date") + if rd: + meta["report_date"] = str(rd)[:10] + meta["has_solution"] = "true" if issue.get("solution") else "false" + return meta + + +async def sync_all_issues() -> dict: + issues = get_all_issues() + synced = 0 + skipped = 0 + for issue in issues: + doc_text = build_document_text(issue) + if not doc_text.strip(): + skipped += 1 + continue + try: + embedding = await ollama_client.generate_embedding(doc_text) + vector_store.upsert( + doc_id=f"issue_{issue['id']}", + document=doc_text, + embedding=embedding, + metadata=build_metadata(issue), + ) + synced += 1 + except Exception as e: + skipped += 1 + if issues: + max_id = max(i["id"] for i in issues) + metadata_store.set_last_synced_id(max_id) + return {"synced": synced, "skipped": skipped, "total": len(issues)} + + +async def sync_single_issue(issue_id: int) -> dict: + issue = get_issue_by_id(issue_id) + if not issue: + return {"status": "not_found"} + doc_text = build_document_text(issue) + if not doc_text.strip(): + return {"status": "empty_text"} + embedding = await ollama_client.generate_embedding(doc_text) + vector_store.upsert( + doc_id=f"issue_{issue['id']}", + document=doc_text, + embedding=embedding, + metadata=build_metadata(issue), + ) + return {"status": "synced", "issue_id": issue_id} + + +async def sync_incremental() -> dict: + last_id = metadata_store.get_last_synced_id() + issues = get_issues_since(last_id) + synced = 0 + for issue in issues: + doc_text = build_document_text(issue) + if not doc_text.strip(): + continue + try: + embedding = await 
async def search_similar_by_id(issue_id: int, n_results: int = 5) -> list[dict]:
    """Find issues semantically similar to an existing issue (self excluded).

    Returns an empty list when the issue is missing or has no text to embed.
    """
    issue = get_issue_by_id(issue_id)
    if not issue:
        return []
    doc_text = build_document_text(issue)
    if not doc_text.strip():
        return []
    embedding = await ollama_client.generate_embedding(doc_text)
    # Ask for one extra hit because the issue itself is usually the top match.
    results = vector_store.query(embedding=embedding, n_results=n_results + 1)
    self_id = f"issue_{issue_id}"
    return [r for r in results if r["id"] != self_id][:n_results]


async def search_similar_by_text(
    query: str, n_results: int = 5, filters: dict | None = None
) -> list[dict]:
    """Semantic search over issue documents with optional metadata filters.

    ``None``-valued filters are dropped; a single remaining clause is passed
    directly, multiple clauses are combined with ``$and``.
    """
    embedding = await ollama_client.generate_embedding(query)
    where = None
    if filters:
        conditions = [{k: str(v)} for k, v in filters.items() if v is not None]
        if len(conditions) == 1:
            where = conditions[0]
        elif conditions:
            where = {"$and": conditions}
    return vector_store.query(embedding=embedding, n_results=n_results, where=where)


class OllamaClient:
    """Thin async wrapper around the Ollama HTTP API (embeddings + chat)."""

    def __init__(self):
        self.base_url = settings.OLLAMA_BASE_URL
        # Generation can be slow: keep the connect timeout short so a down
        # server fails fast while long generations are still allowed.
        self.timeout = httpx.Timeout(float(settings.OLLAMA_TIMEOUT), connect=10.0)

    async def generate_embedding(self, text: str) -> list[float]:
        """Return the embedding vector for *text* from the configured model."""
        async with httpx.AsyncClient(timeout=self.timeout) as client:
            response = await client.post(
                f"{self.base_url}/api/embeddings",
                json={"model": settings.OLLAMA_EMBED_MODEL, "prompt": text},
            )
            response.raise_for_status()
            return response.json()["embedding"]

    async def batch_embeddings(self, texts: list[str]) -> list[list[float]]:
        """Embed each text sequentially (one request at a time)."""
        return [await self.generate_embedding(text) for text in texts]

    async def generate_text(self, prompt: str, system: str | None = None) -> str:
        """Run a single-turn chat completion; *system* prompt is optional."""
        messages = []
        if system:
            messages.append({"role": "system", "content": system})
        messages.append({"role": "user", "content": prompt})
        async with httpx.AsyncClient(timeout=self.timeout) as client:
            response = await client.post(
                f"{self.base_url}/api/chat",
                json={
                    "model": settings.OLLAMA_TEXT_MODEL,
                    "messages": messages,
                    "stream": False,
                    "options": {"temperature": 0.3, "num_predict": 2048},
                },
            )
            response.raise_for_status()
            return response.json()["message"]["content"]

    async def check_health(self) -> dict:
        """Probe the Ollama server; never raises.

        Returns ``{"status": "connected", "models": [...]}`` or
        ``{"status": "disconnected"}``.
        """
        try:
            async with httpx.AsyncClient(timeout=httpx.Timeout(5.0)) as client:
                response = await client.get(f"{self.base_url}/api/tags")
                # Bug fix: a non-2xx JSON response used to be reported as
                # "connected"; fail into the except branch instead.
                response.raise_for_status()
                models = response.json().get("models", [])
                return {
                    "status": "connected",
                    "models": [m["name"] for m in models],
                }
        except Exception:
            return {"status": "disconnected"}


ollama_client = OllamaClient()


def _load_prompt(path: str) -> str:
    """Read a prompt template file as UTF-8 text."""
    with open(path, "r", encoding="utf-8") as f:
        return f.read()
+ lines = [] + for i, r in enumerate(results, 1): + meta = r.get("metadata", {}) + similarity = round(r.get("similarity", 0) * 100) + doc = (r.get("document", ""))[:500] + cat = meta.get("category", "") + dept = meta.get("responsible_department", "") + status = meta.get("review_status", "") + has_sol = meta.get("has_solution", "false") + date = meta.get("report_date", "") + issue_id = meta.get("issue_id", r["id"]) + lines.append( + f"[사례 {i}] No.{issue_id} (유사도 {similarity}%)\n" + f" 분류: {cat} | 부서: {dept} | 상태: {status} | 날짜: {date} | 해결여부: {'O' if has_sol == 'true' else 'X'}\n" + f" 내용: {doc}" + ) + return "\n\n".join(lines) + + +async def rag_suggest_solution(issue_id: int) -> dict: + """과거 유사 이슈의 해결 사례를 참고하여 해결방안을 제안""" + issue = get_issue_by_id(issue_id) + if not issue: + return {"available": False, "error": "이슈를 찾을 수 없습니다"} + + doc_text = build_document_text(issue) + if not doc_text.strip(): + return {"available": False, "error": "이슈 내용이 비어있습니다"} + + # 해결 완료된 유사 이슈 검색 + similar = await search_similar_by_text( + doc_text, n_results=5, filters={"has_solution": "true"} + ) + # 해결 안 된 것도 포함 (참고용) + if len(similar) < 3: + all_similar = await search_similar_by_text(doc_text, n_results=5) + seen = {r["id"] for r in similar} + for r in all_similar: + if r["id"] not in seen: + similar.append(r) + if len(similar) >= 5: + break + + context = _format_retrieved_issues(similar) + template = _load_prompt("prompts/rag_suggest_solution.txt") + prompt = template.format( + description=issue.get("description", ""), + detail_notes=issue.get("detail_notes", ""), + category=issue.get("category", ""), + retrieved_cases=context, + ) + + response = await ollama_client.generate_text(prompt) + return { + "available": True, + "issue_id": issue_id, + "suggestion": response, + "referenced_issues": [ + { + "id": r.get("metadata", {}).get("issue_id", r["id"]), + "similarity": round(r.get("similarity", 0) * 100), + "has_solution": r.get("metadata", {}).get("has_solution", "false") == "true", 
+ } + for r in similar + ], + } + + +async def rag_ask(question: str, project_id: int = None) -> dict: + """부적합 데이터를 기반으로 자연어 질문에 답변""" + # 프로젝트 필터 없이 전체 데이터에서 검색 (과거 미지정 데이터 포함) + results = await search_similar_by_text( + question, n_results=15, filters=None + ) + context = _format_retrieved_issues(results) + + template = _load_prompt("prompts/rag_qa.txt") + prompt = template.format( + question=question, + retrieved_cases=context, + ) + + response = await ollama_client.generate_text(prompt) + return { + "available": True, + "answer": response, + "sources": [ + { + "id": r.get("metadata", {}).get("issue_id", r["id"]), + "similarity": round(r.get("similarity", 0) * 100), + "snippet": (r.get("document", ""))[:100], + } + for r in results + ], + } + + +async def rag_analyze_pattern(description: str, n_results: int = 10) -> dict: + """유사 부적합 패턴 분석 — 반복되는 문제인지, 근본 원인은 무엇인지""" + results = await search_similar_by_text(description, n_results=n_results) + context = _format_retrieved_issues(results) + + template = _load_prompt("prompts/rag_pattern.txt") + prompt = template.format( + description=description, + retrieved_cases=context, + total_similar=len(results), + ) + + response = await ollama_client.generate_text(prompt) + return { + "available": True, + "analysis": response, + "similar_count": len(results), + "sources": [ + { + "id": r.get("metadata", {}).get("issue_id", r["id"]), + "similarity": round(r.get("similarity", 0) * 100), + "category": r.get("metadata", {}).get("category", ""), + } + for r in results + ], + } + + +async def rag_classify_with_context(description: str, detail_notes: str = "") -> dict: + """과거 사례를 참고하여 더 정확한 분류 수행 (기존 classify 강화)""" + query = f"{description} {detail_notes}".strip() + similar = await search_similar_by_text(query, n_results=5) + context = _format_retrieved_issues(similar) + + template = _load_prompt("prompts/rag_classify.txt") + prompt = template.format( + description=description, + detail_notes=detail_notes, + 
retrieved_cases=context, + ) + + raw = await ollama_client.generate_text(prompt) + import json + try: + start = raw.find("{") + end = raw.rfind("}") + 1 + if start >= 0 and end > start: + result = json.loads(raw[start:end]) + result["rag_enhanced"] = True + result["referenced_count"] = len(similar) + return {"available": True, **result} + except json.JSONDecodeError: + pass + return {"available": True, "raw_response": raw, "rag_enhanced": True} diff --git a/ai-service/services/report_service.py b/ai-service/services/report_service.py new file mode 100644 index 0000000..e00b066 --- /dev/null +++ b/ai-service/services/report_service.py @@ -0,0 +1,122 @@ +import httpx +from services.ollama_client import ollama_client +from services.db_client import get_daily_qc_stats, get_issues_for_date +from config import settings + + +REPORT_PROMPT_PATH = "prompts/daily_report.txt" + + +def _load_prompt(path: str) -> str: + with open(path, "r", encoding="utf-8") as f: + return f.read() + + +async def _fetch_system1_data(date_str: str, token: str) -> dict: + headers = {"Authorization": f"Bearer {token}"} + data = {"attendance": None, "work_reports": None, "patrol": None} + try: + async with httpx.AsyncClient(timeout=15.0) as client: + # 근태 + try: + r = await client.get( + f"{settings.SYSTEM1_API_URL}/api/attendance/daily-status", + params={"date": date_str}, + headers=headers, + ) + if r.status_code == 200: + data["attendance"] = r.json() + except Exception: + pass + # 작업보고 + try: + r = await client.get( + f"{settings.SYSTEM1_API_URL}/api/daily-work-reports/summary", + params={"date": date_str}, + headers=headers, + ) + if r.status_code == 200: + data["work_reports"] = r.json() + except Exception: + pass + # 순회점검 + try: + r = await client.get( + f"{settings.SYSTEM1_API_URL}/api/patrol/today-status", + params={"date": date_str}, + headers=headers, + ) + if r.status_code == 200: + data["patrol"] = r.json() + except Exception: + pass + except Exception: + pass + return data + + +def 
_format_attendance(data) -> str: + if not data: + return "데이터 없음" + if isinstance(data, dict): + parts = [] + for k, v in data.items(): + parts.append(f" {k}: {v}") + return "\n".join(parts) + return str(data) + + +def _format_work_reports(data) -> str: + if not data: + return "데이터 없음" + return str(data) + + +def _format_qc_issues(issues: list[dict], stats: dict) -> str: + lines = [] + lines.append(f"전체: {stats.get('total', 0)}건") + lines.append(f"금일 신규: {stats.get('new_today', 0)}건") + lines.append(f"진행중: {stats.get('in_progress', 0)}건") + lines.append(f"완료: {stats.get('completed', 0)}건") + lines.append(f"미검토: {stats.get('pending', 0)}건") + if issues: + lines.append("\n금일 신규 이슈:") + for iss in issues[:10]: + cat = iss.get("category", "") + desc = (iss.get("description") or "")[:50] + status = iss.get("review_status", "") + lines.append(f" - [{cat}] {desc} (상태: {status})") + return "\n".join(lines) + + +def _format_patrol(data) -> str: + if not data: + return "데이터 없음" + return str(data) + + +async def generate_daily_report( + date_str: str, project_id: int = None, token: str = "" +) -> dict: + system1_data = await _fetch_system1_data(date_str, token) + qc_stats = get_daily_qc_stats(date_str) + qc_issues = get_issues_for_date(date_str) + + template = _load_prompt(REPORT_PROMPT_PATH) + prompt = template.format( + date=date_str, + attendance_data=_format_attendance(system1_data["attendance"]), + work_report_data=_format_work_reports(system1_data["work_reports"]), + qc_issue_data=_format_qc_issues(qc_issues, qc_stats), + patrol_data=_format_patrol(system1_data["patrol"]), + ) + + report_text = await ollama_client.generate_text(prompt) + return { + "date": date_str, + "report": report_text, + "stats": { + "qc": qc_stats, + "new_issues_count": len(qc_issues), + }, + } diff --git a/docker-compose.yml b/docker-compose.yml index e82b43b..6ce923a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -307,6 +307,40 @@ services: networks: - tk-network + # 
================================================================= + # AI Service + # ================================================================= + + ai-service: + build: + context: ./ai-service + dockerfile: Dockerfile + container_name: tk-ai-service + restart: unless-stopped + ports: + - "30400:8000" + environment: + - OLLAMA_BASE_URL=${OLLAMA_BASE_URL:-http://100.111.160.84:11434} + - OLLAMA_TEXT_MODEL=${OLLAMA_TEXT_MODEL:-qwen2.5:14b-instruct-q4_K_M} + - OLLAMA_EMBED_MODEL=${OLLAMA_EMBED_MODEL:-bge-m3} + - OLLAMA_TIMEOUT=${OLLAMA_TIMEOUT:-120} + - DB_HOST=mariadb + - DB_PORT=3306 + - DB_USER=${MYSQL_USER:-hyungi_user} + - DB_PASSWORD=${MYSQL_PASSWORD} + - DB_NAME=${MYSQL_DATABASE:-hyungi} + - SECRET_KEY=${SSO_JWT_SECRET} + - SYSTEM1_API_URL=http://system1-api:3005 + - CHROMA_PERSIST_DIR=/app/data/chroma + - TZ=Asia/Seoul + volumes: + - ai_data:/app/data + depends_on: + mariadb: + condition: service_healthy + networks: + - tk-network + # ================================================================= # Gateway # ================================================================= @@ -382,6 +416,7 @@ volumes: system3_uploads: external: true name: tkqc-package_uploads + ai_data: networks: tk-network: driver: bridge diff --git a/gateway/nginx.conf b/gateway/nginx.conf index 498ee72..dd797d5 100644 --- a/gateway/nginx.conf +++ b/gateway/nginx.conf @@ -53,6 +53,18 @@ server { proxy_set_header X-Forwarded-Proto $scheme; } + # ===== AI Service API ===== + location /ai-api/ { + proxy_pass http://ai-service:8000/api/ai/; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_read_timeout 120s; + proxy_send_timeout 120s; + } + # ===== System 1 Web (나머지 모든 경로) ===== location / { proxy_pass http://system1-web:80; diff --git a/system3-nonconformance/api/routers/page_permissions.py 
b/system3-nonconformance/api/routers/page_permissions.py index 87a6d3a..74542b1 100644 --- a/system3-nonconformance/api/routers/page_permissions.py +++ b/system3-nonconformance/api/routers/page_permissions.py @@ -52,6 +52,7 @@ DEFAULT_PAGES = { 'issues_dashboard': {'title': '현황판', 'default_access': True}, 'reports': {'title': '보고서', 'default_access': False}, 'reports_daily': {'title': '일일보고서', 'default_access': False}, + 'ai_assistant': {'title': 'AI 어시스턴트', 'default_access': False}, } diff --git a/system3-nonconformance/web/ai-assistant.html b/system3-nonconformance/web/ai-assistant.html new file mode 100644 index 0000000..776b8ed --- /dev/null +++ b/system3-nonconformance/web/ai-assistant.html @@ -0,0 +1,284 @@ + + + + + + AI 어시스턴트 + + + + + + + + +
+
+
+

AI 어시스턴트를 불러오는 중...

+
+
+ + +
+ +
+ +
+ +
+
+
+

+ + AI 어시스턴트 +

+

AI 기반 부적합 분석, 검색, 질의응답을 한곳에서 사용하세요

+
+
+
+ + +
+
+
+
+

AI 서비스

+

확인 중...

+
+
+ +
+
+
+
+
+
+

임베딩 데이터

+

-

+
+
+ +
+
+
+
+
+
+

AI 모델

+

-

+
+
+ +
+
+
+
+ + +
+
+
+ +

AI Q&A

+ 과거 사례 기반 +
+ +
+ + +
+
+ +

부적합 관련 질문을 입력하세요.

+

과거 사례를 분석하여 답변합니다.

+
+
+ + +
+ + + + +
+ + +
+
+ +
+
+ + +
+
+
+ + +
+
+ +

시맨틱 검색

+ 유사 부적합 찾기 +
+ +
+ + + + + +
+ + +
+ +
+
+ + +
+
+ +

패턴 분석

+ 부적합 패턴 파악 +
+ +
+ + +
+ + + +
+ + +
+
+ +

AI 분류 테스트

+ 기본 vs RAG 비교 +
+ +
+
+ + +
+
+ + +
+
+
+ + +
+ + + +
+
+
+ + + + + + + + + + + + + + + + diff --git a/system3-nonconformance/web/issues-dashboard.html b/system3-nonconformance/web/issues-dashboard.html index 1348070..822ae61 100644 --- a/system3-nonconformance/web/issues-dashboard.html +++ b/system3-nonconformance/web/issues-dashboard.html @@ -115,6 +115,57 @@ + +
+
+ +

AI 유사 부적합 검색

+
+
+ + +
+ + + + +
+
+ +

AI Q&A (과거 사례 기반)

+
+
+ + +
+ + +
+
+
@@ -549,15 +600,29 @@
+ + + - - - - - - - - - + + + + + + + + + + diff --git a/system3-nonconformance/web/issues-inbox.html b/system3-nonconformance/web/issues-inbox.html index dd02ecc..ee6687b 100644 --- a/system3-nonconformance/web/issues-inbox.html +++ b/system3-nonconformance/web/issues-inbox.html @@ -204,6 +204,26 @@ + +
+
+ + AI 분류 추천 + + +
+ + +
+
@@ -350,13 +370,14 @@ - - - - - - - - + + + + + + + + + diff --git a/system3-nonconformance/web/issues-management.html b/system3-nonconformance/web/issues-management.html index 8ffcfe7..f82f884 100644 --- a/system3-nonconformance/web/issues-management.html +++ b/system3-nonconformance/web/issues-management.html @@ -161,6 +161,44 @@
+ + +
+ `; + }).join(''); + results.classList.remove('hidden'); +} + +// RAG Q&A +async function aiAskQuestion() { + const question = document.getElementById('aiQaQuestion')?.value?.trim(); + if (!question || typeof AiAPI === 'undefined') return; + + const loading = document.getElementById('aiQaLoading'); + const result = document.getElementById('aiQaResult'); + const answer = document.getElementById('aiQaAnswer'); + const sources = document.getElementById('aiQaSources'); + + if (loading) loading.classList.remove('hidden'); + if (result) result.classList.add('hidden'); + + const projectId = document.getElementById('projectFilter')?.value || null; + const data = await AiAPI.askQuestion(question, projectId ? parseInt(projectId) : null); + + if (loading) loading.classList.add('hidden'); + + if (!data.available) { + if (answer) answer.textContent = 'AI 서비스를 사용할 수 없습니다'; + if (result) result.classList.remove('hidden'); + return; + } + + if (answer) answer.textContent = data.answer || ''; + if (sources && data.sources) { + const refs = data.sources.slice(0, 5).map(s => + `No.${s.id}(${s.similarity}%)` + ).join(', '); + sources.textContent = refs ? `참고: ${refs}` : ''; + } + if (result) result.classList.remove('hidden'); +} + +// AI 이슈 상세 모달 +async function showAiIssueModal(issueId) { + const modal = document.getElementById('aiIssueModal'); + const title = document.getElementById('aiIssueModalTitle'); + const body = document.getElementById('aiIssueModalBody'); + if (!modal || !body) return; + + title.textContent = `부적합 No.${issueId}`; + body.innerHTML = '
로딩 중...
'; + modal.classList.remove('hidden'); + + try { + const token = typeof TokenManager !== 'undefined' ? TokenManager.getToken() : null; + const headers = token ? { 'Authorization': `Bearer ${token}` } : {}; + const res = await fetch(`/api/issues/${issueId}`, { headers }); + if (!res.ok) throw new Error('fetch failed'); + const issue = await res.json(); + + const categoryText = typeof getCategoryText === 'function' ? getCategoryText(issue.category || issue.final_category) : (issue.category || issue.final_category || '-'); + const statusText = typeof getStatusText === 'function' ? getStatusText(issue.review_status) : (issue.review_status || '-'); + const deptText = typeof getDepartmentText === 'function' ? getDepartmentText(issue.responsible_department) : (issue.responsible_department || '-'); + + body.innerHTML = ` +
+ ${categoryText} + ${statusText} + ${deptText} + ${issue.report_date ? `${issue.report_date}` : ''} +
+ ${issue.description ? `
설명:

${issue.description}

` : ''} + ${issue.detail_notes ? `
상세:

${issue.detail_notes}

` : ''} + ${issue.final_description ? `
최종 판정:

${issue.final_description}

` : ''} + ${issue.solution ? `
해결방안:

${issue.solution}

` : ''} + ${issue.cause_detail ? `
원인:

${issue.cause_detail}

` : ''} + ${issue.management_comment ? `
관리 의견:

${issue.management_comment}

` : ''} + + `; + } catch (e) { + body.innerHTML = `

이슈를 불러올 수 없습니다

+ 관리함에서 보기 →`; + } +} + +// 초기화 +initializeDashboardApp(); diff --git a/system3-nonconformance/web/static/js/pages/issues-inbox.js b/system3-nonconformance/web/static/js/pages/issues-inbox.js index 439600d..f58b2a4 100644 --- a/system3-nonconformance/web/static/js/pages/issues-inbox.js +++ b/system3-nonconformance/web/static/js/pages/issues-inbox.js @@ -879,14 +879,81 @@ function showError(message) { alert(message); } -// API 스크립트 동적 로딩 -const script = document.createElement('script'); -script.src = '/static/js/api.js?v=20260213'; -script.onload = function() { - console.log('API 스크립트 로드 완료 (issues-inbox.html)'); - initializeInbox(); -}; -script.onerror = function() { - console.error('API 스크립트 로드 실패'); -}; -document.head.appendChild(script); +// AI 분류 추천 +async function aiClassifyCurrentIssue() { + if (!currentIssueId || typeof AiAPI === 'undefined') return; + const issue = issues.find(i => i.id === currentIssueId); + if (!issue) return; + + const btn = document.getElementById('aiClassifyBtn'); + const loading = document.getElementById('aiClassifyLoading'); + const result = document.getElementById('aiClassifyResult'); + if (btn) btn.disabled = true; + if (loading) loading.classList.remove('hidden'); + if (result) result.classList.add('hidden'); + + // RAG 강화 분류 사용 (과거 사례 참고) + const classifyFn = AiAPI.classifyWithRAG || AiAPI.classifyIssue; + const data = await classifyFn( + issue.description || issue.final_description || '', + issue.detail_notes || '' + ); + + if (loading) loading.classList.add('hidden'); + if (btn) btn.disabled = false; + + if (!data.available) { + if (result) { + result.innerHTML = '

AI 서비스를 사용할 수 없습니다

'; + result.classList.remove('hidden'); + } + return; + } + + const categoryMap = { + 'material_missing': '자재 누락', + 'design_error': '설계 오류', + 'incoming_defect': '반입 불량', + 'inspection_miss': '검사 누락', + }; + const deptMap = { + 'production': '생산', + 'quality': '품질', + 'purchasing': '구매', + 'design': '설계', + 'sales': '영업', + }; + + const cat = data.category || ''; + const dept = data.responsible_department || ''; + const severity = data.severity || ''; + const summary = data.summary || ''; + const confidence = data.category_confidence ? Math.round(data.category_confidence * 100) : ''; + + result.innerHTML = ` +
+

분류: ${categoryMap[cat] || cat} ${confidence ? `(${confidence}%)` : ''}

+

부서: ${deptMap[dept] || dept}

+

심각도: ${severity}

+ ${summary ? `

요약: ${summary}

` : ''} + +
+ `; + result.classList.remove('hidden'); +} + +function applyAiClassification(category) { + const reviewCategory = document.getElementById('reviewCategory'); + if (reviewCategory && category) { + reviewCategory.value = category; + } + if (window.showToast) { + window.showToast('AI 추천이 적용되었습니다', 'success'); + } +} + +// 초기화 (api.js는 HTML에서 로드됨) +initializeInbox(); diff --git a/system3-nonconformance/web/static/js/pages/issues-management.js b/system3-nonconformance/web/static/js/pages/issues-management.js index 26971ac..51087b7 100644 --- a/system3-nonconformance/web/static/js/pages/issues-management.js +++ b/system3-nonconformance/web/static/js/pages/issues-management.js @@ -930,13 +930,100 @@ async function openIssueDetailModal(issueId) { // 모달 표시 document.getElementById('issueDetailModal').classList.remove('hidden'); + + // AI 유사 부적합 자동 로드 + const aiPanel = document.getElementById('aiSimilarPanel'); + if (aiPanel) { + aiPanel.classList.remove('hidden'); + loadSimilarIssues(); + } } function closeIssueDetailModal() { document.getElementById('issueDetailModal').classList.add('hidden'); + const aiPanel = document.getElementById('aiSimilarPanel'); + if (aiPanel) aiPanel.classList.add('hidden'); + // RAG 결과 초기화 + const suggestResult = document.getElementById('aiSuggestResult'); + if (suggestResult) suggestResult.classList.add('hidden'); currentModalIssueId = null; } +// RAG: AI 해결방안 제안 +async function aiSuggestSolution() { + if (!currentModalIssueId || typeof AiAPI === 'undefined') return; + const btn = document.getElementById('aiSuggestSolutionBtn'); + const loading = document.getElementById('aiSuggestLoading'); + const result = document.getElementById('aiSuggestResult'); + const content = document.getElementById('aiSuggestContent'); + const sources = document.getElementById('aiSuggestSources'); + + if (btn) btn.disabled = true; + if (loading) loading.classList.remove('hidden'); + if (result) result.classList.add('hidden'); + + const data = await 
AiAPI.suggestSolution(currentModalIssueId); + + if (loading) loading.classList.add('hidden'); + if (btn) btn.disabled = false; + + if (!data.available) { + if (content) content.textContent = 'AI 서비스를 사용할 수 없습니다'; + if (result) result.classList.remove('hidden'); + return; + } + + if (content) content.textContent = data.suggestion || ''; + if (sources && data.referenced_issues) { + const refs = data.referenced_issues + .filter(r => r.has_solution) + .map(r => `No.${r.id}(${r.similarity}%)`) + .join(', '); + sources.textContent = refs ? `참고 사례: ${refs}` : ''; + } + if (result) result.classList.remove('hidden'); +} + +// AI 유사 부적합 검색 +async function loadSimilarIssues() { + if (!currentModalIssueId || typeof AiAPI === 'undefined') return; + const loading = document.getElementById('aiSimilarLoading'); + const results = document.getElementById('aiSimilarResults'); + const empty = document.getElementById('aiSimilarEmpty'); + if (loading) loading.classList.remove('hidden'); + if (results) results.innerHTML = ''; + if (empty) empty.classList.add('hidden'); + + const data = await AiAPI.getSimilarIssues(currentModalIssueId, 5); + if (loading) loading.classList.add('hidden'); + + if (!data.available || !data.results || data.results.length === 0) { + if (empty) empty.classList.remove('hidden'); + return; + } + + results.innerHTML = data.results.map(r => { + const meta = r.metadata || {}; + const similarity = Math.round((r.similarity || 0) * 100); + const issueId = meta.issue_id || r.id.replace('issue_', ''); + const doc = (r.document || '').substring(0, 80); + const cat = meta.category || ''; + return ` +
+ No.${issueId} + + ${similarity}% 유사 + +
+

${doc}...

+ ${cat ? `${cat}` : ''} +
+ `; + }).join(''); +} + function createModalContent(issue, project) { return `
@@ -1186,17 +1273,8 @@ function getPriorityBadge(priority) { return `${p.text}`; } -// API 스크립트 동적 로딩 -const script = document.createElement('script'); -script.src = '/static/js/api.js?v=20260213'; -script.onload = function() { - console.log('✅ API 스크립트 로드 완료 (issues-management.js)'); - initializeManagement(); -}; -script.onerror = function() { - console.error('❌ API 스크립트 로드 실패'); -}; -document.head.appendChild(script); +// 초기화 (api.js는 HTML에서 로드됨) +initializeManagement(); // 추가 정보 모달 관련 함수들 let selectedIssueId = null;