feat: implement Phase 1 data pipeline and migration

- Implement kordoc /parse endpoint (HWP/HWPX/PDF via kordoc lib,
  text files direct read, images flagged for OCR)
- Add queue consumer with APScheduler (1min interval, stage chaining
  extract→classify→embed, stale item recovery, retry logic)
- Add extract worker (kordoc HTTP call + direct text read)
- Add classify worker (Qwen3.5 AI classification with think-tag
  stripping and robust JSON extraction from AI responses)
- Add embed worker (GPU server nomic-embed-text, graceful failure)
- Add DEVONthink migration script with folder mapping for 16 DBs,
  dry-run mode, batch commits, and idempotent re-runs via the
  file_path UNIQUE constraint (sketched below)
- Enhance ai/client.py with strip_thinking() and parse_json_response()

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Hyungi Ahn committed 2026-04-02 14:35:36 +09:00
commit 299fac3904 (parent 23ee055357)
9 changed files with 682 additions and 13 deletions
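The migration script is part of this commit but its diff is not reproduced below. A minimal sketch of its shape, assuming hypothetical FOLDER_MAP entries, a placeholder NAS path, and a PostgreSQL ON CONFLICT insert (the real script maps 16 databases):

# Sketch only: FOLDER_MAP entries, field choices, and the default path are
# illustrative, not the shipped script. Assumes PostgreSQL, so ON CONFLICT
# provides the idempotent behavior on top of UNIQUE(file_path).
import asyncio
from pathlib import Path

from sqlalchemy.dialects.postgresql import insert

from core.database import async_session
from models.document import Document

FOLDER_MAP = {
    "Philosophy.dtBase2": "Knowledge/Philosophy",    # hypothetical entries
    "Programming.dtBase2": "Knowledge/Programming",  # (16 in the real script)
}
BATCH_SIZE = 100


async def migrate(root: Path, dry_run: bool = True) -> None:
    async with async_session() as session:
        pending = 0
        for db_dir, domain in FOLDER_MAP.items():
            for path in sorted((root / db_dir).rglob("*")):
                if not path.is_file():
                    continue
                rel_path = str(path.relative_to(root))
                if dry_run:
                    print(f"would import {rel_path} -> {domain}")
                    continue
                # UNIQUE(file_path) + DO NOTHING: re-runs skip imported rows
                stmt = (
                    insert(Document)
                    .values(
                        file_path=rel_path,
                        file_format=path.suffix.lstrip(".").lower(),
                        ai_domain=domain,  # field choice is a guess at the mapping target
                    )
                    .on_conflict_do_nothing(index_elements=["file_path"])
                )
                await session.execute(stmt)
                pending += 1
                if pending % BATCH_SIZE == 0:  # batch commits
                    await session.commit()
        await session.commit()


if __name__ == "__main__":
    asyncio.run(migrate(Path("/mnt/nas/devonthink"), dry_run=True))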

ai/client.py

@@ -1,11 +1,39 @@
"""AI 추상화 레이어 — 통합 클라이언트. 기본값은 항상 Qwen3.5."""
import json
import re
from pathlib import Path
import httpx
from core.config import settings
def strip_thinking(text: str) -> str:
"""Qwen3.5의 <think>...</think> 블록 제거"""
return re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL).strip()
def parse_json_response(raw: str) -> dict | None:
"""AI 응답에서 JSON 객체 추출 (think 태그, 코드블록 등 제거)"""
cleaned = strip_thinking(raw)
# 코드블록 내부 JSON 추출
code_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", cleaned, re.DOTALL)
if code_match:
cleaned = code_match.group(1)
# 마지막 유효 JSON 객체 찾기
matches = list(re.finditer(r"\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}", cleaned, re.DOTALL))
for m in reversed(matches):
try:
return json.loads(m.group())
except json.JSONDecodeError:
continue
# 최후 시도: 전체 텍스트를 JSON으로
try:
return json.loads(cleaned)
except json.JSONDecodeError:
return None
# 프롬프트 로딩
PROMPTS_DIR = Path(__file__).parent.parent / "prompts"
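
As a sanity check, here is what these helpers tolerate; the raw string below is a fabricated sample response, not captured model output:

raw = (
    "<think>The document covers lathe guard requirements...</think>\n"
    "Here is the result:\n"
    "```json\n"
    '{"domain": "Knowledge/Industrial_Safety", "tags": ["lathe", "safety"]}\n'
    "```"
)
assert strip_thinking(raw).startswith("Here is the result:")
assert parse_json_response(raw) == {
    "domain": "Knowledge/Industrial_Safety",
    "tags": ["lathe", "safety"],
}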

FastAPI app entrypoint (lifespan handler)

@@ -16,11 +16,21 @@ from models.user import User
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Lifespan handler run at app startup/shutdown."""
    from apscheduler.schedulers.asyncio import AsyncIOScheduler
    from workers.queue_consumer import consume_queue

    # Startup: verify the DB connection
    await init_db()
    # APScheduler: run the queue consumer every minute
    scheduler = AsyncIOScheduler()
    scheduler.add_job(consume_queue, "interval", minutes=1, id="queue_consumer")
    scheduler.start()
    yield
    # Shutdown: scheduler first, then the DB engine
    scheduler.shutdown(wait=False)
    await engine.dispose()
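
With a 1-minute interval, a batch that runs long could overlap the next tick. Not in this diff, but APScheduler's standard job options can guard against that; a possible tweak:

scheduler.add_job(
    consume_queue, "interval", minutes=1, id="queue_consumer",
    max_instances=1,  # skip a tick rather than run two consumers at once
    coalesce=True,    # collapse a backlog of missed runs into a single run
)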

workers/classify_worker.py

@@ -0,0 +1,76 @@
"""AI 분류 워커 — Qwen3.5로 도메인/태그/요약 생성"""
from datetime import datetime, timezone
from sqlalchemy.ext.asyncio import AsyncSession
from ai.client import AIClient, parse_json_response
from core.utils import setup_logger
from models.document import Document
logger = setup_logger("classify_worker")
# 분류용 텍스트 최대 길이 (Qwen3.5 컨텍스트 관리)
MAX_CLASSIFY_TEXT = 8000
# 유효한 도메인 목록
VALID_DOMAINS = {
"Knowledge/Philosophy",
"Knowledge/Language",
"Knowledge/Engineering",
"Knowledge/Industrial_Safety",
"Knowledge/Programming",
"Knowledge/General",
"Reference",
}
async def process(document_id: int, session: AsyncSession) -> None:
"""문서 AI 분류 + 요약"""
doc = await session.get(Document, document_id)
if not doc:
raise ValueError(f"문서 ID {document_id}를 찾을 수 없음")
if not doc.extracted_text:
raise ValueError(f"문서 ID {document_id}: extracted_text가 비어있음")
client = AIClient()
try:
# ─── 분류 ───
truncated = doc.extracted_text[:MAX_CLASSIFY_TEXT]
raw_response = await client.classify(truncated)
parsed = parse_json_response(raw_response)
if not parsed:
raise ValueError(f"AI 응답에서 JSON 추출 실패: {raw_response[:200]}")
# 유효성 검증 + DB 업데이트
domain = parsed.get("domain", "")
if domain not in VALID_DOMAINS:
logger.warning(f"[분류] document_id={document_id}: 알 수 없는 도메인 '{domain}', Knowledge/General로 대체")
domain = "Knowledge/General"
doc.ai_domain = domain
doc.ai_sub_group = parsed.get("sub_group", "")
doc.ai_tags = parsed.get("tags", [])
if parsed.get("sourceChannel") and not doc.source_channel:
doc.source_channel = parsed["sourceChannel"]
if parsed.get("dataOrigin") and not doc.data_origin:
doc.data_origin = parsed["dataOrigin"]
# ─── 요약 ───
summary = await client.summarize(doc.extracted_text[:15000])
doc.ai_summary = summary
# ─── 메타데이터 ───
doc.ai_model_version = "qwen3.5-35b-a3b"
doc.ai_processed_at = datetime.now(timezone.utc)
logger.info(
f"[분류] document_id={document_id}: "
f"domain={domain}, tags={doc.ai_tags}, summary={len(summary)}"
)
finally:
await client.close()
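
The worker expects the classification prompt to return roughly this shape; a hypothetical example whose field names match the parsed.get calls above:

example = {  # hypothetical well-formed classifier response (already parsed)
    "domain": "Knowledge/Engineering",   # must be in VALID_DOMAINS, else falls back
    "sub_group": "Welding",
    "tags": ["KS B 0052", "welding symbols"],
    "sourceChannel": "scan",             # optional; only copied if doc field is empty
    "dataOrigin": "external",            # optional; same guard
}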

workers/embed_worker.py

@@ -0,0 +1,44 @@
"""벡터 임베딩 워커 — GPU 서버 nomic-embed-text 호출"""
from datetime import datetime, timezone
from sqlalchemy.ext.asyncio import AsyncSession
from ai.client import AIClient
from core.utils import setup_logger
from models.document import Document
logger = setup_logger("embed_worker")
# 임베딩용 텍스트 최대 길이 (nomic-embed-text: 8192 토큰)
MAX_EMBED_TEXT = 6000
EMBED_MODEL_VERSION = "nomic-embed-text-v1.5"
async def process(document_id: int, session: AsyncSession) -> None:
"""문서 벡터 임베딩 생성"""
doc = await session.get(Document, document_id)
if not doc:
raise ValueError(f"문서 ID {document_id}를 찾을 수 없음")
if not doc.extracted_text:
raise ValueError(f"문서 ID {document_id}: extracted_text가 비어있음")
# title + 본문 앞부분을 결합하여 임베딩 입력 생성
title_part = doc.title or ""
text_part = doc.extracted_text[:MAX_EMBED_TEXT]
embed_input = f"{title_part}\n\n{text_part}".strip()
if not embed_input:
logger.warning(f"[임베딩] document_id={document_id}: 빈 텍스트, 스킵")
return
client = AIClient()
try:
vector = await client.embed(embed_input)
doc.embedding = vector
doc.embed_model_version = EMBED_MODEL_VERSION
doc.embedded_at = datetime.now(timezone.utc)
logger.info(f"[임베딩] document_id={document_id}: {len(vector)}차원 벡터 생성")
finally:
await client.close()
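
One caveat: nomic-embed-text v1.5 expects a task-instruction prefix on its inputs. If AIClient.embed does not prepend one internally (that code is not in this diff), the input built above would want something along these lines:

# Assumes client.embed sends text verbatim. nomic-embed-text v1.5 pairs
# "search_document: " at index time with "search_query: " at query time.
embed_input = f"search_document: {title_part}\n\n{text_part}".strip()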

workers/extract_worker.py

@@ -0,0 +1,80 @@
"""텍스트 추출 워커 — kordoc 호출 또는 직접 파일 읽기"""
from datetime import datetime, timezone
from pathlib import Path
import httpx
from sqlalchemy.ext.asyncio import AsyncSession
from core.config import settings
from core.utils import setup_logger
from models.document import Document
logger = setup_logger("extract_worker")
# kordoc으로 파싱 가능한 포맷
KORDOC_FORMATS = {"hwp", "hwpx", "pdf"}
# 직접 읽기 가능한 텍스트 포맷
TEXT_FORMATS = {"md", "txt", "csv", "json", "xml", "html"}
# OCR 필요 이미지 포맷 (Phase 2)
IMAGE_FORMATS = {"jpg", "jpeg", "png", "tiff", "tif", "bmp", "gif"}
EXTRACTOR_VERSION = "kordoc@1.7"
async def process(document_id: int, session: AsyncSession) -> None:
"""문서 텍스트 추출"""
doc = await session.get(Document, document_id)
if not doc:
raise ValueError(f"문서 ID {document_id}를 찾을 수 없음")
fmt = doc.file_format.lower()
full_path = Path(settings.nas_mount_path) / doc.file_path
# 텍스트 파일 — 직접 읽기
if fmt in TEXT_FORMATS:
if not full_path.exists():
raise FileNotFoundError(f"파일 없음: {full_path}")
text = full_path.read_text(encoding="utf-8", errors="replace")
doc.extracted_text = text
doc.extracted_at = datetime.now(timezone.utc)
doc.extractor_version = "direct_read"
logger.info(f"[텍스트] {doc.file_path} ({len(text)}자)")
return
# 이미지 — 스킵 (Phase 2 OCR)
if fmt in IMAGE_FORMATS:
doc.extracted_text = ""
doc.extracted_at = datetime.now(timezone.utc)
doc.extractor_version = "skip_image"
logger.info(f"[이미지] {doc.file_path} — OCR 미구현, 스킵")
return
# kordoc 파싱 (HWP/HWPX/PDF)
if fmt in KORDOC_FORMATS:
# 컨테이너 내부 경로: /documents/{file_path}
container_path = f"/documents/{doc.file_path}"
async with httpx.AsyncClient(timeout=60) as client:
resp = await client.post(
f"{settings.kordoc_endpoint}/parse",
json={"filePath": container_path},
)
if resp.status_code == 404:
raise FileNotFoundError(f"kordoc: 파일 없음 — {container_path}")
if resp.status_code == 422:
raise ValueError(f"kordoc: 파싱 실패 — {resp.json().get('error', 'unknown')}")
resp.raise_for_status()
data = resp.json()
doc.extracted_text = data.get("markdown", "")
doc.extracted_at = datetime.now(timezone.utc)
doc.extractor_version = EXTRACTOR_VERSION
logger.info(f"[kordoc] {doc.file_path} ({len(doc.extracted_text)}자)")
return
# 미지원 포맷
doc.extracted_text = ""
doc.extracted_at = datetime.now(timezone.utc)
doc.extractor_version = f"unsupported_{fmt}"
logger.warning(f"[미지원] {doc.file_path} (format={fmt})")
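
The server side of this call is the kordoc /parse endpoint from the first bullet of the commit message; its diff is not shown here. A minimal FastAPI sketch consistent with the contract the worker expects (404 for a missing file, 422 with an error field on parse failure, a markdown field on success); the kordoc.parse call is an assumption, not the shipped code:

import kordoc  # assumed entrypoint into the kordoc parsing lib; actual API may differ
from pathlib import Path

from fastapi import FastAPI
from fastapi.responses import JSONResponse
from pydantic import BaseModel

app = FastAPI()


class ParseRequest(BaseModel):
    filePath: str


@app.post("/parse")
async def parse(req: ParseRequest):
    path = Path(req.filePath)
    if not path.exists():
        return JSONResponse(status_code=404, content={"error": "file not found"})
    try:
        markdown = kordoc.parse(path)  # assumed call; returns markdown text
    except Exception as e:
        return JSONResponse(status_code=422, content={"error": str(e)})
    return {"markdown": markdown}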

workers/queue_consumer.py

@@ -0,0 +1,117 @@
"""처리 큐 소비자 — APScheduler에서 1분 간격으로 호출"""
from datetime import datetime, timedelta, timezone
from sqlalchemy import select, update
from core.database import async_session
from core.utils import setup_logger
from models.queue import ProcessingQueue
logger = setup_logger("queue_consumer")
# stage별 배치 크기
BATCH_SIZE = {"extract": 5, "classify": 3, "embed": 1}
STALE_THRESHOLD_MINUTES = 10
async def reset_stale_items():
"""processing 상태로 10분 이상 방치된 항목 복구"""
cutoff = datetime.now(timezone.utc) - timedelta(minutes=STALE_THRESHOLD_MINUTES)
async with async_session() as session:
result = await session.execute(
update(ProcessingQueue)
.where(
ProcessingQueue.status == "processing",
ProcessingQueue.started_at < cutoff,
)
.values(status="pending", started_at=None)
)
if result.rowcount > 0:
await session.commit()
logger.warning(f"stale 항목 {result.rowcount}건 복구")
async def enqueue_next_stage(document_id: int, current_stage: str, session):
"""현재 stage 완료 후 다음 stage를 pending으로 등록"""
next_stages = {"extract": "classify", "classify": "embed"}
next_stage = next_stages.get(current_stage)
if not next_stage:
return
# 이미 존재하는지 확인 (중복 방지)
existing = await session.execute(
select(ProcessingQueue).where(
ProcessingQueue.document_id == document_id,
ProcessingQueue.stage == next_stage,
ProcessingQueue.status.in_(["pending", "processing"]),
)
)
if existing.scalar_one_or_none():
return
session.add(ProcessingQueue(
document_id=document_id,
stage=next_stage,
status="pending",
))
async def consume_queue():
"""큐에서 pending 항목을 가져와 stage별 워커 실행"""
# 지연 임포트 (순환 참조 방지)
from workers.extract_worker import process as extract_process
from workers.classify_worker import process as classify_process
from workers.embed_worker import process as embed_process
workers = {
"extract": extract_process,
"classify": classify_process,
"embed": embed_process,
}
# stale 항목 복구
await reset_stale_items()
for stage, worker_fn in workers.items():
batch_size = BATCH_SIZE.get(stage, 3)
async with async_session() as session:
result = await session.execute(
select(ProcessingQueue)
.where(
ProcessingQueue.stage == stage,
ProcessingQueue.status == "pending",
)
.order_by(ProcessingQueue.created_at)
.limit(batch_size)
)
items = result.scalars().all()
for item in items:
item.status = "processing"
item.started_at = datetime.now(timezone.utc)
item.attempts += 1
await session.commit()
try:
await worker_fn(item.document_id, session)
item.status = "completed"
item.completed_at = datetime.now(timezone.utc)
await enqueue_next_stage(item.document_id, stage, session)
await session.commit()
logger.info(f"[{stage}] document_id={item.document_id} 완료")
except Exception as e:
await session.rollback()
# 세션에서 item 다시 로드
item = await session.get(ProcessingQueue, item.id)
item.error_message = str(e)[:500]
if item.attempts >= item.max_attempts:
item.status = "failed"
logger.error(f"[{stage}] document_id={item.document_id} 영구 실패: {e}")
else:
item.status = "pending"
item.started_at = None
logger.warning(f"[{stage}] document_id={item.document_id} 재시도 예정 ({item.attempts}/{item.max_attempts}): {e}")
await session.commit()
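
For orientation, the ProcessingQueue columns this consumer reads and writes, inferred from the code above; the real model lives in models/queue.py, which is not part of the diffs shown, so types and defaults here are assumptions:

# Reading aid only: column set inferred from usage in queue_consumer.py
from datetime import datetime

from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase):  # the real Base presumably comes from core.database
    pass


class ProcessingQueue(Base):
    __tablename__ = "processing_queue"

    id: Mapped[int] = mapped_column(primary_key=True)
    document_id: Mapped[int]
    stage: Mapped[str]            # "extract" | "classify" | "embed"
    status: Mapped[str]           # "pending" | "processing" | "completed" | "failed"
    attempts: Mapped[int] = mapped_column(default=0)
    max_attempts: Mapped[int] = mapped_column(default=3)  # default value assumed
    error_message: Mapped[str | None]
    created_at: Mapped[datetime]  # ordering key for batch selection
    started_at: Mapped[datetime | None]
    completed_at: Mapped[datetime | None]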