feat: Conversation sqlite 영구화 — 재시작에도 컨텍스트 유지

- db/database.py: conversation_messages 테이블 + save/load/cleanup 헬퍼
- conversation.py: write-through (memory + DB) + lazy load on first access
  - 메모리 캐시 1시간 TTL, DB 7일 보관
  - add/get/format_for_prompt가 async로 변경
- worker.py: 모든 conversation_store 호출에 await 추가
- main.py lifespan에 startup cleanup 호출 (7일 이상 정리)

서버 재시작 후에도 "방금 그거 더 자세히" 같은 후속 질문에서 컨텍스트가 유지됨

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Hyungi Ahn
2026-04-07 09:02:38 +09:00
parent 26ccdb0f5e
commit 6b36063010
4 changed files with 110 additions and 22 deletions

View File

@@ -240,7 +240,7 @@ async def run(job: Job) -> None:
kst = timezone(timedelta(hours=9))
now_kst = datetime.now(kst)
now_str = now_kst.strftime("%Y년 %m월 %d%H:%M (%A) KST")
history = conversation_store.format_for_prompt(user_id)
history = await conversation_store.format_for_prompt(user_id)
classify_input = f"[현재 시간]\n{now_str}\n\n"
if history:
classify_input += f"[대화 이력]\n{history}\n\n"
@@ -268,7 +268,7 @@ async def run(job: Job) -> None:
logger.info("Job %s classified as '%s'", job.id, action)
# 대화 기록: 사용자 메시지
conversation_store.add(user_id, "user", job.message)
await conversation_store.add(user_id, "user", job.message)
collected: list[str] = []
@@ -281,7 +281,7 @@ async def run(job: Job) -> None:
status_text = await _build_system_status(force_measure=True)
collected.append(status_text)
await state_stream.push(job.id, "result", {"content": status_text})
conversation_store.add(user_id, "assistant", status_text)
await conversation_store.add(user_id, "assistant", status_text)
elif action == "tools":
# === TOOLS: 도구 실행 ===
@@ -299,7 +299,7 @@ async def run(job: Job) -> None:
response = "확인할 일정이 없습니다. 다시 요청해주세요."
collected.append(response)
await state_stream.push(job.id, "result", {"content": response})
conversation_store.add(user_id, "assistant", response)
await conversation_store.add(user_id, "assistant", response)
else:
try:
result = await asyncio.wait_for(execute_tool(tool_name, operation, draft), timeout=TOOL_TIMEOUT)
@@ -309,7 +309,7 @@ async def run(job: Job) -> None:
response = result.get("summary", "") if result["ok"] else result.get("error", "⚠️ 오류")
collected.append(response)
await state_stream.push(job.id, "result", {"content": response})
conversation_store.add(user_id, "assistant", response)
await conversation_store.add(user_id, "assistant", response)
else:
# 일반 도구 실행
try:
@@ -355,13 +355,13 @@ async def run(job: Job) -> None:
collected.append(response)
await state_stream.push(job.id, "result", {"content": response})
conversation_store.add(user_id, "assistant", "".join(collected))
await conversation_store.add(user_id, "assistant", "".join(collected))
elif action == "clarify":
# === CLARIFY: 추가 질문 ===
collected.append(response_text)
await state_stream.push(job.id, "result", {"content": response_text})
conversation_store.add(user_id, "assistant", response_text)
await conversation_store.add(user_id, "assistant", response_text)
elif action == "route" and settings.pipeline_enabled and backend_registry.is_healthy("reasoner"):
# === ROUTE: Gemma reasoning ===
@@ -396,7 +396,7 @@ async def run(job: Job) -> None:
reasoner_system = f"{backend_registry.reasoner.system_prompt}\n\n현재 시간: {now_kst} (한국 표준시)"
# 대화 이력을 OpenAI messages 형식으로 변환 (현재 user 메시지 포함됨)
history_msgs = conversation_store.get(user_id)[-10:]
history_msgs = (await conversation_store.get(user_id))[-10:]
reasoner_messages = [{"role": "system", "content": reasoner_system}]
for m in history_msgs:
reasoner_messages.append({"role": m.role, "content": m.content})
@@ -419,7 +419,7 @@ async def run(job: Job) -> None:
return
if collected:
conversation_store.add(user_id, "assistant", "".join(collected))
await conversation_store.add(user_id, "assistant", "".join(collected))
else:
# === DIRECT: EXAONE 직접 응답 ===
@@ -435,7 +435,7 @@ async def run(job: Job) -> None:
return
if collected:
conversation_store.add(user_id, "assistant", "".join(collected))
await conversation_store.add(user_id, "assistant", "".join(collected))
# --- Complete ---
if not collected: