Files
gpu-services/nanoclaude/routers/chat.py
Hyungi Ahn d946b769e5 feat: NanoClaude Phase 1 — 비동기 job 기반 AI Gateway 코어 구현
POST /chat → job_id ACK, GET /chat/{job_id}/stream → SSE 스트리밍,
EXAONE Ollama adapter, JobManager, StateStream, Worker 구조

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-06 11:12:04 +09:00

62 lines
1.8 KiB
Python

"""Chat router — POST /chat, GET /chat/{job_id}/stream, POST /chat/{job_id}/cancel."""
from __future__ import annotations
import asyncio
from fastapi import APIRouter, HTTPException
from fastapi.responses import StreamingResponse
from models.schemas import CancelResponse, ChatRequest, ChatResponse
from services import worker
from services.job_manager import job_manager
from services.state_stream import state_stream
router = APIRouter(tags=["chat"])
@router.post("/chat", response_model=ChatResponse)
async def create_chat(body: ChatRequest):
    """Acknowledge immediately with a job_id; EXAONE processing starts in the background.

    Creates the job and its state stream, then schedules the worker as an
    asyncio task. The task handle is attached to the job so it can be
    cancelled later and is not garbage-collected mid-flight.
    """
    new_job = job_manager.create(body.message)
    state_stream.create(new_job.id)
    worker_task = asyncio.create_task(worker.run(new_job))
    job_manager.attach_task(new_job.id, worker_task)
    return ChatResponse(job_id=new_job.id)
@router.get("/chat/{job_id}/stream")
async def stream_chat(job_id: str):
    """Deliver job state and results to the client over an SSE stream.

    Raises 404 if the job_id is unknown. The response disables caching and
    nginx proxy buffering so events reach the client without delay.
    """
    # Guard clause: unknown job -> 404 before opening any stream.
    if job_manager.get(job_id) is None:
        raise HTTPException(status_code=404, detail="Job not found")
    sse_headers = {
        "Cache-Control": "no-cache",
        "X-Accel-Buffering": "no",
    }
    return StreamingResponse(
        _stream_with_cleanup(job_id),
        media_type="text/event-stream",
        headers=sse_headers,
    )
async def _stream_with_cleanup(job_id: str):
    """Yield SSE chunks for *job_id*, guaranteeing stream cleanup.

    The ``finally`` ensures ``state_stream.cleanup`` runs even when the
    client disconnects mid-stream (generator close) or the subscription
    raises.
    """
    try:
        subscription = state_stream.subscribe(job_id)
        async for event_chunk in subscription:
            yield event_chunk
    finally:
        # Always release per-job stream resources, even on client disconnect.
        state_stream.cleanup(job_id)
@router.post("/chat/{job_id}/cancel", response_model=CancelResponse)
async def cancel_chat(job_id: str):
    """Cancel an in-progress job.

    Raises 404 when the job does not exist or has already finished;
    otherwise reports the cancellation back to the caller.
    """
    if not job_manager.cancel(job_id):
        raise HTTPException(status_code=404, detail="Job not found or already finished")
    return CancelResponse(status="cancelled")