feat: AI Gateway Phase 1 - FastAPI 코어 구현
GPU 서버 중앙 AI 라우팅 서비스 초기 구현: - OpenAI 호환 API (/v1/chat/completions, /v1/models, /v1/embeddings) - 모델 레지스트리 + 백엔드 헬스체크 (30초 루프) - Ollama SSE 프록시 (NDJSON → OpenAI SSE 변환) - JWT 인증 이중 경로 (httpOnly 쿠키 + Bearer 토큰) - owner/guest 역할 분리, 로그인 rate limiting - 백엔드별 rate limiting (NanoClaude 대비) - SQLite 스키마 사전 정의 (aiosqlite + WAL) - Docker Compose + Caddy 리버스 프록시 Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
92
hub-api/routers/chat.py
Normal file
92
hub-api/routers/chat.py
Normal file
@@ -0,0 +1,92 @@
|
||||
from typing import List, Optional
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Request
|
||||
from fastapi.responses import JSONResponse, StreamingResponse
|
||||
from pydantic import BaseModel
|
||||
|
||||
from middleware.rate_limit import check_backend_rate_limit
|
||||
from services import proxy_ollama
|
||||
from services.registry import registry
|
||||
|
||||
router = APIRouter(prefix="/v1", tags=["chat"])
|
||||
|
||||
|
||||
class ChatMessage(BaseModel):
    """A single message in an OpenAI-style chat conversation."""

    # Speaker role, e.g. "system", "user", or "assistant" — not validated here.
    role: str
    # Plain-text content of the message.
    content: str
|
||||
|
||||
|
||||
class ChatRequest(BaseModel):
    """Request body for POST /v1/chat/completions (OpenAI-compatible)."""

    # Model identifier; resolved to a concrete backend via the registry.
    model: str
    # Conversation history, oldest first.
    messages: List[ChatMessage]
    # When True, the response is a Server-Sent Events stream instead of one JSON body.
    stream: bool = False
    # Optional sampling temperature, forwarded to the backend only when set.
    temperature: Optional[float] = None
    # Optional cap on the number of generated tokens.
    max_tokens: Optional[int] = None
|
||||
|
||||
|
||||
@router.post("/chat/completions")
async def chat_completions(body: ChatRequest, request: Request):
    """OpenAI-compatible chat completions endpoint.

    Authenticates via ``request.state.role`` (attached by auth middleware),
    resolves the requested model to a backend through the registry, applies
    per-backend rate limiting, and proxies the request. Only the ``"ollama"``
    backend type is implemented so far.

    Raises:
        HTTPException 401: caller is unauthenticated.
        HTTPException 404: model unknown or not visible to this role.
        HTTPException 501: backend type has no proxy implementation yet.
    """
    # Absent role attribute means the auth middleware did not authenticate the caller.
    role = getattr(request.state, "role", "anonymous")
    if role == "anonymous":
        raise HTTPException(
            status_code=401,
            detail={"error": {"message": "Authentication required", "type": "auth_error", "code": "unauthorized"}},
        )

    # Resolve model to backend; resolution is role-aware (owner vs guest).
    result = registry.resolve_model(body.model, role)
    if not result:
        raise HTTPException(
            status_code=404,
            detail={
                "error": {
                    "message": f"Model '{body.model}' not found or not available",
                    "type": "invalid_request_error",
                    "code": "model_not_found",
                }
            },
        )

    # model_info is currently unused here but kept for parity with the resolver's contract.
    backend, model_info = result

    # Enforce the per-backend rate limit, then record this request so the
    # limiter's counters reflect it for subsequent calls.
    check_backend_rate_limit(backend.id)
    registry.record_request(backend.id)

    messages = [{"role": m.role, "content": m.content} for m in body.messages]

    # Forward only the generation options the client explicitly set.
    kwargs = {}
    if body.temperature is not None:
        kwargs["temperature"] = body.temperature
    # FIX: max_tokens was accepted by ChatRequest but silently dropped;
    # forward it so clients can bound generation length.
    # NOTE(review): assumes the proxy accepts a max_tokens kwarg and maps it
    # to Ollama's options.num_predict — confirm in services.proxy_ollama.
    if body.max_tokens is not None:
        kwargs["max_tokens"] = body.max_tokens

    # Route to the appropriate proxy implementation.
    if backend.type == "ollama":
        if body.stream:
            # The proxy converts Ollama NDJSON chunks into OpenAI-style SSE.
            return StreamingResponse(
                proxy_ollama.stream_chat(
                    backend.url, body.model, messages, **kwargs
                ),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",   # SSE responses must not be cached
                    "X-Accel-Buffering": "no",     # disable reverse-proxy buffering
                },
            )
        else:
            result = await proxy_ollama.complete_chat(
                backend.url, body.model, messages, **kwargs
            )
            return JSONResponse(content=result)

    # Placeholder until additional backend types get proxy implementations.
    raise HTTPException(
        status_code=501,
        detail={
            "error": {
                "message": f"Backend type '{backend.type}' not yet implemented",
                "type": "api_error",
                "code": "not_implemented",
            }
        },
    )
|
||||
Reference in New Issue
Block a user