feat: AI Gateway Phase 1 - FastAPI 코어 구현

GPU 서버 중앙 AI 라우팅 서비스 초기 구현:
- OpenAI 호환 API (/v1/chat/completions, /v1/models, /v1/embeddings)
- 모델 레지스트리 + 백엔드 헬스체크 (30초 루프)
- Ollama SSE 프록시 (NDJSON → OpenAI SSE 변환)
- JWT 인증 이중 경로 (httpOnly 쿠키 + Bearer 토큰)
- owner/guest 역할 분리, 로그인 rate limiting
- 백엔드별 rate limiting (NanoClaude 대비)
- SQLite 스키마 사전 정의 (aiosqlite + WAL)
- Docker Compose + Caddy 리버스 프록시

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Hyungi Ahn
2026-03-31 13:41:46 +09:00
commit 3794afff95
27 changed files with 1121 additions and 0 deletions

View File

79
hub-api/routers/auth.py Normal file
View File

@@ -0,0 +1,79 @@
from fastapi import APIRouter, Request, Response
from pydantic import BaseModel
from config import settings
from middleware.auth import (
check_login_rate_limit,
create_token,
record_login_attempt,
)
# Router for password-based auth: login/logout plus a "who am I" probe.
router = APIRouter(prefix="/auth", tags=["auth"])


class LoginRequest(BaseModel):
    """Login payload: the shared owner or guest password."""

    # Compared against settings.owner_password / settings.guest_password.
    password: str


class LoginResponse(BaseModel):
    """Successful login: the resolved role plus the bearer token."""

    role: str   # "owner" or "guest"
    token: str  # JWT, also set as an httpOnly cookie by /auth/login
@router.post("/login")
async def login(body: LoginRequest, request: Request, response: Response):
    """Authenticate with the shared owner/guest password and mint a JWT.

    On success the token is both set as an httpOnly cookie (for the web UI)
    and returned in the body (for API clients using Bearer auth).

    Returns 429 when the per-IP login rate limit is exceeded and 401 for a
    wrong password, both as OpenAI-style error envelopes.
    """
    import hmac  # stdlib; local import keeps the module's import block untouched

    def _pw_match(candidate: str, expected: str) -> bool:
        # Constant-time comparison: plain `==` short-circuits on the first
        # differing character and leaks password-prefix info via timing.
        # Encode to bytes so non-ASCII passwords are accepted too.
        return hmac.compare_digest(candidate.encode("utf-8"), expected.encode("utf-8"))

    ip = request.client.host if request.client else "unknown"
    if not check_login_rate_limit(ip):
        return _error_response(429, "Too many login attempts. Try again in 1 minute.")
    # Record the attempt before verifying, so failed guesses also count.
    record_login_attempt(ip)

    if _pw_match(body.password, settings.owner_password):
        role = "owner"
    elif _pw_match(body.password, settings.guest_password):
        role = "guest"
    else:
        return _error_response(401, "Invalid password")

    token = create_token(role)
    # Set httpOnly cookie for web UI
    response.set_cookie(
        key="token",
        value=token,
        httponly=True,
        samesite="lax",
        max_age=settings.jwt_expire_hours * 3600,
    )
    return LoginResponse(role=role, token=token)
@router.get("/me")
async def me(request: Request):
    """Return the authenticated caller's role, or a 401 envelope if anonymous."""
    current_role = getattr(request.state, "role", "anonymous")
    if current_role != "anonymous":
        return {"role": current_role}
    return _error_response(401, "Not authenticated")
@router.post("/logout")
async def logout(response: Response):
    """Clear the auth cookie; clients holding a Bearer token should drop it too."""
    response.delete_cookie("token")
    return {"ok": True}
def _error_response(status_code: int, message: str):
    """Build an OpenAI-style JSON error envelope carrying *status_code*."""
    # Deferred import mirrors the original; keeps module import time minimal.
    from fastapi.responses import JSONResponse

    payload = {
        "error": {
            "message": message,
            "type": "auth_error",
            "code": f"auth_{status_code}",
        }
    }
    return JSONResponse(status_code=status_code, content=payload)

92
hub-api/routers/chat.py Normal file
View File

@@ -0,0 +1,92 @@
from typing import List, Optional
from fastapi import APIRouter, HTTPException, Request
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel
from middleware.rate_limit import check_backend_rate_limit
from services import proxy_ollama
from services.registry import registry
# OpenAI-compatible chat router, mounted under /v1.
router = APIRouter(prefix="/v1", tags=["chat"])


class ChatMessage(BaseModel):
    """One chat turn in OpenAI message format."""

    role: str     # e.g. "system" / "user" / "assistant" — not validated here
    content: str


class ChatRequest(BaseModel):
    """Body of POST /v1/chat/completions (OpenAI-compatible subset)."""

    model: str
    messages: List[ChatMessage]
    stream: bool = False
    temperature: Optional[float] = None
    max_tokens: Optional[int] = None
@router.post("/chat/completions")
async def chat_completions(body: ChatRequest, request: Request):
    """OpenAI-compatible chat completion endpoint.

    Resolves the requested model to a backend via the registry, enforces the
    per-backend rate limit, and proxies the request — as an SSE stream when
    ``stream=true``, otherwise as a single JSON response.

    Raises 401 (unauthenticated), 404 (unknown/unavailable model), and 501
    (backend type not implemented yet), all as OpenAI-style error envelopes.
    """
    role = getattr(request.state, "role", "anonymous")
    if role == "anonymous":
        raise HTTPException(
            status_code=401,
            detail={"error": {"message": "Authentication required", "type": "auth_error", "code": "unauthorized"}},
        )

    # Resolve model to backend (role gates guest-visible models).
    resolved = registry.resolve_model(body.model, role)
    if not resolved:
        raise HTTPException(
            status_code=404,
            detail={
                "error": {
                    "message": f"Model '{body.model}' not found or not available",
                    "type": "invalid_request_error",
                    "code": "model_not_found",
                }
            },
        )
    backend, model_info = resolved

    # Check rate limit, then record this request against the backend's budget.
    check_backend_rate_limit(backend.id)
    registry.record_request(backend.id)

    messages = [{"role": m.role, "content": m.content} for m in body.messages]
    kwargs = {}
    if body.temperature is not None:
        kwargs["temperature"] = body.temperature
    # Fix: max_tokens was accepted by ChatRequest but silently dropped; forward
    # it like temperature. (Assumes proxy_ollama accepts a max_tokens kwarg and
    # maps it to Ollama's option — TODO confirm against services/proxy_ollama.)
    if body.max_tokens is not None:
        kwargs["max_tokens"] = body.max_tokens

    # Route to appropriate proxy
    if backend.type == "ollama":
        if body.stream:
            return StreamingResponse(
                proxy_ollama.stream_chat(
                    backend.url, body.model, messages, **kwargs
                ),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    # Disable nginx-style proxy buffering so SSE chunks flush.
                    "X-Accel-Buffering": "no",
                },
            )
        completion = await proxy_ollama.complete_chat(
            backend.url, body.model, messages, **kwargs
        )
        return JSONResponse(content=completion)

    # Placeholder for other backend types
    raise HTTPException(
        status_code=501,
        detail={
            "error": {
                "message": f"Backend type '{backend.type}' not yet implemented",
                "type": "api_error",
                "code": "not_implemented",
            }
        },
    )

View File

@@ -0,0 +1,67 @@
from typing import List, Union
from fastapi import APIRouter, HTTPException, Request
from pydantic import BaseModel
from services import proxy_ollama
from services.registry import registry
# OpenAI-compatible embeddings router, mounted under /v1.
router = APIRouter(prefix="/v1", tags=["embeddings"])


class EmbeddingRequest(BaseModel):
    """Body of POST /v1/embeddings: model id plus one or many input strings."""

    model: str
    input: Union[str, List[str]]
@router.post("/embeddings")
async def create_embedding(body: EmbeddingRequest, request: Request):
    """OpenAI-compatible embeddings endpoint, proxied to the owning backend.

    Raises 401 (unauthenticated), 404 (unknown model), 400 (model lacks the
    "embed" capability), and 501 (unsupported backend type).
    NOTE(review): unlike /chat/completions this route applies no backend
    rate limiting — confirm whether that is intentional.
    """

    def _err(message: str, err_type: str, code: str) -> dict:
        # OpenAI-style error envelope shared by all failure branches below.
        return {"error": {"message": message, "type": err_type, "code": code}}

    caller_role = getattr(request.state, "role", "anonymous")
    if caller_role == "anonymous":
        raise HTTPException(
            status_code=401,
            detail=_err("Authentication required", "auth_error", "unauthorized"),
        )

    resolved = registry.resolve_model(body.model, caller_role)
    if not resolved:
        raise HTTPException(
            status_code=404,
            detail=_err(
                f"Model '{body.model}' not found or not available",
                "invalid_request_error",
                "model_not_found",
            ),
        )
    backend, model_info = resolved

    if "embed" not in model_info.capabilities:
        raise HTTPException(
            status_code=400,
            detail=_err(
                f"Model '{body.model}' does not support embeddings",
                "invalid_request_error",
                "capability_mismatch",
            ),
        )

    if backend.type == "ollama":
        return await proxy_ollama.generate_embedding(backend.url, body.model, body.input)

    raise HTTPException(
        status_code=501,
        detail=_err(
            f"Embedding not supported for backend type '{backend.type}'",
            "api_error",
            "not_implemented",
        ),
    )

13
hub-api/routers/gpu.py Normal file
View File

@@ -0,0 +1,13 @@
from fastapi import APIRouter
from services.gpu_monitor import get_gpu_info
# GPU telemetry router (no /v1 prefix — internal, not OpenAI-compatible).
router = APIRouter(tags=["gpu"])
@router.get("/gpu")
async def gpu_status():
    """Report current GPU telemetry, or an error envelope when unavailable."""
    info = await get_gpu_info()
    if info:
        return info
    # Falsy result from the monitor (None/empty) maps to a 200 + error body,
    # matching the other status endpoints' soft-error convention.
    return {"error": {"message": "GPU info unavailable", "type": "api_error", "code": "gpu_unavailable"}}

31
hub-api/routers/health.py Normal file
View File

@@ -0,0 +1,31 @@
from fastapi import APIRouter
from services.gpu_monitor import get_gpu_info
from services.registry import registry
# Health-check router: aggregate status plus per-backend detail.
router = APIRouter(tags=["health"])
@router.get("/health")
async def health():
    """Aggregate service health: registry backend summary plus GPU telemetry."""
    gpu_info = await get_gpu_info()
    payload = {"status": "ok"}
    payload["backends"] = registry.get_health_summary()
    payload["gpu"] = gpu_info
    return payload
@router.get("/health/{backend_id}")
async def backend_health(backend_id: str):
    """Detailed health for one backend: status, model ids, and probe latency."""
    backend = registry.backends.get(backend_id)
    if not backend:
        # Unknown id: soft error body (200), consistent with /gpu's convention.
        return {"error": {"message": f"Backend '{backend_id}' not found"}}
    status = "healthy" if backend.healthy else "down"
    return {
        "id": backend.id,
        "type": backend.type,
        "status": status,
        "models": [model.id for model in backend.models],
        "latency_ms": backend.latency_ms,
    }

12
hub-api/routers/models.py Normal file
View File

@@ -0,0 +1,12 @@
from fastapi import APIRouter, Request
from services.registry import registry
# OpenAI-compatible model listing router, mounted under /v1.
router = APIRouter(prefix="/v1", tags=["models"])
@router.get("/models")
async def list_models(request: Request):
    """OpenAI-compatible model listing, filtered by the caller's role."""
    caller_role = getattr(request.state, "role", "anonymous")
    visible_models = registry.list_models(caller_role)
    return {"object": "list", "data": visible_models}