OrbStack 라이선스 만료로 Mac mini Docker 서비스를 GPU 서버로 통합. nginx → Caddy 전환, 12개 서브도메인 자동 HTTPS, fail2ban Caddy JSON 연동. 주요 변경: - home-caddy: Caddy 리버스 프록시 (Let's Encrypt 자동 HTTPS) - home-fail2ban: Caddy JSON 로그 기반 보안 모니터링 - home-ddns: Cloudflare DDNS (API 키 .env 분리) - gpu-hub-api/web: AI 백엔드 라우터 + 웹 UI (gpu-services에서 이전) - AI 런타임(Ollama) 내부망 전용, 외부는 gpu-hub 인증 게이트웨이 경유 Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
84 lines
2.3 KiB
Python
84 lines
2.3 KiB
Python
"""OpenAI-compatible proxy (MLX server, vLLM, etc.) — SSE passthrough."""
|
|
|
|
from __future__ import annotations
|
|
|
|
import json
|
|
import logging
|
|
from collections.abc import AsyncGenerator
|
|
|
|
import httpx
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
async def stream_chat(
    base_url: str,
    model: str,
    messages: list[dict],
    **kwargs,
) -> AsyncGenerator[str, None]:
    """Proxy an OpenAI-compatible streaming chat completion.

    Forwards ``messages`` to ``{base_url}/v1/chat/completions`` with
    ``stream=True`` and passes the backend's SSE ``data:`` lines through
    unchanged, re-terminating each with a blank line per SSE framing.

    Args:
        base_url: Backend root URL (no trailing ``/v1``), e.g. an MLX
            server or vLLM endpoint.
        model: Model name placed in the request payload.
        messages: OpenAI-style chat messages (``role``/``content`` dicts).
        **kwargs: Extra OpenAI parameters; ``None`` values are dropped
            so backend defaults apply.

    Yields:
        SSE event strings, including the backend's terminal
        ``data: [DONE]`` event. On a non-200 response, a single synthetic
        error chunk followed by ``[DONE]`` is yielded instead.
    """
    payload = {
        "model": model,
        "messages": messages,
        "stream": True,
        **{k: v for k, v in kwargs.items() if v is not None},
    }

    async with httpx.AsyncClient(timeout=120.0) as client:
        async with client.stream(
            "POST",
            f"{base_url}/v1/chat/completions",
            json=payload,
        ) as resp:
            if resp.status_code != 200:
                # Surface backend errors to the client as a well-formed
                # SSE chunk instead of raising mid-stream.
                body = await resp.aread()
                error_msg = body.decode("utf-8", errors="replace")
                yield _error_event(f"Backend error ({resp.status_code}): {error_msg}")
                return

            async for line in resp.aiter_lines():
                if not line.strip():
                    continue
                # Pass through SSE data lines as-is (already in OpenAI
                # format). This also covers "data: [DONE]" — the previous
                # separate elif for it was unreachable, since
                # startswith("data: ") matches that line first.
                if line.startswith("data: "):
                    yield f"{line}\n\n"
|
|
|
|
|
|
async def complete_chat(
    base_url: str,
    model: str,
    messages: list[dict],
    **kwargs,
) -> dict:
    """Run a non-streaming OpenAI-compatible chat completion.

    Posts to ``{base_url}/v1/chat/completions`` with ``stream=False`` and
    returns the backend's parsed JSON body.

    Args:
        base_url: Backend root URL (no trailing ``/v1``).
        model: Model name placed in the request payload.
        messages: OpenAI-style chat messages (``role``/``content`` dicts).
        **kwargs: Extra OpenAI parameters; ``None`` values are dropped
            so backend defaults apply.

    Returns:
        The backend's JSON response as a dict.

    Raises:
        httpx.HTTPStatusError: If the backend responds with an error status.
    """
    # Drop unset optional parameters before merging into the request body.
    extra_params = {key: value for key, value in kwargs.items() if value is not None}
    request_body = {
        "model": model,
        "messages": messages,
        "stream": False,
        **extra_params,
    }

    async with httpx.AsyncClient(timeout=120.0) as client:
        response = await client.post(f"{base_url}/v1/chat/completions", json=request_body)
        response.raise_for_status()
        return response.json()
|
|
|
|
|
|
def _error_event(message: str) -> str:
|
|
error = {
|
|
"id": "chatcmpl-gateway",
|
|
"object": "chat.completion.chunk",
|
|
"model": "error",
|
|
"choices": [
|
|
{
|
|
"index": 0,
|
|
"delta": {"content": f"[Error] {message}"},
|
|
"finish_reason": "stop",
|
|
}
|
|
],
|
|
}
|
|
return f"data: {json.dumps(error)}\n\ndata: [DONE]\n\n"
|