gpu-services/nanoclaude/services/model_adapter.py
Hyungi Ahn c4c32170f1 feat: NanoClaude Phase 2 — EXAONE→Gemma pipeline, queue, status API
- ModelAdapter: generic OpenAI-compat adapter (stream/complete/health)
- BackendRegistry: health-check loop over the rewriter (EXAONE) and reasoner (Gemma4) backends
- Two-stage pipeline: EXAONE rewrite → Gemma reasoning, with the rewrite result exposed as an SSE event (see the sketch below)
- Fallback: EXAONE-only mode when the Mac mini is down; automatic switchover on mid-stream failure
- Cancel-safe: cancellation is checked before/after the rewrite, inside the streaming loop, and on the fallback path
- Rewrite heartbeat: processing events every 2 seconds while waiting on complete_chat
- JobQueue: Semaphore(3)-based concurrency limit with accurate queue positions (sketched below)
- GET /chat/{job_id}/status and GET /queue/stats endpoints
- DB: new rewrite_model, reasoning_model, rewritten_message columns

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-06 12:04:15 +09:00
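
The two-stage handoff and its fallback are implemented outside this file. As a rough orientation, here is a sketch of how two ModelAdapter instances might be chained; pipeline_chat, the base URLs, model names, and prompts are illustrative assumptions, not code from this commit.

# Hypothetical pipeline wiring; every name, URL, and prompt below is a
# placeholder, not taken from the commit.
from collections.abc import AsyncGenerator

rewriter = ModelAdapter(
    name="rewriter",
    base_url="http://localhost:11434",
    model="exaone",
    system_prompt="Rewrite the user's message as one clear question.",
)
reasoner = ModelAdapter(
    name="reasoner",
    base_url="http://mac-mini.local:8080",
    model="gemma",
    system_prompt="Answer step by step.",
)

async def pipeline_chat(message: str) -> AsyncGenerator[str, None]:
    # EXAONE-only mode when the reasoner backend is down.
    if not await reasoner.health_check():
        async for chunk in rewriter.stream_chat(message):
            yield chunk
        return
    # Stage 1: non-streaming rewrite on EXAONE.
    rewritten = await rewriter.complete_chat(message)
    # Stage 2: stream the reasoning model; on mid-stream failure, degrade to
    # EXAONE (simplified: real code would track already-emitted text).
    try:
        async for chunk in reasoner.stream_chat(rewritten):
            yield chunk
    except Exception:
        async for chunk in rewriter.stream_chat(rewritten):
            yield chunk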

110 lines
3.8 KiB
Python
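
The Semaphore(3) JobQueue from the commit message is likewise not part of this file. A minimal sketch of the idea follows, with all names assumed rather than taken from the repo; the file itself begins after it.

# Hypothetical sketch of a Semaphore-capped job queue with queue positions.
import asyncio
from collections.abc import Awaitable, Callable

class JobQueue:
    """Cap concurrent jobs and report each waiting job's position in line."""

    def __init__(self, limit: int = 3) -> None:
        self._sem = asyncio.Semaphore(limit)
        self._waiting: list[str] = []  # job ids still waiting for a slot

    def position(self, job_id: str) -> int:
        """1-based position in the wait line; 0 once the job is running."""
        try:
            return self._waiting.index(job_id) + 1
        except ValueError:
            return 0

    async def run(self, job_id: str, job: Callable[[], Awaitable[str]]) -> str:
        self._waiting.append(job_id)
        try:
            async with self._sem:
                self._waiting.remove(job_id)
                return await job()
        finally:
            if job_id in self._waiting:  # cancelled while still queued
                self._waiting.remove(job_id)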

"""ModelAdapter — 범용 OpenAI-compat 모델 어댑터."""
from __future__ import annotations
import json
import logging
from collections.abc import AsyncGenerator
import httpx
logger = logging.getLogger(__name__)
class ModelAdapter:
    """Generic adapter for any OpenAI-compatible /v1/chat/completions backend.

    Ollama, MLX, and similar servers are all called through the same interface.
    """

    def __init__(
        self,
        name: str,
        base_url: str,
        model: str,
        system_prompt: str,
        temperature: float = 0.7,
        timeout: float = 120.0,
    ):
        self.name = name
        self.base_url = base_url
        self.model = model
        self.system_prompt = system_prompt
        self.temperature = temperature
        self.timeout = timeout

    async def stream_chat(self, message: str) -> AsyncGenerator[str, None]:
        """Streaming call. Yields content chunks as they arrive."""
        payload = {
            "model": self.model,
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": message},
            ],
            "stream": True,
            "temperature": self.temperature,
        }
        async with httpx.AsyncClient(timeout=self.timeout) as client:
            try:
                async with client.stream(
                    "POST",
                    f"{self.base_url}/v1/chat/completions",
                    json=payload,
                ) as resp:
                    if resp.status_code != 200:
                        body = await resp.aread()
                        logger.error("%s error %d: %s", self.name, resp.status_code, body.decode())
                        raise RuntimeError(f"{self.name} request failed ({resp.status_code})")
                    # Parse the SSE stream: each data line carries one JSON chunk.
                    async for line in resp.aiter_lines():
                        line = line.strip()
                        if not line or not line.startswith("data: "):
                            continue
                        payload_str = line[len("data: "):]
                        if payload_str == "[DONE]":
                            return
                        try:
                            chunk = json.loads(payload_str)
                            delta = chunk.get("choices", [{}])[0].get("delta", {})
                            content = delta.get("content", "")
                            if content:
                                yield content
                        except (json.JSONDecodeError, IndexError, KeyError):
                            # Skip malformed chunks instead of killing the stream.
                            continue
            except httpx.ConnectError:
                logger.error("%s connection failed: %s", self.name, self.base_url)
                raise
            except httpx.ReadTimeout:
                logger.error("%s read timeout", self.name)
                raise

    async def complete_chat(self, message: str) -> str:
        """Non-streaming call. Returns the full response text."""
        payload = {
            "model": self.model,
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": message},
            ],
            "stream": False,
            "temperature": self.temperature,
        }
        async with httpx.AsyncClient(timeout=self.timeout) as client:
            resp = await client.post(
                f"{self.base_url}/v1/chat/completions",
                json=payload,
            )
            resp.raise_for_status()
            data = resp.json()
            return data["choices"][0]["message"]["content"]

    async def health_check(self) -> bool:
        """GET /v1/models with a 3-second timeout."""
        try:
            async with httpx.AsyncClient(timeout=3.0) as client:
                resp = await client.get(f"{self.base_url}/v1/models")
                # Any non-5xx answer counts as alive: the endpoint responded.
                return resp.status_code < 500
        except Exception:
            return False
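
For completeness, a minimal way to exercise this adapter; the base URL, model name, and prompts are placeholders, not values from the repo.

# Hypothetical smoke test for the adapter; the server address and model
# name stand in for any OpenAI-compatible backend (e.g. Ollama's /v1 API).
import asyncio

async def main() -> None:
    adapter = ModelAdapter(
        name="demo",
        base_url="http://localhost:11434",
        model="exaone",
        system_prompt="You are a helpful assistant.",
    )
    if not await adapter.health_check():
        raise SystemExit("backend is not reachable")
    async for chunk in adapter.stream_chat("Hello!"):
        print(chunk, end="", flush=True)
    print()

asyncio.run(main())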