feat: 맥미니 MLX 연동 — OpenAI-compat 프록시 + 모델 배치 정정
- proxy_openai.py 추가: MLX 서버 SSE 패스스루
- chat.py: openai-compat 백엔드 타입 라우팅 추가
- backends.json: GPU=embed(bge-m3)만, 맥미니MLX=채팅(qwen3.5:35b-a3b)
- LAN IP(192.168.1.122) 사용 (같은 서브넷, Tailscale 불필요)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -5,7 +5,7 @@ from fastapi.responses import JSONResponse, StreamingResponse
|
||||
from pydantic import BaseModel
|
||||
|
||||
from middleware.rate_limit import check_backend_rate_limit
|
||||
from services import proxy_ollama
|
||||
from services import proxy_ollama, proxy_openai
|
||||
from services.registry import registry
|
||||
|
||||
router = APIRouter(prefix="/v1", tags=["chat"])
|
||||
@@ -79,7 +79,24 @@ async def chat_completions(body: ChatRequest, request: Request):
|
||||
)
|
||||
return JSONResponse(content=result)
|
||||
|
||||
# Placeholder for other backend types
|
||||
if backend.type == "openai-compat":
|
||||
if body.stream:
|
||||
return StreamingResponse(
|
||||
proxy_openai.stream_chat(
|
||||
backend.url, body.model, messages, **kwargs
|
||||
),
|
||||
media_type="text/event-stream",
|
||||
headers={
|
||||
"Cache-Control": "no-cache",
|
||||
"X-Accel-Buffering": "no",
|
||||
},
|
||||
)
|
||||
else:
|
||||
result = await proxy_openai.complete_chat(
|
||||
backend.url, body.model, messages, **kwargs
|
||||
)
|
||||
return JSONResponse(content=result)
|
||||
|
||||
raise HTTPException(
|
||||
status_code=501,
|
||||
detail={
|
||||
|
||||
Reference in New Issue
Block a user