[
  {
    "id": "ollama-gpu",
    "type": "ollama",
    "url": "http://host.docker.internal:11434",
    "models": [
      {
        "id": "qwen3.5:9b-q8_0",
        "capabilities": ["chat"],
        "priority": 1
      },
      {
        "id": "qwen3-vl:8b",
        "capabilities": ["chat", "vision"],
        "priority": 1
      }
    ],
    "access": "all",
    "rate_limit": null
  }
]