feat: local AI server scaffolding (FastAPI, RAG, embeddings). Port policy (>=26000), README/API docs, scripts.
This commit is contained in:
13
scripts/dev_server.sh
Executable file
13
scripts/dev_server.sh
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/usr/bin/env bash
# Launch the local AI dev server (uvicorn, auto-reload) with environment
# defaults. Every setting below can be overridden by exporting the variable
# before running this script. Requires a virtualenv at .venv containing
# uvicorn and the `server` package.
set -euo pipefail

# Ollama endpoint and model selection.
export OLLAMA_HOST="${OLLAMA_HOST:-http://localhost:11434}"
export BASE_MODEL="${BASE_MODEL:-qwen2.5:7b-instruct}"
export BOOST_MODEL="${BOOST_MODEL:-qwen2.5:14b-instruct}"
export EMBEDDING_MODEL="${EMBEDDING_MODEL:-nomic-embed-text}"
export INDEX_PATH="${INDEX_PATH:-data/index.jsonl}"
# Port policy: local services bind to ports >= 26000.
export AI_SERVER_PORT="${AI_SERVER_PORT:-26000}"

# Fail with a clear message instead of an opaque `set -e` abort when the
# virtualenv has not been created yet.
if [[ ! -f .venv/bin/activate ]]; then
  printf 'error: .venv not found — create it first: python -m venv .venv\n' >&2
  exit 1
fi
# shellcheck disable=SC1091 — generated at setup time, not present in repo
source .venv/bin/activate

# exec replaces this shell so signals (Ctrl-C, SIGTERM) reach uvicorn directly.
exec uvicorn server.main:app --host 0.0.0.0 --port "$AI_SERVER_PORT" --reload
|
||||
|
||||
Reference in New Issue
Block a user