tweaked localai config a bit more

This commit is contained in:
crowetic 2025-05-19 14:05:34 -07:00
parent 716b23fabf
commit 13028b8d2b

View File

@@ -73,7 +73,11 @@ docker run -d \
   --gpus all \
   -p 8080:8080 \
   -v ~/ai-stack/localai/models:/models \
-  --env ENABLE_BACKENDS=llama-cuda,ollama \
+  -v ~/ai-stack/localai/config:/config \
+  -e ENABLE_BACKENDS=llama-cuda,ollama \
+  -e INCLUDE_DEFAULT_MODELS=true \
+  -e AUTOLOAD_MODELS=true \
+  -e MODEL_PATH=/models \
   --restart unless-stopped \
   localai/localai:latest-aio-gpu-nvidia-cuda-12
@@ -439,7 +443,11 @@ EOF
   --gpus all \
   -p 8080:8080 \
   -v ~/ai-stack/localai/models:/models \
-  --env ENABLE_BACKENDS=llama-cuda,ollama \
+  -v ~/ai-stack/localai/config:/config \
+  -e ENABLE_BACKENDS=llama-cuda,ollama \
+  -e INCLUDE_DEFAULT_MODELS=true \
+  -e AUTOLOAD_MODELS=true \
+  -e MODEL_PATH=/models \
   --restart unless-stopped \
   localai/localai:latest-aio-gpu-nvidia-cuda-12