diff --git a/setup-ai-stack.sh b/setup-ai-stack.sh
index 98937dc..b177b8f 100644
--- a/setup-ai-stack.sh
+++ b/setup-ai-stack.sh
@@ -73,7 +73,11 @@ docker run -d \
   --gpus all \
   -p 8080:8080 \
   -v ~/ai-stack/localai/models:/models \
-  --env ENABLE_BACKENDS=llama-cuda,ollama \
+  -v ~/ai-stack/localai/config:/config \
+  -e ENABLE_BACKENDS=llama-cuda,ollama \
+  -e INCLUDE_DEFAULT_MODELS=true \
+  -e AUTOLOAD_MODELS=true \
+  -e MODEL_PATH=/models \
   --restart unless-stopped \
   localai/localai:latest-aio-gpu-nvidia-cuda-12

@@ -439,7 +443,11 @@ EOF
   --gpus all \
   -p 8080:8080 \
   -v ~/ai-stack/localai/models:/models \
-  --env ENABLE_BACKENDS=llama-cuda,ollama \
+  -v ~/ai-stack/localai/config:/config \
+  -e ENABLE_BACKENDS=llama-cuda,ollama \
+  -e INCLUDE_DEFAULT_MODELS=true \
+  -e AUTOLOAD_MODELS=true \
+  -e MODEL_PATH=/models \
   --restart unless-stopped \
   localai/localai:latest-aio-gpu-nvidia-cuda-12
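
As a quick smoke test after applying this patch (a sketch, assuming the container publishes port 8080 on localhost and that the AIO image serves LocalAI's OpenAI-compatible API there):

    # Check that the container is up and the new config mount exists on the host.
    docker ps --filter publish=8080
    ls ~/ai-stack/localai/config

    # LocalAI's OpenAI-compatible endpoint; should list models once
    # INCLUDE_DEFAULT_MODELS/AUTOLOAD_MODELS have taken effect.
    curl http://localhost:8080/v1/models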