diff --git a/.devcontainer/docker-compose-devcontainer.yml b/.devcontainer/docker-compose-devcontainer.yml
index 81610ade5..9841e9d59 100644
--- a/.devcontainer/docker-compose-devcontainer.yml
+++ b/.devcontainer/docker-compose-devcontainer.yml
@@ -10,7 +10,8 @@ services:
       - 8080:8080
     volumes:
       - localai_workspace:/workspace
-      - ../models:/host-models
+      - models:/host-models
+      - backends:/host-backends
       - ./customization:/devcontainer-customization
     command: /bin/sh -c "while sleep 1000; do :; done"
     cap_add:
@@ -39,6 +40,9 @@ services:
       - GF_SECURITY_ADMIN_PASSWORD=grafana
     volumes:
       - ./grafana:/etc/grafana/provisioning/datasources
+
 volumes:
   prom_data:
-  localai_workspace:
\ No newline at end of file
+  localai_workspace:
+  models:
+  backends:
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 765a3fb63..9523457bc 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -20,10 +20,14 @@ services:
       - MODELS_PATH=/models
       # - DEBUG=true
     volumes:
-      - ./models:/models:cached
-      - ./images/:/tmp/generated/images/
+      - models:/models
+      - images:/tmp/generated/images/
     command:
     # Here we can specify a list of models to run (see quickstart https://localai.io/basics/getting_started/#running-models )
     # or an URL pointing to a YAML configuration file, for example:
    # - https://gist.githubusercontent.com/mudler/ad601a0488b497b69ec549150d9edd18/raw/a8a8869ef1bb7e3830bf5c0bae29a0cce991ff8d/phi-2.yaml
    - phi-2
+
+volumes:
+  models:
+  images: