diff --git a/.env b/.env
deleted file mode 100644
index 794d534..0000000
--- a/.env
+++ /dev/null
@@ -1,4 +0,0 @@
-# If set, HTTP_PROXY messes with inter-container communication in the deployment.
-# Ollama downloads the models via https anyway so it should be safe to unset it
-HTTP_PROXY=
-http_proxy=
diff --git a/docker-compose.amd.yml b/docker-compose.amd.yml
index 4c8dea8..8ca5938 100644
--- a/docker-compose.amd.yml
+++ b/docker-compose.amd.yml
@@ -8,8 +8,6 @@ services:
     restart: unless-stopped
     entrypoint: /bootstrap.sh
     command: mistral
-    env_file:
-      - .env
     ports:
       - 11434:11434
     # begin for AMD GPU support
diff --git a/docker-compose.base.yml b/docker-compose.base.yml
index 027a7ba..6412e73 100644
--- a/docker-compose.base.yml
+++ b/docker-compose.base.yml
@@ -18,8 +18,6 @@ services:
     image: ghcr.io/open-webui/open-webui:main
     ports:
       - 8080:8080
-    env_file:
-      - .env
     environment:
       OLLAMA_BASE_URL: http://ollama:11434
     extra_hosts:
diff --git a/docker-compose.cpu.yml b/docker-compose.cpu.yml
index cba561b..c8727dc 100644
--- a/docker-compose.cpu.yml
+++ b/docker-compose.cpu.yml
@@ -8,8 +8,6 @@ services:
     restart: unless-stopped
     entrypoint: /bootstrap.sh
     command: mistral
-    env_file:
-      - .env
     ports:
       - 11434:11434
     volumes:
diff --git a/docker-compose.nvidia.yml b/docker-compose.nvidia.yml
index b678b95..dca8a31 100644
--- a/docker-compose.nvidia.yml
+++ b/docker-compose.nvidia.yml
@@ -8,8 +8,6 @@ services:
     restart: unless-stopped
     entrypoint: /bootstrap.sh
     command: mistral
-    env_file:
-      - .env
     ports:
       - 11434:11434
     # begin for NVIDIA GPU support
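Note: the removed .env existed only to blank out HTTP_PROXY/http_proxy, which broke inter-container traffic when inherited from the host. If a deployment still needs the proxy for some services, a minimal alternative sketch (not part of this change; service name `ollama` assumed) is to clear the variables per service instead of globally:

# Hypothetical per-service override instead of a global .env:
services:
  ollama:
    environment:
      HTTP_PROXY: ""   # empty value shadows any proxy inherited from the host shell
      http_proxy: ""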