# docker-compose.yaml
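# Usage sketch (inferred from the profiles below, not documented in this file):
#   docker compose --profile local-model up -d    # nginx + Open WebUI + local Ollama
#   docker compose --profile remote-model up -d   # nginx + Open WebUI + LiteLLM + Postgres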
services:
  nginx:
    container_name: open-webui-nginx
    image: nginx:latest
    ports:
      - "443:443"
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf
      - ./nginx/certs:/etc/nginx/certs
    restart: unless-stopped
    networks:
      - wl_network
    profiles:
      - local-model
      - remote-model
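  # The mounted nginx.conf is not part of this file; a minimal sketch of what it
  # is assumed to contain (TLS termination on 443, reverse proxy to Open WebUI;
  # the cert file names are placeholders):
  #   server {
  #     listen 443 ssl;
  #     ssl_certificate     /etc/nginx/certs/cert.pem;
  #     ssl_certificate_key /etc/nginx/certs/key.pem;
  #     location / { proxy_pass http://open-webui-svc:8080; }
  #   }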
  open-webui-svc:
    container_name: open-webui
    image: ghcr.io/open-webui/open-webui:main
    environment: # https://docs.openwebui.com/getting-started/env-configuration/
      - RAG_EMBEDDING_MODEL_TRUST_REMOTE_CODE=True
      - GLOBAL_LOG_LEVEL=DEBUG
      - ENABLE_LITELLM=False # Always disable the bundled LiteLLM instance
      - OPENAI_API_KEY=$LITELLM_OPENAI_API_KEY
      - OPENAI_API_BASE_URL=http://litellm-proxy-svc:8000
      - ENABLE_OLLAMA_API=$ENABLE_OLLAMA_LOCAL_MODEL
      - OLLAMA_BASE_URL=http://ollama-svc:11434
    ports:
      - "3000:8080"
    volumes:
      - ./data/open-webui:/app/backend/data
    restart: unless-stopped
    networks:
      - wl_network
    profiles:
      - local-model
      - remote-model
    depends_on:
      ollama-svc:
        required: false
        condition: service_started
      litellm-proxy-svc:
        required: false
        condition: service_started
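  # All $VARS above resolve from the host shell or a sibling .env file. A
  # minimal .env sketch (every value is a placeholder, not taken from this repo;
  # LITELLM_OPENAI_API_KEY must be a key the LiteLLM proxy accepts, e.g. its
  # master key or a virtual key):
  #   LITELLM_OPENAI_API_KEY=sk-litellm-master-or-virtual-key
  #   ENABLE_OLLAMA_LOCAL_MODEL=True
  #   REMOTE_OPENAI_API_KEY=sk-your-openai-key
  #   LITELLM_MASTER_KEY=sk-litellm-master-key
  #   LITELLM_PG_USER=litellm
  #   LITELLM_PG_PSW=change-me
  #   LITELLM_PG_DB=litellm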
  ollama-svc:
    image: ollama/ollama:latest
    container_name: ollama # https://hub.docker.com/r/ollama/ollama
    pull_policy: if_not_present
    tty: true
    ports:
      - "7869:11434"
    environment:
      - OLLAMA_KEEP_ALIVE=24h
    volumes:
      - ./data/ollama:/root/.ollama
    restart: unless-stopped
    networks:
      - wl_network
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    profiles:
      - local-model
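  # The nvidia device reservation requires the NVIDIA Container Toolkit on the
  # host. Models persist in ./data/ollama; to fetch one into the running
  # container (the model name is an example, not prescribed by this file):
  #   docker exec -it ollama ollama pull llama3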
  postgres-db:
    image: postgres
    container_name: postgresdb
    restart: always
    shm_size: 128mb
    ports:
      - "5432:5432"
    volumes:
      - ./data/pgdata:/var/lib/postgresql/data
    networks:
      - wl_network
    environment:
      - POSTGRES_USER=$LITELLM_PG_USER
      - POSTGRES_PASSWORD=$LITELLM_PG_PSW
      - POSTGRES_DB=$LITELLM_PG_DB
    profiles:
      - remote-model
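  # This database backs LiteLLM's virtual-key and spend tracking. A quick
  # connectivity check (sketch; assumes the .env variables are exported in the
  # host shell that runs the command):
  #   docker exec -it postgresdb psql -U "$LITELLM_PG_USER" -d "$LITELLM_PG_DB" -c '\dt'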
  litellm-proxy-svc:
    container_name: litellm-proxy
    image: ghcr.io/berriai/litellm:main-latest
    environment:
      - OPENAI_API_KEY=$REMOTE_OPENAI_API_KEY
      - OPENAI_API_BASE_URL=https://api.openai.com/v1
      - MASTER_KEY=$LITELLM_MASTER_KEY
      - DB_URL=postgresql://$LITELLM_PG_USER:$LITELLM_PG_PSW@postgres-db:5432/$LITELLM_PG_DB
    ports:
      - "4000:8000"
    volumes:
      - ./litellm/config.yaml:/app/config.yaml # https://litellm.vercel.app/docs/proxy/deploy
    command:
      ["--config", "/app/config.yaml", "--port", "8000", "--detailed_debug"]
    restart: unless-stopped
    networks:
      - wl_network
    depends_on:
      - postgres-db
    profiles:
      - remote-model
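  # The mounted ./litellm/config.yaml is not part of this file; a minimal sketch
  # in LiteLLM's documented config format (model names are placeholders):
  #   model_list:
  #     - model_name: gpt-4o
  #       litellm_params:
  #         model: openai/gpt-4o
  #         api_key: os.environ/OPENAI_API_KEY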
networks:
  wl_network:
    enable_ipv6: false
    driver: bridge
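# Smoke tests once a profile is up (a sketch; ports follow the mappings above):
#   curl -k https://localhost/                    # Open WebUI through the nginx TLS proxy
#   curl http://localhost:3000/                   # Open WebUI directly
#   curl http://localhost:4000/health/liveliness  # LiteLLM liveness probe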