Describe the feature

If one just wants to get the system running, it is enough to comment out all the nginx-related parts and publish the app directly on port 8000, and everything works. So maybe supply an additional docker-compose-easy.yaml for such a simple deployment? (A rough sketch of what that file could look like follows the full compose file below.)
version: "3"
services:
  libre-chat:
    # depends_on: [qdrant]
    build: .
    # image: ghcr.io/vemonet/libre-chat:main
    volumes:
      - ./config/chat-vectorstore-qa.yml:/data/chat.yml
      # - ./chat.yml:/data/chat.yml
      - ./data/models:/data/models
      - ./data/documents:/data/documents
      - ./data/embeddings:/data/embeddings
      - ./data/vectorstore:/data/vectorstore
      # - ./data:/data # Or directly share the data directory with chat.yml, and folders for models, vectorstore, etc
    shm_size: '16g'
    ports:
      - 8000:8000
    # entrypoint: uvicorn scripts.main:app
    deploy: # Enable GPU in the container
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 3
              capabilities: [gpu]
    environment:
      - LIBRECHAT_WORKERS=1
      # For deployment with nginx-proxy https://github.com/nginx-proxy/nginx-proxy
      #- VIRTUAL_HOST=chat.semanticscience.org
      #- LETSENCRYPT_HOST=chat.semanticscience.org
      #- VIRTUAL_PORT=8000
      # - CUDA_VISIBLE_DEVICES=0 # Limit which GPU is made available
      # Configuring proxy manually is required to access internet within UM network
      #- HTTP_PROXY=http://proxy.unimaas.nl:3128
      #- HTTPS_PROXY=http://proxy.unimaas.nl:3128
      #- http_proxy=http://proxy.unimaas.nl:3128
      #- https_proxy=http://proxy.unimaas.nl:3128
      #- NO_PROXY=127.0.0.1,localhost,137.120.0.0/16,qdrant
    # Containers deployed publicly need to be on the nginx network
    # networks:
    #   - nginx

  # qdrant:
  #   # https://hub.docker.com/r/qdrant/qdrant/tags
  #   image: qdrant/qdrant:v1.5.1
  #   restart: unless-stopped
  #   volumes:
  #     - ./data/vectorstore/qdrant:/qdrant/storage
  #     # - ./scripts/qdrant_config.yml:/qdrant/config/production.yaml
  #   environment:
  #     - QDRANT_ALLOW_RECOVERY_MODE=true
  #     # - VIRTUAL_HOST=qdrant.137.120.31.148.nip.io
  #     # - LETSENCRYPT_HOST=qdrant.137.120.31.148.nip.io
  #     # - VIRTUAL_PORT=6333
  #   # ports:
  #   #   - 6333:6333
  #   # command:
  #   #   - ./qdrant --config-path /qdrant/qdrant_config.yml
  #   networks:
  #     - nginx

# Also required to deploy containers publicly
# networks:
#   nginx:
#     name: nginx
#     external: true
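As a rough sketch of what such a docker-compose-easy.yaml could contain, derived from the file above: it assumes the published ghcr.io/vemonet/libre-chat:main image and the shared ./data directory mentioned in the comments, and drops the GPU reservation, proxy settings, and nginx network, which could be added back if needed.

# docker-compose-easy.yaml — minimal sketch, no nginx proxy, app published directly on port 8000
version: "3"
services:
  libre-chat:
    # Assumption: use the prebuilt main image instead of building locally
    image: ghcr.io/vemonet/libre-chat:main
    volumes:
      # Share the whole data directory: chat.yml, models, documents, embeddings, vectorstore
      - ./data:/data
    shm_size: '16g'
    ports:
      - 8000:8000
    environment:
      - LIBRECHAT_WORKERS=1

It could then be started with: docker compose -f docker-compose-easy.yaml up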
An idea of implementation
No response
Additional context
No response