# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# ChatQnA (Xeon/CPU) pipeline: Redis vector DB + dataprep + TEI embedding,
# retriever, TEI reranking, vLLM serving, mega-service backend, UI, and nginx.
services:
  # Vector store backing dataprep and retriever (8001 = RedisInsight UI).
  redis-vector-db:
    image: redis/redis-stack:7.2.0-v9
    container_name: redis-vector-db
    ports:
      - "6379:6379"
      - "8001:8001"

  # Ingests documents: embeds via TEI and indexes into Redis.
  dataprep-redis-service:
    image: ${REGISTRY:-opea}/dataprep-redis:${TAG:-latest}
    container_name: dataprep-redis-server
    depends_on:
      - redis-vector-db
      - tei-embedding-service
    ports:
      - "6007:6007"
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      REDIS_URL: redis://redis-vector-db:6379
      REDIS_HOST: redis-vector-db
      INDEX_NAME: ${INDEX_NAME}
      TEI_ENDPOINT: http://tei-embedding-service:80
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}

  # Text-embeddings-inference server used for query/document embedding.
  tei-embedding-service:
    image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
    container_name: tei-embedding-server
    ports:
      - "6006:80"
    volumes:
      - "./data:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
    command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate

  # Similarity search against the Redis index.
  retriever:
    image: ${REGISTRY:-opea}/retriever-redis:${TAG:-latest}
    container_name: retriever-redis-server
    depends_on:
      - redis-vector-db
    ports:
      - "7000:7000"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      REDIS_URL: redis://redis-vector-db:6379
      REDIS_HOST: redis-vector-db
      INDEX_NAME: ${INDEX_NAME}
      TEI_EMBEDDING_ENDPOINT: http://tei-embedding-service:80
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped

  # Text-embeddings-inference server used in reranking mode.
  tei-reranking-service:
    image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
    container_name: tei-reranking-server
    ports:
      - "8808:80"
    volumes:
      - "./data:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      # Quoted so these stay strings in the container env instead of YAML ints.
      HF_HUB_DISABLE_PROGRESS_BARS: "1"
      HF_HUB_ENABLE_HF_TRANSFER: "0"
    command: --model-id ${RERANK_MODEL_ID} --auto-truncate

  # LLM serving via vLLM.
  # NOTE(review): the underscore in this service name produces a non-RFC-1123
  # hostname; Docker's embedded DNS still resolves it, and the backend below
  # references it as LLM_SERVER_HOST_IP=vllm_service, so renaming to
  # "vllm-service" would be a breaking change — confirm before changing.
  vllm_service:
    image: ${REGISTRY:-opea}/vllm:${TAG:-latest}
    container_name: vllm-service
    ports:
      - "9009:80"
    volumes:
      - "./data:/data"
    shm_size: 128g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
    # ${LLM_MODEL_ID} (braced) for consistency with the rest of the file;
    # interpolation result is identical to the bare $LLM_MODEL_ID form.
    command: --model ${LLM_MODEL_ID} --host 0.0.0.0 --port 80

  # Mega-service orchestrator: embedding -> retrieval -> rerank -> LLM.
  chatqna-xeon-backend-server:
    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
    container_name: chatqna-xeon-backend-server
    depends_on:
      - redis-vector-db
      - tei-embedding-service
      - retriever
      - tei-reranking-service
      - vllm_service
    ports:
      - "8888:8888"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=chatqna-xeon-backend-server
      - EMBEDDING_SERVER_HOST_IP=tei-embedding-service
      - EMBEDDING_SERVER_PORT=${EMBEDDING_SERVER_PORT:-80}
      - RETRIEVER_SERVICE_HOST_IP=retriever
      - RERANK_SERVER_HOST_IP=tei-reranking-service
      - RERANK_SERVER_PORT=${RERANK_SERVER_PORT:-80}
      - LLM_SERVER_HOST_IP=vllm_service
      - LLM_SERVER_PORT=${LLM_SERVER_PORT:-80}
      - LLM_MODEL=${LLM_MODEL_ID}
      - LOGFLAG=${LOGFLAG}
    ipc: host
    restart: always

  # Web UI.
  chatqna-xeon-ui-server:
    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
    container_name: chatqna-xeon-ui-server
    depends_on:
      - chatqna-xeon-backend-server
    ports:
      - "5173:5173"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
    ipc: host
    restart: always

  # Reverse proxy fronting the UI, backend, and dataprep endpoints.
  chatqna-xeon-nginx-server:
    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
    container_name: chatqna-xeon-nginx-server
    depends_on:
      - chatqna-xeon-backend-server
      - chatqna-xeon-ui-server
    ports:
      - "${NGINX_PORT:-80}:80"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - FRONTEND_SERVICE_IP=chatqna-xeon-ui-server
      - FRONTEND_SERVICE_PORT=5173
      - BACKEND_SERVICE_NAME=chatqna
      - BACKEND_SERVICE_IP=chatqna-xeon-backend-server
      - BACKEND_SERVICE_PORT=8888
      - DATAPREP_SERVICE_IP=dataprep-redis-service
      - DATAPREP_SERVICE_PORT=6007
    ipc: host
    restart: always

networks:
  default:
    driver: bridge