# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

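# Docker Compose deployment of the OPEA ChatQnA pipeline on Intel Xeon (CPU),
# using Pinecone as the managed vector store, TEI for embeddings and
# reranking, and vLLM for LLM serving.
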
version: "3.8"

services:
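  # Data preparation microservice: ingests uploaded documents, embeds them via
  # the TEI service below, and writes the vectors to the Pinecone index.
  # Exposed on the host at port 6007 (container port 5000).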
  dataprep-pinecone-service:
    image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}
    container_name: dataprep-pinecone-server
    depends_on:
      - tei-embedding-service
    ports:
      - "6007:5000"
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      PINECONE_API_KEY: ${PINECONE_API_KEY}
      PINECONE_INDEX_NAME: ${PINECONE_INDEX_NAME}
      TEI_EMBEDDING_ENDPOINT: http://tei-embedding-service:80
      LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      DATAPREP_COMPONENT_NAME: "OPEA_DATAPREP_PINECONE"
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:5000/v1/health_check || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 50
    restart: unless-stopped

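  # Hugging Face Text Embeddings Inference (TEI) serving ${EMBEDDING_MODEL_ID}
  # on CPU; --auto-truncate clips inputs that exceed the model's maximum
  # length instead of returning an error.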
  tei-embedding-service:
    image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
    container_name: tei-embedding-server
    ports:
      - "6006:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
    command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate

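  # Retriever microservice: embeds the incoming query via the TEI endpoint and
  # runs a similarity search against the Pinecone index; listens on port 7000.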
  retriever:
    image: ${REGISTRY:-opea}/retriever:${TAG:-latest}
    container_name: retriever-pinecone-server
    ports:
      - "7000:7000"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      PINECONE_API_KEY: ${PINECONE_API_KEY}
      INDEX_NAME: ${PINECONE_INDEX_NAME}
      PINECONE_INDEX_NAME: ${PINECONE_INDEX_NAME}
      LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
      TEI_EMBEDDING_ENDPOINT: http://tei-embedding-service:80
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      LOGFLAG: ${LOGFLAG}
      RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_PINECONE"
    restart: unless-stopped

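  # Second TEI instance serving the reranking model ${RERANK_MODEL_ID}, used
  # to re-order retrieved documents by relevance before generation.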
  tei-reranking-service:
    image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
    container_name: tei-reranking-server
    ports:
      - "8808:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      HF_HUB_DISABLE_PROGRESS_BARS: 1
      HF_HUB_ENABLE_HF_TRANSFER: 0
    command: --model-id ${RERANK_MODEL_ID} --auto-truncate

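  # vLLM server hosting ${LLM_MODEL_ID} behind an OpenAI-compatible API on
  # container port 80 (host port 9009); the large shm_size accommodates
  # vLLM's shared-memory communication between worker processes.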
  vllm-service:
    image: ${REGISTRY:-opea}/vllm:${TAG:-latest}
    container_name: vllm-service
    ports:
      - "9009:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    shm_size: 128g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      VLLM_TORCH_PROFILER_DIR: "/mnt"
    command: --model ${LLM_MODEL_ID} --host 0.0.0.0 --port 80

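  # ChatQnA megaservice: orchestrates the end-to-end pipeline
  # (embed -> retrieve -> rerank -> generate) across the services above, and
  # only starts once dataprep reports healthy.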
  chatqna-xeon-backend-server:
    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
    container_name: chatqna-xeon-backend-server
    depends_on:
      tei-embedding-service:
        condition: service_started
      dataprep-pinecone-service:
        condition: service_healthy
      retriever:
        condition: service_started
      tei-reranking-service:
        condition: service_started
      vllm-service:
        condition: service_started
    ports:
      - "8888:8888"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=chatqna-xeon-backend-server
      - EMBEDDING_SERVER_HOST_IP=tei-embedding-service
      - EMBEDDING_SERVER_PORT=${EMBEDDING_SERVER_PORT:-80}
      - RETRIEVER_SERVICE_HOST_IP=retriever
      - RERANK_SERVER_HOST_IP=tei-reranking-service
      - RERANK_SERVER_PORT=${RERANK_SERVER_PORT:-80}
      - LLM_SERVER_HOST_IP=vllm-service
      - LLM_SERVER_PORT=80
      - LOGFLAG=${LOGFLAG}
      - LLM_MODEL=${LLM_MODEL_ID}
    ipc: host
    restart: always

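  # Web UI for ChatQnA, served on port 5173 and talking to the backend above.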
  chatqna-xeon-ui-server:
    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
    container_name: chatqna-xeon-ui-server
    depends_on:
      - chatqna-xeon-backend-server
    ports:
      - "5173:5173"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
    ipc: host
    restart: always

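  # Nginx reverse proxy: the single entry point (host port ${NGINX_PORT:-80})
  # routing requests to the UI, the backend megaservice, and dataprep.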
  chatqna-xeon-nginx-server:
    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
    container_name: chatqna-xeon-nginx-server
    depends_on:
      - chatqna-xeon-backend-server
      - chatqna-xeon-ui-server
    ports:
      - "${NGINX_PORT:-80}:80"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - FRONTEND_SERVICE_IP=chatqna-xeon-ui-server
      - FRONTEND_SERVICE_PORT=5173
      - BACKEND_SERVICE_NAME=chatqna
      - BACKEND_SERVICE_IP=chatqna-xeon-backend-server
      - BACKEND_SERVICE_PORT=8888
      - DATAPREP_SERVICE_IP=dataprep-pinecone-service
      - DATAPREP_SERVICE_PORT=5000
    ipc: host
    restart: always

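# All services join the default bridge network, so each container can reach
# the others by service name.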
networks:
  default:
    driver: bridge