# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
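
# Usage sketch (assumption: variable values come from this example's README; adjust as needed):
#   export HUGGINGFACEHUB_API_TOKEN=<your_hf_token>
#   export EMBEDDING_MODEL_ID=... RERANK_MODEL_ID=... LLM_MODEL_ID=...
#   export REDIS_URL=... INDEX_NAME=... TEI_EMBEDDING_ENDPOINT=... TEI_RERANKING_ENDPOINT=... vLLM_LLM_ENDPOINT=...
#   docker compose up -d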

services:
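  # Redis Stack vector database (port 6379) with the RedisInsight UI (port 8001)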
  redis-vector-db:
    image: redis/redis-stack:7.2.0-v9
    container_name: redis-vector-db
    ports:
      - "6379:6379"
      - "8001:8001"
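  # Dataprep microservice: ingests uploaded documents, embeds them via the TEI endpoint, and stores them in the Redis index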
  dataprep-redis-service:
    image: ${REGISTRY:-opea}/dataprep-redis:${TAG:-latest}
    container_name: dataprep-redis-server
    depends_on:
      - redis-vector-db
      - tei-embedding-service
    ports:
      - "6007:6007"
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      REDIS_URL: ${REDIS_URL}
      INDEX_NAME: ${INDEX_NAME}
      TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
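  # Hugging Face Text Embeddings Inference (TEI) serving ${EMBEDDING_MODEL_ID} on host port 6006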
  tei-embedding-service:
    image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
    container_name: tei-embedding-server
    ports:
      - "6006:80"
    volumes:
      - "./data:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
    command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
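  # Embedding microservice wrapping the TEI embedding endpoint for the ChatQnA pipeline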
  embedding:
    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
    container_name: embedding-tei-server
    depends_on:
      - tei-embedding-service
    ports:
      - "6000:6000"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
      LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
      LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
      LANGCHAIN_PROJECT: "opea-embedding-service"
    restart: unless-stopped
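  # Retriever microservice: similarity search against the Redis vector index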
  retriever:
    image: ${REGISTRY:-opea}/retriever-redis:${TAG:-latest}
    container_name: retriever-redis-server
    depends_on:
      - redis-vector-db
    ports:
      - "7000:7000"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      REDIS_URL: ${REDIS_URL}
      INDEX_NAME: ${INDEX_NAME}
      TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
      LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
      LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
      LANGCHAIN_PROJECT: "opea-retriever-service"
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
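  # TEI instance serving ${RERANK_MODEL_ID} as the reranking model on host port 8808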
  tei-reranking-service:
    image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
    container_name: tei-reranking-server
    ports:
      - "8808:80"
    volumes:
      - "./data:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      HF_HUB_DISABLE_PROGRESS_BARS: 1
      HF_HUB_ENABLE_HF_TRANSFER: 0
    command: --model-id ${RERANK_MODEL_ID} --auto-truncate
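  # Reranking microservice wrapping the TEI reranking endpoint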
  reranking:
    image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
    container_name: reranking-tei-xeon-server
    depends_on:
      - tei-reranking-service
    ports:
      - "8000:8000"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      TEI_RERANKING_ENDPOINT: ${TEI_RERANKING_ENDPOINT}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      HF_HUB_DISABLE_PROGRESS_BARS: 1
      HF_HUB_ENABLE_HF_TRANSFER: 0
      LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
      LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
      LANGCHAIN_PROJECT: "opea-reranking-service"
    restart: unless-stopped
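  # vLLM server hosting ${LLM_MODEL_ID}; exposed on host port 9009 (container port 80)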
  vllm_service:
    image: ${REGISTRY:-opea}/vllm:${TAG:-latest}
    container_name: vllm-service
    ports:
      - "9009:80"
    volumes:
      - "./data:/data"
    shm_size: 128g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
    command: --model $LLM_MODEL_ID --host 0.0.0.0 --port 80
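  # LLM microservice forwarding generation requests to the vLLM endpoint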
  llm:
    image: ${REGISTRY:-opea}/llm-vllm:${TAG:-latest}
    container_name: llm-vllm-server
    depends_on:
      - vllm_service
    ports:
      - "9000:9000"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      vLLM_ENDPOINT: ${vLLM_LLM_ENDPOINT}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      LLM_MODEL: ${LLM_MODEL_ID}
      HF_HUB_DISABLE_PROGRESS_BARS: 1
      HF_HUB_ENABLE_HF_TRANSFER: 0
      LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
      LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
      LANGCHAIN_PROJECT: "opea-llm-service"
    restart: unless-stopped
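  # ChatQnA megaservice (backend gateway) orchestrating the embedding, retriever, reranking, and llm microservices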
  chatqna-xeon-backend-server:
    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
    container_name: chatqna-xeon-backend-server
    depends_on:
      - redis-vector-db
      - tei-embedding-service
      - embedding
      - retriever
      - tei-reranking-service
      - reranking
      - vllm_service
      - llm
    ports:
      - "8888:8888"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
      - EMBEDDING_SERVICE_HOST_IP=${EMBEDDING_SERVICE_HOST_IP}
      - RETRIEVER_SERVICE_HOST_IP=${RETRIEVER_SERVICE_HOST_IP}
      - RERANK_SERVICE_HOST_IP=${RERANK_SERVICE_HOST_IP}
      - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
    ipc: host
    restart: always
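  # ChatQnA web UI served on port 5173; talks to the backend and dataprep endpoints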
  chatqna-xeon-ui-server:
    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
    container_name: chatqna-xeon-ui-server
    depends_on:
      - chatqna-xeon-backend-server
    ports:
      - "5173:5173"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - CHAT_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
      - UPLOAD_FILE_BASE_URL=${DATAPREP_SERVICE_ENDPOINT}
      - GET_FILE=${DATAPREP_GET_FILE_ENDPOINT}
      - DELETE_FILE=${DATAPREP_DELETE_FILE_ENDPOINT}
    ipc: host
    restart: always

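# All services attach to the default bridge network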
networks:
  default:
    driver: bridge