Set vLLM as the default LLM serving backend, and add the related Docker Compose files, READMEs, and test scripts. Fixes issue #1436.

Signed-off-by: letonghan <letong.han@intel.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

services:
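  # vLLM model server; serves an OpenAI-compatible API on container port 80 (host port 8008)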
  vllm-service:
    image: ${REGISTRY:-opea}/vllm:${TAG:-latest}
    container_name: docsum-xeon-vllm-service
    ports:
      - "8008:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      VLLM_TORCH_PROFILER_DIR: "/mnt"
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    command: --model $LLM_MODEL_ID --host 0.0.0.0 --port 80
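
  # DocSum LLM microservice; starts only after the vLLM healthcheck passes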
  llm-docsum-vllm:
    image: ${REGISTRY:-opea}/llm-docsum:${TAG:-latest}
    container_name: docsum-xeon-llm-server
    depends_on:
      vllm-service:
        condition: service_healthy
    ports:
      - ${LLM_PORT:-9000}:9000
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      LLM_ENDPOINT: ${LLM_ENDPOINT}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      MAX_INPUT_TOKENS: ${MAX_INPUT_TOKENS}
      MAX_TOTAL_TOKENS: ${MAX_TOTAL_TOKENS}
      DocSum_COMPONENT_NAME: ${DocSum_COMPONENT_NAME}
      LOGFLAG: ${LOGFLAG:-False}
    restart: unless-stopped
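
  # Whisper speech-to-text (ASR) service used for audio/video summarization inputs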
  whisper:
    image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
    container_name: docsum-xeon-whisper-server
    ports:
      - "7066:7066"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
    restart: unless-stopped
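
  # DocSum megaservice gateway that orchestrates the LLM and ASR microservices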
  docsum-xeon-backend-server:
    image: ${REGISTRY:-opea}/docsum:${TAG:-latest}
    container_name: docsum-xeon-backend-server
    depends_on:
      - vllm-service
      - llm-docsum-vllm
    ports:
      - "${BACKEND_SERVICE_PORT:-8888}:8888"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
      - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
      - ASR_SERVICE_HOST_IP=${ASR_SERVICE_HOST_IP}
    ipc: host
    restart: always
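
  # Gradio web UI that talks to the backend gateway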
  docsum-gradio-ui:
    image: ${REGISTRY:-opea}/docsum-gradio-ui:${TAG:-latest}
    container_name: docsum-xeon-ui-server
    depends_on:
      - docsum-xeon-backend-server
    ports:
      - "${FRONTEND_SERVICE_PORT:-5173}:5173"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - BACKEND_SERVICE_ENDPOINT=${BACKEND_SERVICE_ENDPOINT}
      - DOC_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
    ipc: host
    restart: always

networks:
  default:
    driver: bridge
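
Once the stack is up (for example, docker compose -f compose.yaml up -d with the variables referenced above, such as HUGGINGFACEHUB_API_TOKEN and LLM_MODEL_ID, exported), a quick end-to-end check can look like the following Python sketch. It mirrors the intent of the added test scripts rather than reproducing them; HOST_IP, the /v1/docsum route, and the payload shape are assumptions carried over from other DocSum deployments.

import requests

HOST_IP = "localhost"  # assumption: the compose stack runs on this machine

# vLLM readiness: the same /health endpoint the compose healthcheck polls,
# reached from the host through the 8008:80 port mapping.
requests.get(f"http://{HOST_IP}:8008/health", timeout=5).raise_for_status()

# Ask the DocSum megaservice (default host port 8888) for a summary.
payload = {
    "type": "text",
    "messages": "Docker Compose is a tool for defining and running "
    "multi-container applications from a single YAML file.",
    "stream": False,
}
resp = requests.post(f"http://{HOST_IP}:8888/v1/docsum", json=payload, timeout=300)
resp.raise_for_status()
print(resp.text)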