# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
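#
# Docker Compose deployment of the Translation pipeline on AMD ROCm GPUs:
# a TGI server, the llm-textgen microservice, the translation mega-service
# backend, the web UI, and an nginx front door (OPEA images by default).
#
# Minimal launch sketch (assuming the accompanying set_env.sh exports the
# TRANSLATION_* and host_ip variables referenced below):
#   source ./set_env.sh
#   docker compose up -d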
services:
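  # Text Generation Inference (ROCm build) serving ${TRANSLATION_LLM_MODEL_ID}.
  # Published on host port ${TRANSLATION_TGI_SERVICE_PORT:-8008}; the model cache
  # is mounted from ${MODEL_CACHE:-/var/lib/GenAI/translation/data} and the AMD
  # GPU devices (/dev/kfd, /dev/dri) are passed through to the container.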
  translation-tgi-service:
    image: ghcr.io/huggingface/text-generation-inference:2.4.1-rocm
    container_name: translation-tgi-service
    ports:
      - "${TRANSLATION_TGI_SERVICE_PORT:-8008}:80"
    volumes:
      - "${MODEL_CACHE:-/var/lib/GenAI/translation/data}:/data"
    shm_size: 8g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      TGI_LLM_ENDPOINT: ${TRANSLATION_TGI_LLM_ENDPOINT}
      HUGGING_FACE_HUB_TOKEN: ${TRANSLATION_HUGGINGFACEHUB_API_TOKEN}
      HUGGINGFACEHUB_API_TOKEN: ${TRANSLATION_HUGGINGFACEHUB_API_TOKEN}
      host_ip: ${host_ip}
    healthcheck:
test: [ "CMD-SHELL", "curl -f http://$host_ip:8008/health || exit 1" ]
      interval: 10s
      timeout: 10s
      retries: 100
    devices:
      - /dev/kfd:/dev/kfd
      - /dev/dri/:/dev/dri/
    cap_add:
      - SYS_PTRACE
    group_add:
      - video
    security_opt:
      - seccomp:unconfined
    ipc: host
    command: --model-id ${TRANSLATION_LLM_MODEL_ID}
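  # llm-textgen microservice: waits for the TGI service to report healthy, then
  # serves text generation on port 9000 against LLM_ENDPOINT.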
  translation-llm:
    image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest}
    container_name: translation-llm-textgen-server
    depends_on:
      translation-tgi-service:
        condition: service_healthy
    ports:
      - "9000:9000"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      LLM_ENDPOINT: ${TRANSLATION_TGI_LLM_ENDPOINT}
      LLM_MODEL_ID: ${TRANSLATION_LLM_MODEL_ID}
      HUGGINGFACEHUB_API_TOKEN: ${TRANSLATION_HUGGINGFACEHUB_API_TOKEN}
      HF_HUB_DISABLE_PROGRESS_BARS: 1
      HF_HUB_ENABLE_HF_TRANSFER: 0
    restart: unless-stopped
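  # Translation mega-service backend that orchestrates the LLM microservice;
  # exposed on host port ${TRANSLATION_BACKEND_SERVICE_PORT:-8888}.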
  translation-backend-server:
    image: ${REGISTRY:-opea}/translation:${TAG:-latest}
    container_name: translation-backend-server
    depends_on:
      - translation-tgi-service
      - translation-llm
    ports:
      - "${TRANSLATION_BACKEND_SERVICE_PORT:-8888}:8888"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=${TRANSLATION_MEGA_SERVICE_HOST_IP}
      - LLM_SERVICE_HOST_IP=${TRANSLATION_LLM_SERVICE_HOST_IP}
    ipc: host
    restart: always
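  # Web UI, served on host port ${TRANSLATION_FRONTEND_SERVICE_PORT:-5173} and
  # pointed at the backend via BASE_URL.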
  translation-ui-server:
    image: ${REGISTRY:-opea}/translation-ui:${TAG:-latest}
    container_name: translation-ui-server
    depends_on:
      - translation-backend-server
    ports:
      - "${TRANSLATION_FRONTEND_SERVICE_PORT:-5173}:5173"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - BASE_URL=${TRANSLATION_BACKEND_SERVICE_ENDPOINT}
    ipc: host
    restart: always
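  # Nginx reverse proxy fronting the UI and the backend on host port
  # ${TRANSLATION_NGINX_PORT:-80}.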
  translation-nginx-server:
    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
    container_name: translation-nginx-server
    depends_on:
      - translation-backend-server
      - translation-ui-server
    ports:
      - "${TRANSLATION_NGINX_PORT:-80}:80"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - FRONTEND_SERVICE_IP=${TRANSLATION_FRONTEND_SERVICE_IP}
      - FRONTEND_SERVICE_PORT=${TRANSLATION_FRONTEND_SERVICE_PORT}
      - BACKEND_SERVICE_NAME=${TRANSLATION_BACKEND_SERVICE_NAME}
      - BACKEND_SERVICE_IP=${TRANSLATION_BACKEND_SERVICE_IP}
      - BACKEND_SERVICE_PORT=${TRANSLATION_BACKEND_SERVICE_PORT}
    ipc: host
    restart: always
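# All services attach to the default bridge network defined below.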
networks:
  default:
    driver: bridge