GenAIExamples/Translation/docker/xeon/compose.yaml

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
services:
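  # TGI model server (Intel CPU build); serves ${LLM_MODEL_ID} on host port 8008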
  tgi-service:
    image: ghcr.io/huggingface/text-generation-inference:latest-intel-cpu
    container_name: tgi-service
    ports:
      - "8008:80"
    environment:
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    volumes:
      - "./data:/data"
    shm_size: 1g
    command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
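  # LLM microservice; wraps the TGI endpoint and exposes it on host port 9000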
  llm:
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
    container_name: llm-tgi-server
    depends_on:
      - tgi-service
    ports:
      - "9000:9000"
    ipc: host
    environment:
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
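  # Translation megaservice (backend); orchestrates the LLM microservice on port 8888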
  translation-xeon-backend-server:
    image: ${REGISTRY:-opea}/translation:${TAG:-latest}
    container_name: translation-xeon-backend-server
    depends_on:
      - tgi-service
      - llm
    ports:
      - "8888:8888"
    environment:
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
      - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
    ipc: host
    restart: always
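  # Web UI; reaches the backend through BASE_URL and serves on host port 5173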
  translation-xeon-ui-server:
    image: ${REGISTRY:-opea}/translation-ui:${TAG:-latest}
    container_name: translation-xeon-ui-server
    depends_on:
      - translation-xeon-backend-server
    ports:
      - "5173:5173"
    environment:
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - BASE_URL=${BACKEND_SERVICE_ENDPOINT}
    ipc: host
    restart: always
networks:
  default:
    driver: bridge
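
# Usage sketch: the variables referenced above must be exported (or placed in a
# .env file next to this compose file) before bringing the stack up. The values
# below are illustrative assumptions, not taken from this file; substitute your
# own model ID, Hugging Face token, host IP, and backend endpoint path.
#
#   export host_ip=$(hostname -I | awk '{print $1}')
#   export LLM_MODEL_ID="haoranxu/ALMA-13B"                  # assumed example model
#   export HUGGINGFACEHUB_API_TOKEN="<your-hf-token>"
#   export TGI_LLM_ENDPOINT="http://${host_ip}:8008"
#   export MEGA_SERVICE_HOST_IP=${host_ip}
#   export LLM_SERVICE_HOST_IP=${host_ip}
#   export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8888/v1/translation"  # path assumed
#   docker compose up -d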