GenAIExamples/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
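# Deploys the OPEA CodeGen example on an Intel Xeon (CPU) host: TGI model
# serving (8028) feeds the llm-textgen microservice (9000), which the CodeGen
# megaservice gateway (7778) orchestrates behind the UI (5173).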
services:
  tgi-service:
    image: ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu
    container_name: tgi-service
    ports:
      - "8028:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      host_ip: ${host_ip}
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://$host_ip:8028/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
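  # The healthcheck above polls TGI's /health route; the same probe works from
  # the host once the model has loaded (a sketch, assuming host_ip is exported
  # in your shell):
  #   curl -f http://${host_ip}:8028/health
  #   curl http://${host_ip}:8028/generate -X POST \
  #     -H 'Content-Type: application/json' \
  #     -d '{"inputs":"def fib(n):","parameters":{"max_new_tokens":32}}'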
  llm:
    image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest}
    container_name: llm-textgen-server
    depends_on:
      tgi-service:
        condition: service_healthy
    ports:
      - "9000:9000"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
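  # LLM_ENDPOINT must point at the tgi-service above; a typical value, assuming
  # the variables used elsewhere in this file (an illustrative sketch, not the
  # project's canonical env setup):
  #   export TGI_LLM_ENDPOINT="http://${host_ip}:8028"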
  codegen-xeon-backend-server:
    image: ${REGISTRY:-opea}/codegen:${TAG:-latest}
    container_name: codegen-xeon-backend-server
    depends_on:
      - llm
    ports:
      - "7778:7778"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
      - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
    ipc: host
    restart: always
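  # Smoke test for the megaservice gateway (a sketch; /v1/codegen is the route
  # the CodeGen example documents):
  #   curl http://${host_ip}:7778/v1/codegen \
  #     -H 'Content-Type: application/json' \
  #     -d '{"messages": "Write a Python function that checks if a string is a palindrome."}'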
  codegen-xeon-ui-server:
    image: ${REGISTRY:-opea}/codegen-ui:${TAG:-latest}
    container_name: codegen-xeon-ui-server
    depends_on:
      - codegen-xeon-backend-server
    ports:
      - "5173:5173"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - BASIC_URL=${BACKEND_SERVICE_ENDPOINT}
    ipc: host
    restart: always
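  # BASIC_URL tells the UI where the gateway lives; BACKEND_SERVICE_ENDPOINT is
  # commonly set to the megaservice route (an assumption -- match your own env):
  #   export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:7778/v1/codegen"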
networks:
  default:
    driver: bridge
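
# Bringing the stack up (a sketch): export HUGGINGFACEHUB_API_TOKEN, host_ip,
# LLM_MODEL_ID, and the endpoint/host variables referenced above, then run
# `docker compose up -d` from this directory. The UI is then reachable at
# http://${host_ip}:5173.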