# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
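
# Usage sketch (assumes the profile names defined below): exactly one LLM
# serving backend is enabled by selecting a Compose profile, e.g.
#   docker compose --profile codegen-gaudi-vllm up -d
# or, for the TGI backend:
#   docker compose --profile codegen-gaudi-tgi up -d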

services:
  tgi-service:
    image: ghcr.io/huggingface/tgi-gaudi:2.3.1
    container_name: tgi-gaudi-server
    profiles:
      - codegen-gaudi-tgi
    ports:
      - "8028:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HABANA_VISIBLE_DEVICES: all
      OMPI_MCA_btl_vader_single_copy_mechanism: none
      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      ENABLE_HPU_GRAPH: true
      LIMIT_HPU_GRAPH: true
      USE_FLASH_ATTENTION: true
      FLASH_ATTENTION_RECOMPUTE: true
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://$host_ip:8028/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    runtime: habana
    cap_add:
      - SYS_NICE
    ipc: host
    command: --model-id ${LLM_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048
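
  # Manual readiness probe for the service above (mirrors its healthcheck;
  # assumes host_ip is exported in the shell environment):
  #   curl http://${host_ip}:8028/health
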
  vllm-service:
    image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest}
    container_name: vllm-gaudi-server
    profiles:
      - codegen-gaudi-vllm
    ports:
      - "8028:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      HABANA_VISIBLE_DEVICES: all
      OMPI_MCA_btl_vader_single_copy_mechanism: none
      VLLM_SKIP_WARMUP: ${VLLM_SKIP_WARMUP:-false}
      NUM_CARDS: ${NUM_CARDS:-1}
      VLLM_TORCH_PROFILER_DIR: "/mnt"
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://$host_ip:8028/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    runtime: habana
    cap_add:
      - SYS_NICE
    ipc: host
    command: --model ${LLM_MODEL_ID} --tensor-parallel-size ${NUM_CARDS} --host 0.0.0.0 --port 80 --block-size 128 --max-num-seqs 256
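
  # Multi-card sketch (assumes extra Gaudi cards are available): NUM_CARDS
  # feeds --tensor-parallel-size above, so vLLM can be sharded across cards
  # with, e.g.
  #   NUM_CARDS=4 docker compose --profile codegen-gaudi-vllm up -d
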
  llm-base:
    image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest}
    container_name: llm-textgen-gaudi-server
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      LLM_ENDPOINT: ${LLM_ENDPOINT}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
  llm-tgi-service:
    extends: llm-base
    container_name: llm-codegen-tgi-gaudi-server
    profiles:
      - codegen-gaudi-tgi
    ports:
      - "9000:9000"
    ipc: host
    depends_on:
      tgi-service:
        condition: service_healthy
  llm-vllm-service:
    extends: llm-base
    container_name: llm-codegen-gaudi-vllm-server
    profiles:
      - codegen-gaudi-vllm
    ports:
      - "9000:9000"
    ipc: host
    depends_on:
      vllm-service:
        condition: service_healthy
  codegen-gaudi-backend-server:
    image: ${REGISTRY:-opea}/codegen:${TAG:-latest}
    container_name: codegen-gaudi-backend-server
    depends_on:
      - llm-base
    ports:
      - "7778:7778"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
      - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
    ipc: host
    restart: always
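
  # Smoke-test sketch for the gateway above; the /v1/codegen path is an
  # assumption, not defined in this file:
  #   curl http://${host_ip}:7778/v1/codegen \
  #     -H "Content-Type: application/json" \
  #     -d '{"messages": "Implement a function that counts vowels in a string."}'
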
  codegen-gaudi-ui-server:
    image: ${REGISTRY:-opea}/codegen-ui:${TAG:-latest}
    container_name: codegen-gaudi-ui-server
    depends_on:
      - codegen-gaudi-backend-server
    ports:
      - "5173:5173"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - BASIC_URL=${BACKEND_SERVICE_ENDPOINT}
    ipc: host
    restart: always

networks:
  default:
    driver: bridge
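
# Environment sketch for the variables referenced above (values are
# illustrative placeholders; the endpoint paths are assumptions):
#   export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
#   export HUGGINGFACEHUB_API_TOKEN="<your Hugging Face token>"
#   export host_ip=$(hostname -I | awk '{print $1}')
#   export LLM_ENDPOINT="http://${host_ip}:8028"
#   export MEGA_SERVICE_HOST_IP=${host_ip}
#   export LLM_SERVICE_HOST_IP=${host_ip}
#   export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:7778/v1/codegen"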