Enable CodeGen vLLM (#1636)
Signed-off-by: Wang, Xigui <xigui.wang@intel.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
@@ -4,7 +4,9 @@
 services:
   tgi-service:
     image: ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu
-    container_name: tgi-service
+    container_name: tgi-server
+    profiles:
+      - codegen-xeon-tgi
     ports:
       - "8028:80"
     volumes:
@@ -22,28 +24,66 @@ services:
       timeout: 10s
       retries: 100
     command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
-  llm:
-    image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest}
-    container_name: llm-textgen-server
-    depends_on:
-      tgi-service:
-        condition: service_healthy
+  vllm-service:
+    image: ${REGISTRY:-opea}/vllm:${TAG:-latest}
+    container_name: vllm-server
+    profiles:
+      - codegen-xeon-vllm
     ports:
-      - "9000:9000"
-    ipc: host
+      - "8028:80"
+    volumes:
+      - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub"
+    shm_size: 1g
     environment:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
       HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      host_ip: ${host_ip}
+    healthcheck:
+      test: ["CMD-SHELL", "curl -f http://$host_ip:8028/health || exit 1"]
+      interval: 10s
+      timeout: 10s
+      retries: 100
+    command: --model ${LLM_MODEL_ID} --host 0.0.0.0 --port 80
+  llm-base:
+    image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest}
+    container_name: llm-textgen-server
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      LLM_ENDPOINT: ${LLM_ENDPOINT}
+      LLM_MODEL_ID: ${LLM_MODEL_ID}
+      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+    restart: unless-stopped
+  llm-tgi-service:
+    extends: llm-base
+    container_name: llm-codegen-tgi-server
+    profiles:
+      - codegen-xeon-tgi
+    ports:
+      - "9000:9000"
+    ipc: host
+    depends_on:
+      tgi-service:
+        condition: service_healthy
+  llm-vllm-service:
+    extends: llm-base
+    container_name: llm-codegen-vllm-server
+    profiles:
+      - codegen-xeon-vllm
+    ports:
+      - "9000:9000"
+    ipc: host
+    depends_on:
+      vllm-service:
+        condition: service_healthy
   codegen-xeon-backend-server:
     image: ${REGISTRY:-opea}/codegen:${TAG:-latest}
     container_name: codegen-xeon-backend-server
     depends_on:
-      - llm
+      - llm-base
     ports:
       - "7778:7778"
     environment:
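Note on the profiles above: the TGI and vLLM serving stacks are mutually exclusive, selected at docker compose time via the codegen-xeon-tgi and codegen-xeon-vllm profiles this diff introduces. A minimal launch sketch (run from the directory holding this compose file; HUGGINGFACEHUB_API_TOKEN, LLM_MODEL_ID, MODEL_CACHE, and host_ip are assumed to be exported in the environment first):

    # serve the model with vLLM
    docker compose --profile codegen-xeon-vllm up -d

    # or serve it with TGI instead
    docker compose --profile codegen-xeon-tgi up -d

Services without a profiles key (llm-base, codegen-xeon-backend-server) are enabled in both cases.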
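The vllm-service healthcheck polls vLLM's /health endpoint through the published port (8028 on the host maps to 80 in the container, where the server listens per the --port 80 flag). Once the container reports healthy, the same port should answer vLLM's OpenAI-compatible API. A hedged smoke test, assuming host_ip and LLM_MODEL_ID are exported in the calling shell:

    curl http://${host_ip}:8028/health

    curl http://${host_ip}:8028/v1/completions \
      -H 'Content-Type: application/json' \
      -d '{"model": "'"${LLM_MODEL_ID}"'", "prompt": "def fib(n):", "max_tokens": 32}'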
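The llm-tgi-service and llm-vllm-service wrappers reuse llm-base via extends, overriding only the container name, profile, port mapping, and depends_on target. To check which services a profile activates and what the merged definitions resolve to, without starting any containers, docker compose can render the effective configuration; for example:

    docker compose --profile codegen-xeon-vllm config --services
    docker compose --profile codegen-xeon-vllm config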