# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

services:
  # TGI model server for Xeon CPU (enabled by the codegen-xeon-tgi profile).
  tgi-service:
    image: ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu
    container_name: tgi-server
    profiles:
      - codegen-xeon-tgi
    ports:
      - "8028:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      host_ip: ${host_ip}
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://$host_ip:8028/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0

  # vLLM model server, an alternative serving backend (enabled by the
  # codegen-xeon-vllm profile). Both model servers publish host port 8028,
  # so only one profile should be enabled per run.
  vllm-service:
    image: ${REGISTRY:-opea}/vllm:${TAG:-latest}
    container_name: vllm-server
    profiles:
      - codegen-xeon-vllm
    ports:
      - "8028:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      host_ip: ${host_ip}
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://$host_ip:8028/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    command: --model ${LLM_MODEL_ID} --host 0.0.0.0 --port 80

  # Shared definition for the LLM text-generation microservice; extended by
  # the profile-specific services below rather than started on its own.
  llm-base:
    image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest}
    container_name: llm-textgen-server
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      LLM_ENDPOINT: ${LLM_ENDPOINT}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped

  # LLM microservice wired to the TGI backend; waits for its healthcheck.
  llm-tgi-service:
    extends: llm-base
    container_name: llm-codegen-tgi-server
    profiles:
      - codegen-xeon-tgi
    ports:
      - "9000:9000"
    ipc: host
    depends_on:
      tgi-service:
        condition: service_healthy

  # LLM microservice wired to the vLLM backend; waits for its healthcheck.
  llm-vllm-service:
    extends: llm-base
    container_name: llm-codegen-vllm-server
    profiles:
      - codegen-xeon-vllm
    ports:
      - "9000:9000"
    ipc: host
    depends_on:
      vllm-service:
        condition: service_healthy

  # CodeGen megaservice (backend gateway) that orchestrates the LLM microservice.
  codegen-xeon-backend-server:
    image: ${REGISTRY:-opea}/codegen:${TAG:-latest}
    container_name: codegen-xeon-backend-server
    depends_on:
      - llm-base
    ports:
      - "7778:7778"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
      - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
    ipc: host
    restart: always

  # Web UI; reaches the backend through BACKEND_SERVICE_ENDPOINT.
  codegen-xeon-ui-server:
    image: ${REGISTRY:-opea}/codegen-ui:${TAG:-latest}
    container_name: codegen-xeon-ui-server
    depends_on:
      - codegen-xeon-backend-server
    ports:
      - "5173:5173"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - BASIC_URL=${BACKEND_SERVICE_ENDPOINT}
    ipc: host
    restart: always

networks:
  default:
    driver: bridge
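
# Example launch, a minimal sketch: it assumes Docker Compose v2 (for profile
# support) and that the variables referenced above (host_ip, LLM_MODEL_ID,
# LLM_ENDPOINT, HUGGINGFACEHUB_API_TOKEN, MEGA_SERVICE_HOST_IP,
# LLM_SERVICE_HOST_IP, BACKEND_SERVICE_ENDPOINT) are already exported in the
# shell, e.g. via a set_env.sh script for this deployment.
#
#   docker compose --profile codegen-xeon-vllm up -d   # vLLM serving path
#   docker compose --profile codegen-xeon-tgi up -d    # TGI serving path
#
# Enable only one profile per run, since both model servers bind host port 8028.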