# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

services:
  tgi-service:
    image: ghcr.io/huggingface/tgi-gaudi:2.3.1
    container_name: tgi-gaudi-server
    profiles:
      - codegen-gaudi-tgi
    ports:
      - "8028:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HABANA_VISIBLE_DEVICES: all
      OMPI_MCA_btl_vader_single_copy_mechanism: none
      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      ENABLE_HPU_GRAPH: true
      LIMIT_HPU_GRAPH: true
      USE_FLASH_ATTENTION: true
      FLASH_ATTENTION_RECOMPUTE: true
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    runtime: habana
    cap_add:
      - SYS_NICE
    ipc: host
    command: --model-id ${LLM_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048
  vllm-service:
    image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest}
    container_name: vllm-gaudi-server
    profiles:
      - codegen-gaudi-vllm
    ports:
      - "8028:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      HABANA_VISIBLE_DEVICES: all
      OMPI_MCA_btl_vader_single_copy_mechanism: none
      VLLM_SKIP_WARMUP: ${VLLM_SKIP_WARMUP:-false}
      NUM_CARDS: ${NUM_CARDS:-1}
      VLLM_TORCH_PROFILER_DIR: "/mnt"
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    runtime: habana
    cap_add:
      - SYS_NICE
    ipc: host
    command: --model ${LLM_MODEL_ID} --tensor-parallel-size ${NUM_CARDS} --host 0.0.0.0 --port 80 --block-size 128 --max-num-seqs 256
  llm-base:
    image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest}
    container_name: llm-textgen-gaudi-server
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      LLM_ENDPOINT: ${LLM_ENDPOINT}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
  llm-tgi-service:
    extends: llm-base
    container_name: llm-codegen-tgi-gaudi-server
    profiles:
      - codegen-gaudi-tgi
    ports:
      - "9000:9000"
    ipc: host
    depends_on:
      tgi-service:
        condition: service_healthy
  llm-vllm-service:
    extends: llm-base
    container_name: llm-codegen-gaudi-vllm-server
    profiles:
      - codegen-gaudi-vllm
    ports:
      - "9000:9000"
    ipc: host
    depends_on:
      vllm-service:
        condition: service_healthy
  codegen-gaudi-backend-server:
    image: ${REGISTRY:-opea}/codegen:${TAG:-latest}
    container_name: codegen-gaudi-backend-server
    depends_on:
      llm-base:
        condition: service_started
      dataprep-redis-server:
        condition: service_healthy
    ports:
      - "7778:7778"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
      - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
      - RETRIEVAL_SERVICE_HOST_IP=${RETRIEVAL_SERVICE_HOST_IP}
      - REDIS_RETRIEVER_PORT=${REDIS_RETRIEVER_PORT}
      - TEI_EMBEDDING_HOST_IP=${TEI_EMBEDDING_HOST_IP}
      - EMBEDDER_PORT=${EMBEDDER_PORT}
      - host_ip=${host_ip}
    ipc: host
    restart: always
  codegen-gaudi-ui-server:
    image: ${REGISTRY:-opea}/codegen-gradio-ui:${TAG:-latest}
    container_name: codegen-gaudi-ui-server
    depends_on:
      - codegen-gaudi-backend-server
    ports:
      - "5173:5173"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - BASIC_URL=${BACKEND_SERVICE_ENDPOINT}
      - MEGA_SERVICE_PORT=${MEGA_SERVICE_PORT}
      - host_ip=${host_ip}
      - DATAPREP_ENDPOINT=${DATAPREP_ENDPOINT}
      - DATAPREP_REDIS_PORT=${DATAPREP_REDIS_PORT}
    ipc: host
    restart: always
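
  # RAG data path for the services below: Redis vector store, dataprep ingestion,
  # TEI embedding (serving + wrapper), and the Redis retriever.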
  redis-vector-db:
    image: redis/redis-stack:7.2.0-v9
    container_name: redis-vector-db
    ports:
      - "${REDIS_DB_PORT}:${REDIS_DB_PORT}"
      - "${REDIS_INSIGHTS_PORT}:${REDIS_INSIGHTS_PORT}"
  dataprep-redis-server:
    image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}
    container_name: dataprep-redis-server
    depends_on:
      - redis-vector-db
    ports:
      - "${DATAPREP_REDIS_PORT}:5000"
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      REDIS_URL: ${REDIS_URL}
      REDIS_HOST: ${host_ip}
      INDEX_NAME: ${INDEX_NAME}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      LOGFLAG: true
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:5000/v1/health_check || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 10
    restart: unless-stopped
  tei-embedding-serving:
    image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
    container_name: tei-embedding-serving
    entrypoint: /bin/sh -c "apt-get update && apt-get install -y curl && text-embeddings-router --json-output --model-id ${EMBEDDING_MODEL_ID} --auto-truncate"
    ports:
      - "${TEI_EMBEDDER_PORT:-12000}:80"
    volumes:
      - "./data:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      host_ip: ${host_ip}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:80/health"]
      interval: 10s
      timeout: 6s
      retries: 48
  tei-embedding-server:
    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
    container_name: tei-embedding-server
    ports:
      - "${EMBEDDER_PORT:-10201}:6000"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
      EMBEDDING_COMPONENT_NAME: "OPEA_TEI_EMBEDDING"
    depends_on:
      tei-embedding-serving:
        condition: service_healthy
    restart: unless-stopped
  retriever-redis:
    image: ${REGISTRY:-opea}/retriever:${TAG:-latest}
    container_name: retriever-redis
    depends_on:
      - redis-vector-db
    ports:
      - "${REDIS_RETRIEVER_PORT}:${REDIS_RETRIEVER_PORT}"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      REDIS_URL: ${REDIS_URL}
      REDIS_DB_PORT: ${REDIS_DB_PORT}
      REDIS_INSIGHTS_PORT: ${REDIS_INSIGHTS_PORT}
      REDIS_RETRIEVER_PORT: ${REDIS_RETRIEVER_PORT}
      INDEX_NAME: ${INDEX_NAME}
      TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      LOGFLAG: ${LOGFLAG}
      RETRIEVER_COMPONENT_NAME: ${RETRIEVER_COMPONENT_NAME:-OPEA_RETRIEVER_REDIS}
    restart: unless-stopped

networks:
  default:
    driver: bridge
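
# Example launch commands, assuming the environment variables referenced above
# (LLM_MODEL_ID, HUGGINGFACEHUB_API_TOKEN, host_ip, the Redis/dataprep port
# variables, etc.) are already exported in the shell. Pick exactly one
# serving-backend profile:
#   docker compose --profile codegen-gaudi-vllm up -d
#   docker compose --profile codegen-gaudi-tgi up -d
#
# Quick smoke test once containers report healthy (the /v1/codegen route is the
# usual CodeGen megaservice path; adjust if your gateway differs):
#   curl http://${host_ip}:7778/v1/codegen \
#     -H "Content-Type: application/json" \
#     -d '{"messages": "Implement a bubble sort in Python."}'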