# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

services:
  # Neo4j graph store with the APOC plugin; dataprep and retriever depend on it.
  neo4j-apoc:
    image: neo4j:latest
    container_name: neo4j-apoc
    ports:
      - "${NEO4J_PORT1:-7474}:7474"
      - "${NEO4J_PORT2:-7687}:7687"
    volumes:
      - ./data/neo4j/logs:/logs
      - ./data/neo4j/config:/config
      - ./data/neo4j/data:/data
      - ./data/neo4j/plugins:/plugins
    ipc: host
    environment:
      - no_proxy=${no_proxy}
      - http_proxy=${http_proxy}
      - https_proxy=${https_proxy}
      - NEO4J_AUTH=${NEO4J_USERNAME}/${NEO4J_PASSWORD}
      - NEO4J_PLUGINS=["apoc"]
      - NEO4J_apoc_export_file_enabled=true
      - NEO4J_apoc_import_file_enabled=true
      - NEO4J_apoc_import_file_use__neo4j__config=true
      - NEO4J_dbms_security_procedures_unrestricted=apoc.\*
      - NEO4J_server_bolt_advertised__address=localhost:${NEO4J_PORT2}
    restart: always
    healthcheck:
      test: wget http://localhost:7474 || exit 1
      interval: 5s
      timeout: 10s
      retries: 20
      start_period: 3s

  # Hugging Face text-embeddings-inference (TEI) serving the embedding model on CPU.
  tei-embedding-serving:
    image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
    container_name: tei-embedding-serving
    entrypoint: /bin/sh -c "apt-get update && apt-get install -y curl && text-embeddings-router --json-output --model-id ${EMBEDDING_MODEL_ID} --auto-truncate"
    ports:
      - "${TEI_EMBEDDER_PORT:-12000}:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      host_ip: ${host_ip}
      HF_TOKEN: ${HF_TOKEN}
    healthcheck:
      test: ["CMD", "curl", "-f", "http://${host_ip}:${TEI_EMBEDDER_PORT}/health"]
      interval: 10s
      timeout: 6s
      retries: 48

  # Text Generation Inference (TGI) for Gaudi HPUs serving the LLM.
  tgi-gaudi-server:
    image: ghcr.io/huggingface/tgi-gaudi:2.3.1
    container_name: tgi-gaudi-server
    ports:
      - ${LLM_ENDPOINT_PORT:-8008}:80
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HUGGING_FACE_HUB_TOKEN: ${HF_TOKEN}
      HF_TOKEN: ${HF_TOKEN}
      HF_HUB_DISABLE_PROGRESS_BARS: 1
      HF_HUB_ENABLE_HF_TRANSFER: 0
      HABANA_VISIBLE_DEVICES: all
      OMPI_MCA_btl_vader_single_copy_mechanism: none
      ENABLE_HPU_GRAPH: true
      LIMIT_HPU_GRAPH: true
      USE_FLASH_ATTENTION: true
      FLASH_ATTENTION_RECOMPUTE: true
      host_ip: ${host_ip}
      LLM_ENDPOINT_PORT: ${LLM_ENDPOINT_PORT}
      MAX_INPUT_TOKENS: ${MAX_INPUT_TOKENS:-2048}
      MAX_TOTAL_TOKENS: ${MAX_TOTAL_TOKENS:-4096}
      TEXT_GENERATION_SERVER_IGNORE_EOS_TOKEN: false
    runtime: habana
    cap_add:
      - SYS_NICE
    ipc: host
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://${host_ip}:${LLM_ENDPOINT_PORT}/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    command: --model-id ${LLM_MODEL_ID}

  # Data preparation microservice: extracts a knowledge graph into Neo4j via LlamaIndex.
  dataprep-neo4j-llamaindex:
    image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}
    container_name: dataprep-neo4j-llamaindex
    depends_on:
      neo4j-apoc:
        condition: service_healthy
      tgi-gaudi-server:
        condition: service_healthy
      tei-embedding-serving:
        condition: service_healthy
    ports:
      - "${DATAPREP_PORT:-11103}:5000"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      host_ip: ${host_ip}
      DATAPREP_COMPONENT_NAME: "OPEA_DATAPREP_NEO4J_LLAMAINDEX"
      NEO4J_URL: ${NEO4J_URL}
      NEO4J_USERNAME: ${NEO4J_USERNAME}
      NEO4J_PASSWORD: ${NEO4J_PASSWORD}
      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
      TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
      OPENAI_API_KEY: ${OPENAI_API_KEY}
      OPENAI_EMBEDDING_MODEL: ${OPENAI_EMBEDDING_MODEL}
      OPENAI_LLM_MODEL: ${OPENAI_LLM_MODEL}
      EMBEDDING_MODEL_ID: ${EMBEDDING_MODEL_ID}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      LOGFLAG: ${LOGFLAG}
      HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
      HF_TOKEN: ${HF_TOKEN}
      MAX_INPUT_TOKENS: ${MAX_INPUT_TOKENS:-4096}
    restart: unless-stopped
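  # Example: once dataprep reports healthy, documents can be ingested through
  # its HTTP endpoint. This is a sketch; the /v1/dataprep/ingest path follows
  # the current OPEA dataprep convention and may differ across releases, and
  # your_document.pdf is a placeholder:
  #
  #   curl -X POST http://<host-ip>:11103/v1/dataprep/ingest \
  #     -H "Content-Type: multipart/form-data" \
  #     -F "files=@./your_document.pdf"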
  # Retriever microservice querying the Neo4j graph.
  retriever-neo4j:
    image: ${REGISTRY:-opea}/retriever:${TAG:-latest}
    container_name: retriever-neo4j
    ports:
      - "${RETRIEVER_PORT:-7000}:7000"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
      LOGFLAG: ${LOGFLAG:-False}
      RETRIEVER_COMPONENT_NAME: ${RETRIEVER_COMPONENT_NAME:-OPEA_RETRIEVER_NEO4J}
      TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
      EMBEDDING_MODEL_ID: ${EMBEDDING_MODEL_ID}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      NEO4J_URI: ${NEO4J_URI}
      NEO4J_URL: ${NEO4J_URI}
      NEO4J_USERNAME: ${NEO4J_USERNAME}
      NEO4J_PASSWORD: ${NEO4J_PASSWORD}
      VDMS_USE_CLIP: 0
      host_ip: ${host_ip}
    depends_on:
      neo4j-apoc:
        condition: service_healthy
      tei-embedding-serving:
        condition: service_healthy
      tgi-gaudi-server:
        condition: service_healthy

  # GraphRAG megaservice orchestrating the retriever and the LLM server.
  graphrag-gaudi-backend-server:
    image: ${REGISTRY:-opea}/graphrag:${TAG:-latest}
    container_name: graphrag-gaudi-backend-server
    depends_on:
      - neo4j-apoc
      - tei-embedding-serving
      - retriever-neo4j
      - tgi-gaudi-server
    ports:
      - "${MEGA_SERVICE_PORT:-8888}:8888"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=graphrag-gaudi-backend-server
      - RETRIEVER_SERVICE_HOST_IP=retriever-neo4j
      - RETRIEVER_SERVICE_PORT=7000
      - LLM_SERVER_HOST_IP=tgi-gaudi-server
      - LLM_SERVER_PORT=80
      - LLM_MODEL_ID=${LLM_MODEL_ID}
      - LOGFLAG=${LOGFLAG}
    ipc: host
    restart: always

  # UI frontend for the GraphRAG service.
  graphrag-ui-server:
    image: ${REGISTRY:-opea}/graphrag-ui:${TAG:-latest}
    container_name: graphrag-ui-server
    depends_on:
      - graphrag-gaudi-backend-server
    ports:
      - "5173:5173"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
    ipc: host
    restart: always

  # Nginx reverse proxy fronting the UI, megaservice, and dataprep endpoints.
  chatqna-gaudi-nginx-server:
    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
    container_name: chatqna-gaudi-nginx-server
    depends_on:
      - graphrag-gaudi-backend-server
      - graphrag-ui-server
    ports:
      - "${NGINX_PORT:-80}:80"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - FRONTEND_SERVICE_IP=graphrag-ui-server
      - FRONTEND_SERVICE_PORT=5173
      - BACKEND_SERVICE_NAME=graphrag
      - BACKEND_SERVICE_IP=graphrag-gaudi-backend-server
      - BACKEND_SERVICE_PORT=8888
      - DATAPREP_SERVICE_IP=dataprep-neo4j-llamaindex
      - DATAPREP_SERVICE_PORT=5000
    ipc: host
    restart: always

networks:
  default:
    driver: bridge
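
# Example bring-up and smoke test (a sketch, assuming the environment
# variables referenced above — host_ip, NEO4J_USERNAME/PASSWORD, NEO4J_URL,
# NEO4J_URI, HF_TOKEN, EMBEDDING_MODEL_ID, LLM_MODEL_ID, TEI_EMBEDDING_ENDPOINT,
# TGI_LLM_ENDPOINT — are already exported in the shell:
#
#   docker compose up -d
#
# The megaservice listens on ${MEGA_SERVICE_PORT:-8888}. The /v1/graphrag
# route assumes the usual OPEA nginx convention of /v1/<BACKEND_SERVICE_NAME>,
# and the payload shape shown is an assumption, not a confirmed schema:
#
#   curl http://<host-ip>:8888/v1/graphrag \
#     -H "Content-Type: application/json" \
#     -d '{"messages": [{"role": "user", "content": "Summarize the graph."}]}'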