Compare commits
4 Commits
suyue/ci...update_vLL
| SHA1 |
| --- |
| c712c01829 |
| 66ecaf2e6f |
| 46fab4a736 |
| 8ba40a5bff |
.github/env/_build_image.sh (vendored): 2 changes
@@ -2,4 +2,4 @@
 # SPDX-License-Identifier: Apache-2.0
 
 export VLLM_VER=v0.8.3
-export VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+export VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
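For context on this version bump: the build scripts below clone the HabanaAI fork and check out this tag, so the tag must exist upstream before any image build starts. A minimal pre-flight check, sketched here as an assumption rather than something the diff contains:

```bash
# Verify the pinned tag exists in the HabanaAI fork before kicking off a build.
VLLM_FORK_VER="v0.7.2+Gaudi-1.21.0"
if git ls-remote --tags https://github.com/HabanaAI/vllm-fork.git "refs/tags/${VLLM_FORK_VER}" | grep -q .; then
    echo "Tag ${VLLM_FORK_VER} found upstream"
else
    echo "Tag ${VLLM_FORK_VER} not found; the image builds below would fail" >&2
fi
```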
@@ -37,7 +37,7 @@ function build_agent_docker_image_gaudi_vllm() {
     get_genai_comps
 
     git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
     echo "Build agent image with --no-cache..."
@@ -27,7 +27,7 @@ function build_docker_images() {
 
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     echo "Check out vLLM tag ${VLLM_FORK_VER}"
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
@@ -16,7 +16,7 @@ services:
       - chatqna-redis-vector-db
       - chatqna-tei-embedding-service
     ports:
-      - "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"
+      - "${CHATQNA_REDIS_DATAPREP_PORT}:5000"
     environment:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
@@ -16,7 +16,7 @@ services:
       - chatqna-redis-vector-db
       - chatqna-tei-embedding-service
     ports:
-      - "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"
+      - "${CHATQNA_REDIS_DATAPREP_PORT}:5000"
     environment:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
@@ -16,7 +16,7 @@ services:
       - chatqna-redis-vector-db
       - chatqna-tei-embedding-service
     ports:
-      - "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"
+      - "${CHATQNA_REDIS_DATAPREP_PORT}:5000"
     environment:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
@@ -16,7 +16,7 @@ services:
       - chatqna-redis-vector-db
       - chatqna-tei-embedding-service
     ports:
-      - "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"
+      - "${CHATQNA_REDIS_DATAPREP_PORT:-5000}:5000"
    environment:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
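The repeated port-mapping change above hinges on shell-style parameter expansion, which Docker Compose also applies inside compose files: `${VAR:-default}` falls back to the default when `VAR` is unset or empty, while plain `${VAR}` expands to an empty string and produces an invalid mapping. A quick sketch of the difference (values are illustrative):

```bash
unset CHATQNA_REDIS_DATAPREP_PORT
echo "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"   # -> 18103:5000 (inline default kicks in)
echo "${CHATQNA_REDIS_DATAPREP_PORT}:5000"          # -> :5000 (broken port mapping)

export CHATQNA_REDIS_DATAPREP_PORT=18103            # what the set_env scripts below provide
echo "${CHATQNA_REDIS_DATAPREP_PORT}:5000"          # -> 18103:5000
```

Dropping the inline default therefore makes these compose files depend on the `CHATQNA_REDIS_DATAPREP_PORT` export added in the set_env hunks that follow.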
@@ -2,17 +2,17 @@
 # Copyright (C) 2025 Advanced Micro Devices, Inc.
 
-export HOST_IP=${ip_address}
-export HOST_IP_EXTERNAL=${ip_address}
+export HOST_IP=''
+export HOST_IP_EXTERNAL=''
 
 export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
 export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
 export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
 export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
 
-export CHATQNA_BACKEND_SERVICE_PORT=8888
-export CHATQNA_FRONTEND_SERVICE_PORT=5173
-export CHATQNA_NGINX_PORT=80
+export CHATQNA_BACKEND_SERVICE_PORT=18102
+export CHATQNA_FRONTEND_SERVICE_PORT=18101
+export CHATQNA_NGINX_PORT=18104
 export CHATQNA_REDIS_DATAPREP_PORT=18103
 export CHATQNA_REDIS_RETRIEVER_PORT=7000
 export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
@@ -2,18 +2,18 @@
 # Copyright (C) 2025 Advanced Micro Devices, Inc.
 
-export HOST_IP=${ip_address}
-export HOST_IP_EXTERNAL=${ip_address}
+export HOST_IP=''
+export HOST_IP_EXTERNAL=''
 
 export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
 export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
 export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
 export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
 
-export CHATQNA_BACKEND_SERVICE_PORT=8888
-export CHATQNA_FRONTEND_SERVICE_PORT=5173
+export CHATQNA_BACKEND_SERVICE_PORT=18102
+export CHATQNA_FRONTEND_SERVICE_PORT=18101
 export CHATQNA_LLM_FAQGEN_PORT=18011
-export CHATQNA_NGINX_PORT=80
+export CHATQNA_NGINX_PORT=18104
 export CHATQNA_REDIS_DATAPREP_PORT=18103
 export CHATQNA_REDIS_RETRIEVER_PORT=7000
 export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
@@ -2,18 +2,18 @@
 # Copyright (C) 2025 Advanced Micro Devices, Inc.
 
-export HOST_IP=${ip_address}
-export HOST_IP_EXTERNAL=${ip_address}
+export HOST_IP=''
+export HOST_IP_EXTERNAL=''
 
 export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
 export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
 export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
 export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
 
-export CHATQNA_BACKEND_SERVICE_PORT=8888
-export CHATQNA_FRONTEND_SERVICE_PORT=5173
+export CHATQNA_BACKEND_SERVICE_PORT=18102
+export CHATQNA_FRONTEND_SERVICE_PORT=18101
 export CHATQNA_LLM_FAQGEN_PORT=18011
-export CHATQNA_NGINX_PORT=80
+export CHATQNA_NGINX_PORT=18104
 export CHATQNA_REDIS_DATAPREP_PORT=18103
 export CHATQNA_REDIS_RETRIEVER_PORT=7000
 export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
@@ -2,17 +2,17 @@
 # Copyright (C) 2025 Advanced Micro Devices, Inc.
 
-export HOST_IP=${ip_address}
-export HOST_IP_EXTERNAL=${ip_address}
+export HOST_IP=''
+export HOST_IP_EXTERNAL=''
 
 export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
 export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
 export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
 export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
 
-export CHATQNA_BACKEND_SERVICE_PORT=8888
-export CHATQNA_FRONTEND_SERVICE_PORT=5173
-export CHATQNA_NGINX_PORT=80
+export CHATQNA_BACKEND_SERVICE_PORT=18102
+export CHATQNA_FRONTEND_SERVICE_PORT=18101
+export CHATQNA_NGINX_PORT=18104
 export CHATQNA_REDIS_DATAPREP_PORT=18103
 export CHATQNA_REDIS_RETRIEVER_PORT=7000
 export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
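Switching `HOST_IP` from `${ip_address}` to an empty string means these scripts no longer silently inherit whatever `ip_address` happens to be in the caller's environment; deployments must set it explicitly. A hedged sketch of a guard a caller might add (the `hostname -I` heuristic is an assumption, mirroring the test scripts later in this diff):

```bash
# Pick the first local address and refuse to continue without one.
export HOST_IP=$(hostname -I | awk '{print $1}')
if [ -z "$HOST_IP" ]; then
    echo "HOST_IP could not be determined; set it manually before deploying" >&2
    exit 1
fi
```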
@@ -1,8 +1,6 @@
 # Copyright (C) 2025 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
-if ls *.json 1> /dev/null 2>&1; then
-  rm *.json
-fi
+rm *.json
 wget https://raw.githubusercontent.com/opea-project/GenAIEval/refs/heads/main/evals/benchmark/grafana/chatqna_megaservice_grafana.json
 wget https://raw.githubusercontent.com/opea-project/GenAIEval/refs/heads/main/evals/benchmark/grafana/qdrant_grafana.json
 wget https://raw.githubusercontent.com/opea-project/GenAIEval/refs/heads/main/evals/benchmark/grafana/milvus_grafana.json
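One side effect of this simplification: a bare `rm *.json` exits non-zero when no dashboards are present, which matters once a script runs under `set -e` as the test scripts below do. A common one-line alternative that keeps the removed guard's behavior (a sketch, not what the diff does):

```bash
rm -f *.json   # -f ignores nonexistent operands, so an empty directory is not an error
```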
@@ -7,9 +7,6 @@ pushd "../../../../../" > /dev/null
 source .set_env.sh
 popd > /dev/null
 
-export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
-export HF_TOKEN=${HF_TOKEN}
-export host_ip=${ip_address}
 export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
 export RERANK_MODEL_ID="BAAI/bge-reranker-base"
 export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
@@ -43,7 +43,7 @@ Some HuggingFace resources, such as some models, are only accessible if you have
 
 ### Configure the Deployment Environment
 
-To set up environment variables for deploying ChatQnA services, source the _setup_env.sh_ script in this directory (If using faqgen or guardrails, source the _set_env_faqgen.sh_):
+To set up environment variables for deploying ChatQnA services, source the _setup_env.sh_ script in this directory:
 
 ```
 source ./set_env.sh
@@ -4,20 +4,12 @@
 # SPDX-License-Identifier: Apache-2.0
 
 # Function to prompt for input and set environment variables
-NON_INTERACTIVE=${NON_INTERACTIVE:-false}
-
 prompt_for_env_var() {
   local var_name="$1"
   local prompt_message="$2"
   local default_value="$3"
   local mandatory="$4"
 
-  if [[ "$NON_INTERACTIVE" == "true" ]]; then
-    echo "Non-interactive environment detected. Setting $var_name to default: $default_value"
-    export "$var_name"="$default_value"
-    return
-  fi
-
  if [[ "$mandatory" == "true" ]]; then
     while [[ -z "$value" ]]; do
       read -p "$prompt_message [default: \"${default_value}\"]: " value
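For reference, the removed guard is what made these prompts skippable in CI; with it in place a caller could source the script unattended (usage reconstructed from the removed lines, assuming a bash shell):

```bash
# Every prompt_for_env_var call falls back to its default instead of blocking on read.
NON_INTERACTIVE=true source ./set_env.sh
```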
@@ -42,7 +34,7 @@ popd > /dev/null
 
 # Prompt the user for each required environment variable
 prompt_for_env_var "EMBEDDING_MODEL_ID" "Enter the EMBEDDING_MODEL_ID" "BAAI/bge-base-en-v1.5" false
-prompt_for_env_var "HUGGINGFACEHUB_API_TOKEN" "Enter the HUGGINGFACEHUB_API_TOKEN" "${HF_TOKEN}" true
+prompt_for_env_var "HUGGINGFACEHUB_API_TOKEN" "Enter the HUGGINGFACEHUB_API_TOKEN" "" true
 prompt_for_env_var "RERANK_MODEL_ID" "Enter the RERANK_MODEL_ID" "BAAI/bge-reranker-base" false
 prompt_for_env_var "LLM_MODEL_ID" "Enter the LLM_MODEL_ID" "meta-llama/Meta-Llama-3-8B-Instruct" false
 prompt_for_env_var "INDEX_NAME" "Enter the INDEX_NAME" "rag-redis" false
@@ -50,40 +42,34 @@ prompt_for_env_var "NUM_CARDS" "Enter the number of Gaudi devices" "1" false
 prompt_for_env_var "host_ip" "Enter the host_ip" "$(curl ifconfig.me)" false
 
 #Query for enabling http_proxy
-prompt_for_env_var "http_proxy" "Enter the http_proxy." "${http_proxy}" false
+prompt_for_env_var "http_proxy" "Enter the http_proxy." "" false
 
 #Query for enabling https_proxy
-prompt_for_env_var "http_proxy" "Enter the http_proxy." "${https_proxy}" false
+prompt_for_env_var "https_proxy" "Enter the https_proxy." "" false
 
 #Query for enabling no_proxy
-prompt_for_env_var "no_proxy" "Enter the no_proxy." "${no_proxy}" false
+prompt_for_env_var "no_proxy" "Enter the no_proxy." "" false
 
 # Query for enabling logging
-if [[ "$NON_INTERACTIVE" == "true" ]]; then
-  prompt_for_env_var "LOGFLAG" "Enable logging? (yes/no): " "true" false
-  export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
-  export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
-  export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
-  telemetry_flag=true
-else
-  read -p "Enable logging? (yes/no): " logging && logging=$(echo "$logging" | tr '[:upper:]' '[:lower:]')
-  if [[ "$logging" == "yes" || "$logging" == "y" ]]; then
-    export LOGFLAG=true
-  else
-    export LOGFLAG=false
-  fi
-  # Query for enabling OpenTelemetry Tracing Endpoint
-  read -p "Enable OpenTelemetry Tracing Endpoint? (yes/no): " telemetry && telemetry=$(echo "$telemetry" | tr '[:upper:]' '[:lower:]')
-  if [[ "$telemetry" == "yes" || "$telemetry" == "y" ]]; then
-    export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
-    export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
-    export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
-    telemetry_flag=true
-  else
-    telemetry_flag=false
-  fi
-fi
+read -p "Enable logging? (yes/no): " logging && logging=$(echo "$logging" | tr '[:upper:]' '[:lower:]')
+if [[ "$logging" == "yes" || "$logging" == "y" ]]; then
+  export LOGFLAG=true
+else
+  export LOGFLAG=false
+fi
+
+# Query for enabling OpenTelemetry Tracing Endpoint
+read -p "Enable OpenTelemetry Tracing Endpoint? (yes/no): " telemetry && telemetry=$(echo "$telemetry" | tr '[:upper:]' '[:lower:]')
+if [[ "$telemetry" == "yes" || "$telemetry" == "y" ]]; then
+  export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
+  export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
+  export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
+  telemetry_flag=true
+  pushd "grafana/dashboards" > /dev/null
+  source download_opea_dashboard.sh
+  popd > /dev/null
+else
+  telemetry_flag=false
+fi
 
 # Generate the .env file
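The `JAEGER_IP` line that both variants share is a compact way to find the host's outbound source address: `ip route get` prints the route the kernel would pick, and the `\K` in the Perl-regex grep keeps only the token following `src `. An illustrative run (addresses are examples):

```bash
ip route get 8.8.8.8
# 8.8.8.8 via 192.168.1.1 dev eth0 src 192.168.1.42 uid 1000

ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+'
# 192.168.1.42
```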
@@ -1,32 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (C) 2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-pushd "../../../../../" > /dev/null
-source .set_env.sh
-popd > /dev/null
-
-export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
-export HF_TOKEN=${HF_TOKEN}
-export host_ip=${ip_address}
-export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
-export RERANK_MODEL_ID="BAAI/bge-reranker-base"
-export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
-export INDEX_NAME="rag-redis"
-export NUM_CARDS=1
-export VLLM_SKIP_WARMUP=true
-export LOGFLAG=True
-export http_proxy=${http_proxy}
-export https_proxy=${https_proxy}
-export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-gaudi-backend-server,chatqna-gaudi-ui-server,chatqna-gaudi-nginx-server"
-
-export LLM_ENDPOINT_PORT=8010
-export LLM_SERVER_PORT=9001
-export CHATQNA_BACKEND_PORT=8888
-export CHATQNA_REDIS_VECTOR_PORT=6377
-export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
-export CHATQNA_FRONTEND_SERVICE_PORT=5175
-export NGINX_PORT=80
-export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM"
-export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
@@ -1,123 +0,0 @@
-# ChatQnA E2E test scripts
-
-## Set the required environment variable
-
-```bash
-export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
-```
-
-## Run test
-
-On Intel Xeon with TGI:
-
-```bash
-bash test_compose_tgi_on_xeon.sh
-```
-
-On Intel Xeon with vLLM:
-
-```bash
-bash test_compose_on_xeon.sh
-```
-
-On Intel Xeon with MariaDB Vector:
-
-```bash
-bash test_compose_mariadb_on_xeon.sh
-```
-
-On Intel Xeon with Pinecone:
-
-```bash
-bash test_compose_pinecone_on_xeon.sh
-```
-
-On Intel Xeon with Milvus
-
-```bash
-bash test_compose_milvus_on_xeon.sh
-```
-
-On Intel Xeon with Qdrant
-
-```bash
-bash test_compose_qdrant_on_xeon.sh
-```
-
-On Intel Xeon without Rerank:
-
-```bash
-bash test_compose_without_rerank_on_xeon.sh
-```
-
-On Intel Gaudi with TGI:
-
-```bash
-bash test_compose_tgi_on_gaudi.sh
-```
-
-On Intel Gaudi with vLLM:
-
-```bash
-bash test_compose_on_gaudi.sh
-```
-
-On Intel Gaudi with Guardrails:
-
-```bash
-bash test_compose_guardrails_on_gaudi.sh
-```
-
-On Intel Gaudi without Rerank:
-
-```bash
-bash test_compose_without_rerank_on_gaudi.sh
-```
-
-On AMD ROCm with TGI:
-
-```bash
-bash test_compose_on_rocm.sh
-```
-
-On AMD ROCm with vLLM:
-
-```bash
-bash test_compose_vllm_on_rocm.sh
-```
-
-Test FAQ Generation On Intel Xeon with TGI:
-
-```bash
-bash test_compose_faqgen_tgi_on_xeon.sh
-```
-
-Test FAQ Generation On Intel Xeon with vLLM:
-
-```bash
-bash test_compose_faqgen_on_xeon.sh
-```
-
-Test FAQ Generation On Intel Gaudi with TGI:
-
-```bash
-bash test_compose_faqgen_tgi_on_gaudi.sh
-```
-
-Test FAQ Generation On Intel Gaudi with vLLM:
-
-```bash
-bash test_compose_faqgen_on_gaudi.sh
-```
-
-Test FAQ Generation On AMD ROCm with TGI:
-
-```bash
-bash test_compose_faqgen_on_rocm.sh
-```
-
-Test FAQ Generation On AMD ROCm with vLLM:
-
-```bash
-bash test_compose_faqgen_vllm_on_rocm.sh
-```
@@ -24,7 +24,7 @@ function build_docker_images() {
     docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
     popd && sleep 1s
     git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
@@ -36,7 +36,27 @@ function build_docker_images() {
 
 function start_services() {
     cd $WORKPATH/docker_compose/intel/hpu/gaudi
-    source set_env_faqgen.sh
+    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+    export RERANK_MODEL_ID="BAAI/bge-reranker-base"
+    export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
+    export NUM_CARDS=1
+    export INDEX_NAME="rag-redis"
+    export host_ip=${ip_address}
+    export LLM_ENDPOINT_PORT=8010
+    export LLM_SERVER_PORT=9001
+    export CHATQNA_BACKEND_PORT=8888
+    export CHATQNA_REDIS_VECTOR_PORT=6377
+    export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
+    export CHATQNA_FRONTEND_SERVICE_PORT=5175
+    export NGINX_PORT=80
+    export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM"
+    export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
+    export HF_TOKEN=${HF_TOKEN}
+    export VLLM_SKIP_WARMUP=true
+    export LOGFLAG=True
+    export http_proxy=${http_proxy}
+    export https_proxy=${https_proxy}
+    export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-gaudi-backend-server,chatqna-gaudi-ui-server,chatqna-gaudi-nginx-server"
 
     # Start Docker Containers
     docker compose -f compose_faqgen.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
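After `start_services` brings the stack up, a minimal smoke test against the inline values above would look like this (a sketch; the `/v1/chatqna` path matches `CHATQNA_BACKEND_SERVICE_ENDPOINT` elsewhere in this diff, and the payload shape is an assumption):

```bash
docker compose -f compose_faqgen.yaml ps   # all services should report running/healthy
curl -s "http://${host_ip}:8888/v1/chatqna" \
    -H 'Content-Type: application/json' \
    -d '{"messages": "What is OPEA?"}'
```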
@@ -15,7 +15,44 @@ WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')
 
-source $WORKPATH/docker_compose/amd/gpu/rocm/set_env_faqgen.sh
+export HOST_IP=${ip_address}
+export HOST_IP_EXTERNAL=${ip_address}
+
+export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
+export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
+export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
+
+export CHATQNA_BACKEND_SERVICE_PORT=8888
+export CHATQNA_FRONTEND_SERVICE_PORT=5173
+export CHATQNA_LLM_FAQGEN_PORT=18011
+export CHATQNA_NGINX_PORT=80
+export CHATQNA_REDIS_DATAPREP_PORT=18103
+export CHATQNA_REDIS_RETRIEVER_PORT=7000
+export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
+export CHATQNA_REDIS_VECTOR_PORT=6379
+export CHATQNA_TEI_EMBEDDING_PORT=18090
+export CHATQNA_TEI_RERANKING_PORT=18808
+export CHATQNA_TGI_SERVICE_PORT=18008
+
+export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna"
+export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP}
+export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete"
+export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get"
+export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest"
+export CHATQNA_EMBEDDING_SERVICE_HOST_IP=${HOST_IP}
+export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP}
+export CHATQNA_LLM_SERVICE_HOST_IP=${HOST_IP}
+export CHATQNA_LLM_ENDPOINT="http://${HOST_IP}:${CHATQNA_TGI_SERVICE_PORT}"
+export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP}
+export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}"
+export CHATQNA_RERANK_SERVICE_HOST_IP=${HOST_IP}
+export CHATQNA_RETRIEVER_SERVICE_HOST_IP=${HOST_IP}
+export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}"
+
+export CHATQNA_BACKEND_SERVICE_NAME=chatqna
+export CHATQNA_INDEX_NAME="rag-redis"
+export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
 
 export PATH="~/miniconda3/bin:$PATH"
@@ -37,16 +37,26 @@ function build_docker_images() {
 
 function start_services() {
     cd $WORKPATH/docker_compose/intel/cpu/xeon
     export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
     export RERANK_MODEL_ID="BAAI/bge-reranker-base"
     export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
     export INDEX_NAME="rag-redis"
     export host_ip=${ip_address}
+    export LLM_ENDPOINT_PORT=8010
+    export LLM_SERVER_PORT=9001
+    export CHATQNA_BACKEND_PORT=8888
+    export CHATQNA_REDIS_VECTOR_PORT=6377
+    export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
+    export CHATQNA_FRONTEND_SERVICE_PORT=5175
+    export NGINX_PORT=80
+    export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM"
+    export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
+    export HF_TOKEN=${HF_TOKEN}
+    export VLLM_SKIP_WARMUP=true
     export LOGFLAG=True
     export http_proxy=${http_proxy}
     export https_proxy=${https_proxy}
     export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-xeon-backend-server,chatqna-xeon-ui-server,chatqna-xeon-nginx-server"
-    source set_env.sh
 
     # Start Docker Containers
     docker compose -f compose_faqgen.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -33,8 +33,25 @@ function build_docker_images() {
 
 function start_services() {
     cd $WORKPATH/docker_compose/intel/hpu/gaudi
-    source set_env_faqgen.sh
+    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+    export RERANK_MODEL_ID="BAAI/bge-reranker-base"
+    export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
+    export INDEX_NAME="rag-redis"
+    export host_ip=${ip_address}
+    export LLM_ENDPOINT_PORT=8010
+    export LLM_SERVER_PORT=9001
+    export CHATQNA_BACKEND_PORT=8888
+    export CHATQNA_REDIS_VECTOR_PORT=6377
+    export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
+    export CHATQNA_FRONTEND_SERVICE_PORT=5175
+    export NGINX_PORT=80
+    export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
+    export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
+    export HF_TOKEN=${HF_TOKEN}
+    export LOGFLAG=True
+    export http_proxy=${http_proxy}
+    export https_proxy=${https_proxy}
+    export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-gaudi-backend-server,chatqna-gaudi-ui-server,chatqna-gaudi-nginx-server"
 
     # Start Docker Containers
     docker compose -f compose_faqgen_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -37,16 +37,25 @@ function build_docker_images() {
 
 function start_services() {
     cd $WORKPATH/docker_compose/intel/cpu/xeon
     export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
     export RERANK_MODEL_ID="BAAI/bge-reranker-base"
     export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
     export INDEX_NAME="rag-redis"
     export host_ip=${ip_address}
+    export LLM_ENDPOINT_PORT=8010
+    export LLM_SERVER_PORT=9001
+    export CHATQNA_BACKEND_PORT=8888
+    export CHATQNA_REDIS_VECTOR_PORT=6377
+    export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
+    export CHATQNA_FRONTEND_SERVICE_PORT=5175
+    export NGINX_PORT=80
+    export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
+    export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
+    export HF_TOKEN=${HF_TOKEN}
     export LOGFLAG=True
     export http_proxy=${http_proxy}
     export https_proxy=${https_proxy}
     export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-xeon-backend-server,chatqna-xeon-ui-server,chatqna-xeon-nginx-server"
-    source set_env.sh
 
     # Start Docker Containers
     docker compose -f compose_faqgen_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -14,7 +14,42 @@ WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')
 
-source $WORKPATH/docker_compose/amd/gpu/rocm/set_env_faqgen_vllm.sh
+export HOST_IP=${ip_address}
+export HOST_IP_EXTERNAL=${ip_address}
+
+export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
+export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
+export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
+
+export CHATQNA_BACKEND_SERVICE_PORT=8888
+export CHATQNA_FRONTEND_SERVICE_PORT=5173
+export CHATQNA_LLM_FAQGEN_PORT=18011
+export CHATQNA_NGINX_PORT=80
+export CHATQNA_REDIS_DATAPREP_PORT=18103
+export CHATQNA_REDIS_RETRIEVER_PORT=7000
+export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
+export CHATQNA_REDIS_VECTOR_PORT=6379
+export CHATQNA_TEI_EMBEDDING_PORT=18090
+export CHATQNA_TEI_RERANKING_PORT=18808
+export CHATQNA_VLLM_SERVICE_PORT=18008
+
+export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna"
+export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP_EXTERNAL}
+export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete"
+export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get"
+export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest"
+export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP}
+export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP}
+export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}"
+export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}"
+export LLM_ENDPOINT="http://${HOST_IP}:${CHATQNA_VLLM_SERVICE_PORT}"
+
+export CHATQNA_BACKEND_SERVICE_NAME=chatqna
+export CHATQNA_INDEX_NAME="rag-redis"
+export CHATQNA_TYPE="CHATQNA_FAQGEN"
+export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM"
 
 function build_docker_images() {
     opea_branch=${opea_branch:-"main"}
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-set -xe
+set -e
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
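This same one-character change appears in each test script below. `set -e` keeps fail-fast behavior; dropping `-x` stops every command from being traced to stderr, which shrinks CI logs and avoids printing values such as tokens passed on command lines. The difference in brief:

```bash
set -e          # abort the script on the first failing command
echo "before"   # runs
false           # non-zero exit status: the script stops here under -e
echo "after"    # never reached

# With 'set -xe', each line above would additionally be traced, e.g. '+ echo before'.
```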
@@ -24,7 +24,7 @@ function build_docker_images() {
     docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
     popd && sleep 1s
     git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
@@ -36,8 +36,14 @@ function build_docker_images() {
 
 function start_services() {
     cd $WORKPATH/docker_compose/intel/hpu/gaudi
-    source set_env_faqgen.sh
+    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+    export RERANK_MODEL_ID="BAAI/bge-reranker-base"
+    export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
+    export NUM_CARDS=1
+    export INDEX_NAME="rag-redis"
+    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
     export host_ip=${ip_address}
+    export GURADRAILS_MODEL_ID="meta-llama/Meta-Llama-Guard-2-8B"
 
     # Start Docker Containers
     docker compose -f compose_guardrails.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -2,7 +2,7 @@
 # Copyright (C) 2025 MariaDB Foundation
 # SPDX-License-Identifier: Apache-2.0
 
-set -xe
+set -e
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -39,8 +39,14 @@ function build_docker_images() {
 
 function start_services() {
     cd $WORKPATH/docker_compose/intel/cpu/xeon
-    source set_env_mariadb.sh
+    export MARIADB_DATABASE="vectordb"
+    export MARIADB_USER="chatqna"
+    export MARIADB_PASSWORD="test"
+    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+    export RERANK_MODEL_ID="BAAI/bge-reranker-base"
+    export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
+    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
     export host_ip=${ip_address}
 
     # Start Docker Containers
     docker compose -f compose_mariadb.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-set -xe
+set -e
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -39,8 +39,11 @@ function build_docker_images() {
 }
 function start_services() {
     cd $WORKPATH/docker_compose/intel/cpu/xeon/
-    source set_env.sh
+    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+    export RERANK_MODEL_ID="BAAI/bge-reranker-base"
+    export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
+    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
     export LOGFLAG=true
 
     # Start Docker Containers
     docker compose -f compose_milvus.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-set -xe
+set -e
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -24,7 +24,7 @@ function build_docker_images() {
     docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
     popd && sleep 1s
     git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
@@ -36,10 +36,16 @@ function build_docker_images() {
 
 function start_services() {
     cd $WORKPATH/docker_compose/intel/hpu/gaudi
-    export NON_INTERACTIVE=true
+    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+    export RERANK_MODEL_ID="BAAI/bge-reranker-base"
+    export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
+    export NUM_CARDS=1
+    export INDEX_NAME="rag-redis"
+    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
     export host_ip=${ip_address}
-    export telemetry=yes
-    source set_env.sh
+    export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
+    export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
+    export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
 
     # Start Docker Containers
     docker compose -f compose.yaml -f compose.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
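The compose invocation here relies on file merging: with multiple `-f` flags, Docker Compose layers the files in order, so `compose.telemetry.yaml` can add telemetry-related settings on top of `compose.yaml` without duplicating it. To inspect the merged result (standard Compose behavior, not specific to this repo):

```bash
docker compose -f compose.yaml -f compose.telemetry.yaml config
```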
@@ -15,7 +15,41 @@ WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')
 
-source $WORKPATH/docker_compose/amd/gpu/rocm/set_env.sh
+export HOST_IP=${ip_address}
+export HOST_IP_EXTERNAL=${ip_address}
+
+export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
+export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
+export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
+
+export CHATQNA_BACKEND_SERVICE_PORT=8888
+export CHATQNA_FRONTEND_SERVICE_PORT=5173
+export CHATQNA_NGINX_PORT=80
+export CHATQNA_REDIS_DATAPREP_PORT=18103
+export CHATQNA_REDIS_RETRIEVER_PORT=7000
+export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
+export CHATQNA_REDIS_VECTOR_PORT=6379
+export CHATQNA_TEI_EMBEDDING_PORT=18090
+export CHATQNA_TEI_RERANKING_PORT=18808
+export CHATQNA_TGI_SERVICE_PORT=18008
+
+export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna"
+export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP}
+export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete"
+export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get"
+export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest"
+export CHATQNA_EMBEDDING_SERVICE_HOST_IP=${HOST_IP}
+export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP}
+export CHATQNA_LLM_SERVICE_HOST_IP=${HOST_IP}
+export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP}
+export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}"
+export CHATQNA_RERANK_SERVICE_HOST_IP=${HOST_IP}
+export CHATQNA_RETRIEVER_SERVICE_HOST_IP=${HOST_IP}
+export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}"
+
+export CHATQNA_BACKEND_SERVICE_NAME=chatqna
+export CHATQNA_INDEX_NAME="rag-redis"
 
 export PATH="~/miniconda3/bin:$PATH"
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-set -xe
+set -e
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -40,7 +40,15 @@ function build_docker_images() {
 function start_services() {
     cd $WORKPATH/docker_compose/intel/cpu/xeon
 
-    source set_env.sh
+    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+    export RERANK_MODEL_ID="BAAI/bge-reranker-base"
+    export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
+    export INDEX_NAME="rag-redis"
+    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
+    export host_ip=${ip_address}
+    export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
+    export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
+    export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
 
     # Start Docker Containers
     docker compose -f compose.yaml -f compose.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-set -xe
+set -e
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -41,11 +41,14 @@ function build_docker_images() {
 function start_services() {
     cd $WORKPATH/docker_compose/intel/cpu/xeon/
     export no_proxy=${no_proxy},${ip_address}
     export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
     export RERANK_MODEL_ID="BAAI/bge-reranker-base"
     export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
-    source set_env.sh
+    export PINECONE_API_KEY=${PINECONE_KEY_LANGCHAIN_TEST}
+    export PINECONE_INDEX_NAME="langchain-test"
+    export INDEX_NAME="langchain-test"
+    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
     export LOGFLAG=true
 
     sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-set -xe
+set -e
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -40,8 +40,11 @@ function build_docker_images() {
 function start_services() {
     cd $WORKPATH/docker_compose/intel/cpu/xeon
 
-    source set_env.sh
+    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+    export RERANK_MODEL_ID="BAAI/bge-reranker-base"
+    export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
+    export INDEX_NAME="rag-qdrant"
     export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
 
     sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-set -xe
+set -e
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -32,10 +32,15 @@ function build_docker_images() {
 function start_services() {
     cd $WORKPATH/docker_compose/intel/hpu/gaudi
-    export NON_INTERACTIVE=true
     export host_ip=${ip_address}
-    export telemetry=yes
-    source set_env.sh
+    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+    export RERANK_MODEL_ID="BAAI/bge-reranker-base"
+    export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
+    export NUM_CARDS=1
+    export INDEX_NAME="rag-redis"
     export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
+    export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
+    export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
+    export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
 
     # Start Docker Containers
     docker compose -f compose_tgi.yaml -f compose_tgi.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-set -xe
+set -e
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -33,7 +33,14 @@ function build_docker_images() {
 function start_services() {
     cd $WORKPATH/docker_compose/intel/cpu/xeon
 
-    source set_env.sh
+    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+    export RERANK_MODEL_ID="BAAI/bge-reranker-base"
+    export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
+    export INDEX_NAME="rag-redis"
+    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
+    export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
+    export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
+    export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
 
     # Start Docker Containers
     docker compose -f compose_tgi.yaml -f compose_tgi.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-set -xe
+set -e
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -14,7 +14,42 @@ WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')
 
-source $WORKPATH/docker_compose/amd/gpu/rocm/set_env_vllm.sh
+export HOST_IP=${ip_address}
+export HOST_IP_EXTERNAL=${ip_address}
+
+export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
+export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
+export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
+
+export CHATQNA_BACKEND_SERVICE_PORT=8888
+export CHATQNA_FRONTEND_SERVICE_PORT=5173
+export CHATQNA_NGINX_PORT=80
+export CHATQNA_REDIS_DATAPREP_PORT=18103
+export CHATQNA_REDIS_RETRIEVER_PORT=7000
+export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
+export CHATQNA_REDIS_VECTOR_PORT=6379
+export CHATQNA_TEI_EMBEDDING_PORT=18090
+export CHATQNA_TEI_RERANKING_PORT=18808
+export CHATQNA_VLLM_SERVICE_PORT=18008
+
+export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna"
+export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP_EXTERNAL}
+export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete"
+export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get"
+export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest"
+export CHATQNA_EMBEDDING_SERVICE_HOST_IP=${HOST_IP}
+export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP}
+export CHATQNA_LLM_SERVICE_HOST_IP=${HOST_IP}
+export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP}
+export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}"
+export CHATQNA_RERANK_SERVICE_HOST_IP=${HOST_IP}
+export CHATQNA_RETRIEVER_SERVICE_HOST_IP=${HOST_IP}
+export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}"
+
+export CHATQNA_BACKEND_SERVICE_NAME=chatqna
+export CHATQNA_INDEX_NAME="rag-redis"
 
 function build_docker_images() {
     opea_branch=${opea_branch:-"main"}
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-set -xe
+set -e
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -24,7 +24,7 @@ function build_docker_images() {
     docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
     popd && sleep 1s
     git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
@@ -36,8 +36,11 @@ function build_docker_images() {
 
 function start_services() {
     cd $WORKPATH/docker_compose/intel/hpu/gaudi
-    export NON_INTERACTIVE=true
-    source set_env.sh
+    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+    export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
+    export NUM_CARDS=1
+    export INDEX_NAME="rag-redis"
+    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
 
     # Start Docker Containers
     docker compose -f compose_without_rerank.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-set -xe
+set -e
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -41,7 +41,10 @@ function build_docker_images() {
 function start_services() {
     cd $WORKPATH/docker_compose/intel/cpu/xeon
 
-    source set_env.sh
+    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+    export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
+    export INDEX_NAME="rag-redis"
+    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
 
     # Start Docker Containers
     docker compose -f compose_without_rerank.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -5,8 +5,8 @@
 # SPDX-License-Identifier: Apache-2.0
 
 ### The IP address or domain name of the server on which the application is running
-export HOST_IP=${ip_address}
-export EXTERNAL_HOST_IP=${ip_address}
+export HOST_IP=''
+export EXTERNAL_HOST_IP=''
 
 ### The port of the TGI service. On this port, the TGI service will accept connections
 export CODEGEN_TGI_SERVICE_PORT=8028

@@ -27,7 +27,7 @@ export CODEGEN_TGI_LLM_ENDPOINT="http://${HOST_IP}:${CODEGEN_TGI_SERVICE_PORT}"
 export CODEGEN_MEGA_SERVICE_HOST_IP=${HOST_IP}
 
 ### The port for CodeGen backend service
-export CODEGEN_BACKEND_SERVICE_PORT=7778
+export CODEGEN_BACKEND_SERVICE_PORT=18150
 
 ### The URL of CodeGen backend service, used by the frontend service
 export CODEGEN_BACKEND_SERVICE_URL="http://${EXTERNAL_HOST_IP}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen"

@@ -36,4 +36,4 @@ export CODEGEN_BACKEND_SERVICE_URL="http://${EXTERNAL_HOST_IP}:${CODEGEN_BACKEND
 export CODEGEN_LLM_SERVICE_HOST_IP=${HOST_IP}
 
 ### The CodeGen service UI port
-export CODEGEN_UI_SERVICE_PORT=5173
+export CODEGEN_UI_SERVICE_PORT=18151
@@ -5,8 +5,8 @@
 # SPDX-License-Identifier: Apache-2.0
 
 ### The IP address or domain name of the server on which the application is running
-export HOST_IP=${ip_address}
-export EXTERNAL_HOST_IP=${ip_address}
+export HOST_IP=''
+export EXTERNAL_HOST_IP=''
 
 ### The port of the vLLM service. On this port, the TGI service will accept connections
 export CODEGEN_VLLM_SERVICE_PORT=8028

@@ -25,7 +25,7 @@ export CODEGEN_LLM_SERVICE_PORT=9000
 export CODEGEN_MEGA_SERVICE_HOST_IP=${HOST_IP}
 
 ### The port for CodeGen backend service
-export CODEGEN_BACKEND_SERVICE_PORT=7778
+export CODEGEN_BACKEND_SERVICE_PORT=18150
 
 ### The URL of CodeGen backend service, used by the frontend service
 export CODEGEN_BACKEND_SERVICE_URL="http://${EXTERNAL_HOST_IP}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen"

@@ -34,4 +34,4 @@ export CODEGEN_BACKEND_SERVICE_URL="http://${EXTERNAL_HOST_IP}:${CODEGEN_BACKEND
 export CODEGEN_LLM_SERVICE_HOST_IP=${HOST_IP}
 
 ### The CodeGen service UI port
-export CODEGEN_UI_SERVICE_PORT=5173
+export CODEGEN_UI_SERVICE_PORT=18151
@@ -6,10 +6,22 @@ This README provides instructions for deploying the CodeGen application using Do
 
 - [Overview](#overview)
 - [Prerequisites](#prerequisites)
-- [Quick Start Deployment](#quick-start-deployment)
+- [Quick Start](#quick-start)
+- [Available Deployment Options](#available-deployment-options)
+  - [Default: vLLM-based Deployment (`--profile codegen-xeon-vllm`)](#default-vllm-based-deployment---profile-codegen-xeon-vllm)
+  - [TGI-based Deployment (`--profile codegen-xeon-tgi`)](#tgi-based-deployment---profile-codegen-xeon-tgi)
+- [Configuration Parameters](#configuration-parameters)
+  - [Environment Variables](#environment-variables)
+  - [Compose Profiles](#compose-profiles)
 - [Building Custom Images (Optional)](#building-custom-images-optional)
 - [Validate Services](#validate-services)
+  - [Check Container Status](#check-container-status)
+  - [Run Validation Script/Commands](#run-validation-scriptcommands)
 - [Accessing the User Interface (UI)](#accessing-the-user-interface-ui)
+  - [Gradio UI (Default)](#gradio-ui-default)
+  - [Svelte UI (Optional)](#svelte-ui-optional)
+  - [React UI (Optional)](#react-ui-optional)
+  - [VS Code Extension (Optional)](#vs-code-extension-optional)
 - [Troubleshooting](#troubleshooting)
 - [Stopping the Application](#stopping-the-application)
 - [Next Steps](#next-steps)
@@ -31,37 +43,27 @@ This guide focuses on running the pre-configured CodeGen service using Docker Co
    cd GenAIExamples/CodeGen/docker_compose/intel/cpu/xeon
    ```
 
-## Quick Start Deployment
+## Quick Start
 
 This uses the default vLLM-based deployment profile (`codegen-xeon-vllm`).
 
 1. **Configure Environment:**
-   Set required environment variables in your shell:
 
    ```bash
    # Replace with your host's external IP address (do not use localhost or 127.0.0.1)
    export HOST_IP="your_external_ip_address"
    # Replace with your Hugging Face Hub API token
    export HUGGINGFACEHUB_API_TOKEN="your_huggingface_token"
 
    # Optional: Configure proxy if needed
    # export http_proxy="your_http_proxy"
    # export https_proxy="your_https_proxy"
    # export no_proxy="localhost,127.0.0.1,${HOST_IP}" # Add other hosts if necessary
-   source ../../set_env.sh
+   source ../../../set_env.sh
    ```
 
-   _Note: The compose file might read additional variables from set_env.sh. Ensure all required variables like ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.) are set if not using defaults from the compose file._
-
-   For instance, edit the set_env.sh to change the LLM model
-
-   ```
-   export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
-   ```
-
-   can be changed to other model if needed
-
-   ```
-   export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-32B-Instruct"
-   ```
+   _Note: The compose file might read additional variables from a `.env` file or expect them defined elsewhere. Ensure all required variables like ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.) are set if not using defaults from the compose file._
 
 2. **Start Services (vLLM Profile):**
@@ -72,17 +74,17 @@
 3. **Validate:**
    Wait several minutes for models to download (especially the first time) and services to initialize. Check container logs (`docker compose logs -f <service_name>`) or proceed to the validation steps below.
 
-### Available Deployment Options
+## Available Deployment Options
 
 The `compose.yaml` file uses Docker Compose profiles to select the LLM serving backend.
 
-#### Default: vLLM-based Deployment (`--profile codegen-xeon-vllm`)
+### Default: vLLM-based Deployment (`--profile codegen-xeon-vllm`)
 
 - **Profile:** `codegen-xeon-vllm`
 - **Description:** Uses vLLM optimized for Intel CPUs as the LLM serving engine. This is the default profile used in the Quick Start.
 - **Services Deployed:** `codegen-vllm-server`, `codegen-llm-server`, `codegen-tei-embedding-server`, `codegen-retriever-server`, `redis-vector-db`, `codegen-dataprep-server`, `codegen-backend-server`, `codegen-gradio-ui-server`.
 
-#### TGI-based Deployment (`--profile codegen-xeon-tgi`)
+### TGI-based Deployment (`--profile codegen-xeon-tgi`)
 
 - **Profile:** `codegen-xeon-tgi`
 - **Description:** Uses Hugging Face Text Generation Inference (TGI) optimized for Intel CPUs as the LLM serving engine.
@@ -93,24 +95,24 @@ The `compose.yaml` file uses Docker Compose profiles to select the LLM serving b
|
||||
docker compose --profile codegen-xeon-tgi up -d
|
||||
```
|
||||
|
||||
### Configuration Parameters
|
||||
## Configuration Parameters
|
||||
|
||||
#### Environment Variables
|
||||
### Environment Variables
|
||||
|
Key parameters are configured via environment variables set before running `docker compose up`.

| Environment Variable | Description | Default (Set Externally) |
| :-------------------------------------- | :------------------------------------------------------------------------------------------------------------------ | :--------------------------------------------- | ------------------------------------ |
| `HOST_IP` | External IP address of the host machine. **Required.** | `your_external_ip_address` |
| `HUGGINGFACEHUB_API_TOKEN` | Your Hugging Face Hub token for model access. **Required.** | `your_huggingface_token` |
| `LLM_MODEL_ID` | Hugging Face model ID for the CodeGen LLM (used by TGI/vLLM service). Configured within `compose.yaml` environment. | `Qwen/Qwen2.5-Coder-7B-Instruct` |
| `EMBEDDING_MODEL_ID` | Hugging Face model ID for the embedding model (used by TEI service). Configured within `compose.yaml` environment. | `BAAI/bge-base-en-v1.5` |
| `LLM_ENDPOINT` | Internal URL for the LLM serving endpoint (used by `codegen-llm-server`). Configured in `compose.yaml`. | `http://codegen-vllm | tgi-server:9000/v1/chat/completions` |
| `TEI_EMBEDDING_ENDPOINT` | Internal URL for the Embedding service. Configured in `compose.yaml`. | `http://codegen-tei-embedding-server:80/embed` |
| `DATAPREP_ENDPOINT` | Internal URL for the Data Preparation service. Configured in `compose.yaml`. | `http://codegen-dataprep-server:80/dataprep` |
| `BACKEND_SERVICE_ENDPOINT` | External URL for the CodeGen Gateway (MegaService). Derived from `HOST_IP` and port `7778`. | `http://${HOST_IP}:7778/v1/codegen` |
| `*_PORT` (Internal) | Internal container ports (e.g., `80`, `6379`). Defined in `compose.yaml`. | N/A |
| `http_proxy` / `https_proxy`/`no_proxy` | Network proxy settings (if required). | `""` |

| Environment Variable | Description | Default (Set Externally) |
| :-------------------------------------- | :------------------------------------------------------------------------------------------------------------------ | :----------------------------------------------------------------------------------------------- |
| `HOST_IP` | External IP address of the host machine. **Required.** | `your_external_ip_address` |
| `HUGGINGFACEHUB_API_TOKEN` | Your Hugging Face Hub token for model access. **Required.** | `your_huggingface_token` |
| `LLM_MODEL_ID` | Hugging Face model ID for the CodeGen LLM (used by TGI/vLLM service). Configured within `compose.yaml` environment. | `Qwen/Qwen2.5-Coder-7B-Instruct` |
| `EMBEDDING_MODEL_ID` | Hugging Face model ID for the embedding model (used by TEI service). Configured within `compose.yaml` environment. | `BAAI/bge-base-en-v1.5` |
| `LLM_ENDPOINT` | Internal URL for the LLM serving endpoint (used by `codegen-llm-server`). Configured in `compose.yaml`. | `http://codegen-tgi-server:80/generate` or `http://codegen-vllm-server:8000/v1/chat/completions` |
| `TEI_EMBEDDING_ENDPOINT` | Internal URL for the Embedding service. Configured in `compose.yaml`. | `http://codegen-tei-embedding-server:80/embed` |
| `DATAPREP_ENDPOINT` | Internal URL for the Data Preparation service. Configured in `compose.yaml`. | `http://codegen-dataprep-server:80/dataprep` |
| `BACKEND_SERVICE_ENDPOINT` | External URL for the CodeGen Gateway (MegaService). Derived from `HOST_IP` and port `7778`. | `http://${HOST_IP}:7778/v1/codegen` |
| `*_PORT` (Internal) | Internal container ports (e.g., `80`, `6379`). Defined in `compose.yaml`. | N/A |
| `http_proxy` / `https_proxy`/`no_proxy` | Network proxy settings (if required). | `""` |

Most of these parameters are set in `set_env.sh`; you can either modify that file or override the variables by exporting them in your shell.
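
For example, the required values from the table can be exported before launch (a minimal sketch; the token value is a placeholder you must replace):

```bash
# Required: the host's external IP and a valid Hugging Face token
export HOST_IP=$(hostname -I | awk '{print $1}')
export HUGGINGFACEHUB_API_TOKEN="your_huggingface_token"

# Optional: override the served model
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
```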

@@ -118,7 +120,7 @@ Most of these parameters are in `set_env.sh`, you can either modify this file or

source CodeGen/docker_compose/set_env.sh
```

#### Compose Profiles
### Compose Profiles

Docker Compose profiles (`codegen-xeon-vllm`, `codegen-xeon-tgi`) control which LLM serving backend (vLLM or TGI) and its associated dependencies are started. Only one profile should typically be active.
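
For example, to bring the stack up with the vLLM backend and later tear the same profile down:

```bash
# Start the CodeGen stack with the vLLM serving backend
docker compose --profile codegen-xeon-vllm up -d

# Stop and remove the same set of containers when finished
docker compose --profile codegen-xeon-vllm down
```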

@@ -150,11 +152,11 @@ Check logs for specific services: `docker compose logs <service_name>`

Use `curl` commands to test the main service endpoints. Ensure `HOST_IP` is correctly set in your environment.

1. **Validate LLM Serving Endpoint (Example for vLLM on default port 9000 internally, exposed differently):**
1. **Validate LLM Serving Endpoint (Example for vLLM on default port 8000 internally, exposed differently):**

   ```bash
   # This command structure targets the OpenAI-compatible vLLM endpoint
   curl http://${HOST_IP}:9000/v1/chat/completions \
   curl http://${HOST_IP}:8000/v1/chat/completions \
     -X POST \
     -H 'Content-Type: application/json' \
     -d '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "Implement a basic Python class"}], "max_tokens":32}'
   ```

@@ -177,8 +179,8 @@ Multiple UI options can be configured via the `compose.yaml`.

### Gradio UI (Default)

Access the default Gradio UI by navigating to:
`http://{HOST_IP}:5173`
_(Port `5173` is the default host mapping for `codegen-gradio-ui-server`)_
`http://{HOST_IP}:8080`
_(Port `8080` is the default host mapping for `codegen-gradio-ui-server`)_

@@ -6,10 +6,23 @@ This README provides instructions for deploying the CodeGen application using Do

- [Overview](#overview)
- [Prerequisites](#prerequisites)
- [Quick Start Deployment](#quick-start-deployment)
- [Quick Start](#quick-start)
- [Available Deployment Options](#available-deployment-options)
  - [Default: vLLM-based Deployment (`--profile codegen-gaudi-vllm`)](#default-vllm-based-deployment---profile-codegen-gaudi-vllm)
  - [TGI-based Deployment (`--profile codegen-gaudi-tgi`)](#tgi-based-deployment---profile-codegen-gaudi-tgi)
- [Configuration Parameters](#configuration-parameters)
  - [Environment Variables](#environment-variables)
  - [Compose Profiles](#compose-profiles)
  - [Docker Compose Gaudi Configuration](#docker-compose-gaudi-configuration)
- [Building Custom Images (Optional)](#building-custom-images-optional)
- [Validate Services](#validate-services)
  - [Check Container Status](#check-container-status)
  - [Run Validation Script/Commands](#run-validation-scriptcommands)
- [Accessing the User Interface (UI)](#accessing-the-user-interface-ui)
  - [Gradio UI (Default)](#gradio-ui-default)
  - [Svelte UI (Optional)](#svelte-ui-optional)
  - [React UI (Optional)](#react-ui-optional)
  - [VS Code Extension (Optional)](#vs-code-extension-optional)
- [Troubleshooting](#troubleshooting)
- [Stopping the Application](#stopping-the-application)
- [Next Steps](#next-steps)

@@ -31,7 +44,7 @@ This guide focuses on running the pre-configured CodeGen service using Docker Co

cd GenAIExamples/CodeGen/docker_compose/intel/hpu/gaudi
```

## Quick Start Deployment
## Quick Start

This uses the default vLLM-based deployment profile (`codegen-gaudi-vllm`).

@@ -48,21 +61,10 @@ This uses the default vLLM-based deployment profile (`codegen-gaudi-vllm`).

# export http_proxy="your_http_proxy"
# export https_proxy="your_https_proxy"
# export no_proxy="localhost,127.0.0.1,${HOST_IP}" # Add other hosts if necessary
source ../../set_env.sh
source ../../../set_env.sh
```

_Note: The compose file might read additional variables from set_env.sh. Ensure all required variables like ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.) are set if not using defaults from the compose file._
For instance, edit `set_env.sh` to change the LLM model:

```
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
```

This can be changed to another model if needed:

```
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-32B-Instruct"
```

_Note: Ensure all required variables like ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.) are set if not using defaults from the compose file._

2. **Start Services (vLLM Profile):**
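
The launch command for this step was elided in this hunk; a minimal sketch, assuming the default profile named above and the compose file in this directory:

```bash
# Launch all services selected by the vLLM profile in the background
docker compose --profile codegen-gaudi-vllm up -d
```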

@@ -102,18 +104,18 @@ The `compose.yaml` file uses Docker Compose profiles to select the LLM serving b

Key parameters are configured via environment variables set before running `docker compose up`.

| Environment Variable | Description | Default (Set Externally) |
| :-------------------------------------- | :------------------------------------------------------------------------------------------------------------------ | :--------------------------------------------- | ------------------------------------ |
| `HOST_IP` | External IP address of the host machine. **Required.** | `your_external_ip_address` |
| `HUGGINGFACEHUB_API_TOKEN` | Your Hugging Face Hub token for model access. **Required.** | `your_huggingface_token` |
| `LLM_MODEL_ID` | Hugging Face model ID for the CodeGen LLM (used by TGI/vLLM service). Configured within `compose.yaml` environment. | `Qwen/Qwen2.5-Coder-7B-Instruct` |
| `EMBEDDING_MODEL_ID` | Hugging Face model ID for the embedding model (used by TEI service). Configured within `compose.yaml` environment. | `BAAI/bge-base-en-v1.5` |
| `LLM_ENDPOINT` | Internal URL for the LLM serving endpoint (used by `llm-codegen-vllm-server`). Configured in `compose.yaml`. | http://codegen-vllm | tgi-server:9000/v1/chat/completions` |
| `TEI_EMBEDDING_ENDPOINT` | Internal URL for the Embedding service. Configured in `compose.yaml`. | `http://codegen-tei-embedding-server:80/embed` |
| `DATAPREP_ENDPOINT` | Internal URL for the Data Preparation service. Configured in `compose.yaml`. | `http://codegen-dataprep-server:80/dataprep` |
| `BACKEND_SERVICE_ENDPOINT` | External URL for the CodeGen Gateway (MegaService). Derived from `HOST_IP` and port `7778`. | `http://${HOST_IP}:7778/v1/codegen` |
| `*_PORT` (Internal) | Internal container ports (e.g., `80`, `6379`). Defined in `compose.yaml`. | N/A |
| `http_proxy` / `https_proxy`/`no_proxy` | Network proxy settings (if required). | `""` |

| Environment Variable | Description | Default (Set Externally) |
| :-------------------------------------- | :------------------------------------------------------------------------------------------------------------------ | :----------------------------------------------------------------------------------------------- |
| `HOST_IP` | External IP address of the host machine. **Required.** | `your_external_ip_address` |
| `HUGGINGFACEHUB_API_TOKEN` | Your Hugging Face Hub token for model access. **Required.** | `your_huggingface_token` |
| `LLM_MODEL_ID` | Hugging Face model ID for the CodeGen LLM (used by TGI/vLLM service). Configured within `compose.yaml` environment. | `Qwen/Qwen2.5-Coder-7B-Instruct` |
| `EMBEDDING_MODEL_ID` | Hugging Face model ID for the embedding model (used by TEI service). Configured within `compose.yaml` environment. | `BAAI/bge-base-en-v1.5` |
| `LLM_ENDPOINT` | Internal URL for the LLM serving endpoint (used by `codegen-llm-server`). Configured in `compose.yaml`. | `http://codegen-tgi-server:80/generate` or `http://codegen-vllm-server:8000/v1/chat/completions` |
| `TEI_EMBEDDING_ENDPOINT` | Internal URL for the Embedding service. Configured in `compose.yaml`. | `http://codegen-tei-embedding-server:80/embed` |
| `DATAPREP_ENDPOINT` | Internal URL for the Data Preparation service. Configured in `compose.yaml`. | `http://codegen-dataprep-server:80/dataprep` |
| `BACKEND_SERVICE_ENDPOINT` | External URL for the CodeGen Gateway (MegaService). Derived from `HOST_IP` and port `7778`. | `http://${HOST_IP}:7778/v1/codegen` |
| `*_PORT` (Internal) | Internal container ports (e.g., `80`, `6379`). Defined in `compose.yaml`. | N/A |
| `http_proxy` / `https_proxy`/`no_proxy` | Network proxy settings (if required). | `""` |

Most of these parameters are set in `set_env.sh`; you can either modify that file or override the variables by exporting them in your shell.

@@ -170,11 +172,11 @@ Check logs: `docker compose logs <service_name>`. Pay attention to `vllm-gaudi-s

Use `curl` commands targeting the main service endpoints. Ensure `HOST_IP` is correctly set.

1. **Validate LLM Serving Endpoint (Example for vLLM on default port 9000 internally, exposed differently):**
1. **Validate LLM Serving Endpoint (Example for vLLM on default port 8000 internally, exposed differently):**

   ```bash
   # This command structure targets the OpenAI-compatible vLLM endpoint
   curl http://${HOST_IP}:9000/v1/chat/completions \
   curl http://${HOST_IP}:8000/v1/chat/completions \
     -X POST \
     -H 'Content-Type: application/json' \
     -d '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "Implement a basic Python class"}], "max_tokens":32}'
   ```

@@ -195,8 +197,8 @@ UI options are similar to the Xeon deployment.

### Gradio UI (Default)

Access the default Gradio UI:
`http://{HOST_IP}:5173`
_(Port `5173` is the default host mapping)_
`http://{HOST_IP}:8080`
_(Port `8080` is the default host mapping)_

@@ -1,51 +0,0 @@

#!/usr/bin/env bash

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
pushd "../../" > /dev/null
source .set_env.sh
popd > /dev/null

export HOST_IP=$(hostname -I | awk '{print $1}')
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
if [ -z "${HUGGINGFACEHUB_API_TOKEN}" ]; then
    echo "Error: HUGGINGFACEHUB_API_TOKEN is not set. Please set HUGGINGFACEHUB_API_TOKEN"
fi

if [ -z "${HOST_IP}" ]; then
    echo "Error: HOST_IP is not set. Please set HOST_IP first."
fi

export no_proxy=${no_proxy},${HOST_IP}
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}

export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
export LLM_SERVICE_PORT=9000
export LLM_ENDPOINT="http://${HOST_IP}:8028"
export LLM_SERVICE_HOST_IP=${HOST_IP}
export TGI_LLM_ENDPOINT="http://${HOST_IP}:8028"

export MEGA_SERVICE_PORT=7778
export MEGA_SERVICE_HOST_IP=${HOST_IP}
export BACKEND_SERVICE_ENDPOINT="http://${HOST_IP}:7778/v1/codegen"

export REDIS_DB_PORT=6379
export REDIS_INSIGHTS_PORT=8001
export REDIS_RETRIEVER_PORT=7000
export REDIS_URL="redis://${HOST_IP}:${REDIS_DB_PORT}"
export RETRIEVAL_SERVICE_HOST_IP=${HOST_IP}
export RETRIEVER_COMPONENT_NAME="OPEA_RETRIEVER_REDIS"
export INDEX_NAME="CodeGen"

export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export EMBEDDER_PORT=6000
export TEI_EMBEDDER_PORT=8090
export TEI_EMBEDDING_HOST_IP=${HOST_IP}
export TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${TEI_EMBEDDER_PORT}"

export DATAPREP_REDIS_PORT=6007
export DATAPREP_ENDPOINT="http://${HOST_IP}:${DATAPREP_REDIS_PORT}/v1/dataprep"
export LOGFLAG=false
export MODEL_CACHE=${model_cache:-"./data"}
export NUM_CARDS=1

50 CodeGen/docker_compose/set_env.sh Normal file

@@ -0,0 +1,50 @@

#!/usr/bin/env bash

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
pushd "../../" > /dev/null
source .set_env.sh
popd > /dev/null

export host_ip=$(hostname -I | awk '{print $1}')
if [ -z "${HUGGINGFACEHUB_API_TOKEN}" ]; then
    echo "Error: HUGGINGFACEHUB_API_TOKEN is not set. Please set HUGGINGFACEHUB_API_TOKEN"
fi

if [ -z "${host_ip}" ]; then
    echo "Error: host_ip is not set. Please set host_ip first."
fi

export no_proxy=${no_proxy},${host_ip}
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}

export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-32B-Instruct"
export LLM_SERVICE_PORT=9000
export LLM_ENDPOINT="http://${host_ip}:8028"
export LLM_SERVICE_HOST_IP=${host_ip}
export TGI_LLM_ENDPOINT="http://${host_ip}:8028"

export MEGA_SERVICE_PORT=7778
export MEGA_SERVICE_HOST_IP=${host_ip}
export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:7778/v1/codegen"

export REDIS_DB_PORT=6379
export REDIS_INSIGHTS_PORT=8001
export REDIS_RETRIEVER_PORT=7000
export REDIS_URL="redis://${host_ip}:${REDIS_DB_PORT}"
export RETRIEVAL_SERVICE_HOST_IP=${host_ip}
export RETRIEVER_COMPONENT_NAME="OPEA_RETRIEVER_REDIS"
export INDEX_NAME="CodeGen"

export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export EMBEDDER_PORT=6000
export TEI_EMBEDDER_PORT=8090
export TEI_EMBEDDING_HOST_IP=${host_ip}
export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:${TEI_EMBEDDER_PORT}"

export DATAPREP_REDIS_PORT=6007
export DATAPREP_ENDPOINT="http://${host_ip}:${DATAPREP_REDIS_PORT}/v1/dataprep"
export LOGFLAG=false
export MODEL_CACHE="./data"
export NUM_CARDS=1

@@ -1,33 +0,0 @@

# CodeGen E2E test scripts

## Set the required environment variable

```bash
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
```

## Run test

On Intel Xeon with TGI:

```bash
bash test_compose_on_xeon.sh
```

On Intel Gaudi with TGI:

```bash
bash test_compose_on_gaudi.sh
```

On AMD ROCm with TGI:

```bash
bash test_compose_on_rocm.sh
```

On AMD ROCm with vLLM:

```bash
bash test_compose_vllm_on_rocm.sh
```

@@ -10,11 +10,21 @@ echo "TAG=IMAGE_TAG=${IMAGE_TAG}"

export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
export MODEL_CACHE=${model_cache:-"./data"}
export REDIS_DB_PORT=6379
export REDIS_INSIGHTS_PORT=8001
export REDIS_RETRIEVER_PORT=7000
export EMBEDDER_PORT=6000
export TEI_EMBEDDER_PORT=8090
export DATAPREP_REDIS_PORT=6007

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
source $WORKPATH/docker_compose/intel/set_env.sh

export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy=${no_proxy},${ip_address}

function build_docker_images() {
    opea_branch=${opea_branch:-"main"}

@@ -27,7 +37,7 @@ function build_docker_images() {

    # Download the Gaudi vLLM fork at the pinned tag
    git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
    echo "Check out vLLM tag ${VLLM_FORK_VER}"
    git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
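
The image build that typically follows this checkout appears later in this diff for another script; a sketch of the same step (the image tag here is an assumption):

```bash
# Build the Gaudi (HPU) vLLM serving image from the checked-out fork
docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g . \
  --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
```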
@@ -44,6 +54,28 @@ function start_services() {

    cd $WORKPATH/docker_compose/intel/hpu/gaudi

    export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
    export LLM_ENDPOINT="http://${ip_address}:8028"
    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
    export MEGA_SERVICE_PORT=7778
    export MEGA_SERVICE_HOST_IP=${ip_address}
    export LLM_SERVICE_HOST_IP=${ip_address}
    export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:${MEGA_SERVICE_PORT}/v1/codegen"
    export NUM_CARDS=1
    export host_ip=${ip_address}

    export REDIS_URL="redis://${host_ip}:${REDIS_DB_PORT}"
    export RETRIEVAL_SERVICE_HOST_IP=${host_ip}
    export RETRIEVER_COMPONENT_NAME="OPEA_RETRIEVER_REDIS"
    export INDEX_NAME="CodeGen"

    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
    export TEI_EMBEDDING_HOST_IP=${host_ip}
    export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:${TEI_EMBEDDER_PORT}"
    export DATAPREP_ENDPOINT="http://${host_ip}:${DATAPREP_REDIS_PORT}/v1/dataprep"

    export INDEX_NAME="CodeGen"

    # Start Docker Containers
    docker compose --profile ${compose_profile} up -d | tee ${LOG_PATH}/start_services_with_compose.log
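
The `compose_profile` variable selects the serving backend; a plausible setting, assuming the profile names used in the README above:

```bash
# Choose which serving backend the compose profile brings up
compose_profile="codegen-gaudi-vllm"   # or "codegen-gaudi-tgi"
```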
@@ -35,7 +35,18 @@ function build_docker_images() {

function start_services() {
    cd $WORKPATH/docker_compose/amd/gpu/rocm/
    source set_env.sh

    export CODEGEN_LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
    export CODEGEN_TGI_SERVICE_PORT=8028
    export CODEGEN_TGI_LLM_ENDPOINT="http://${ip_address}:${CODEGEN_TGI_SERVICE_PORT}"
    export CODEGEN_LLM_SERVICE_PORT=9000
    export CODEGEN_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
    export CODEGEN_MEGA_SERVICE_HOST_IP=${ip_address}
    export CODEGEN_LLM_SERVICE_HOST_IP=${ip_address}
    export CODEGEN_BACKEND_SERVICE_PORT=7778
    export CODEGEN_BACKEND_SERVICE_URL="http://${ip_address}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen"
    export CODEGEN_UI_SERVICE_PORT=5173
    export HOST_IP=${ip_address}

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

@@ -10,11 +10,20 @@ echo "TAG=IMAGE_TAG=${IMAGE_TAG}"

export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
export MODEL_CACHE=${model_cache:-"./data"}
export REDIS_DB_PORT=6379
export REDIS_INSIGHTS_PORT=8001
export REDIS_RETRIEVER_PORT=7000
export EMBEDDER_PORT=6000
export TEI_EMBEDDER_PORT=8090
export DATAPREP_REDIS_PORT=6007

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
source $WORKPATH/docker_compose/intel/set_env.sh

export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy=${no_proxy},${ip_address}

function build_docker_images() {
    opea_branch=${opea_branch:-"main"}

@@ -47,6 +56,25 @@ function start_services() {

    cd $WORKPATH/docker_compose/intel/cpu/xeon/

    export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
    export LLM_ENDPOINT="http://${ip_address}:8028"
    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
    export MEGA_SERVICE_PORT=7778
    export MEGA_SERVICE_HOST_IP=${ip_address}
    export LLM_SERVICE_HOST_IP=${ip_address}
    export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:${MEGA_SERVICE_PORT}/v1/codegen"
    export host_ip=${ip_address}

    export REDIS_URL="redis://${host_ip}:${REDIS_DB_PORT}"
    export RETRIEVAL_SERVICE_HOST_IP=${host_ip}
    export RETRIEVER_COMPONENT_NAME="OPEA_RETRIEVER_REDIS"
    export INDEX_NAME="CodeGen"

    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
    export TEI_EMBEDDING_HOST_IP=${host_ip}
    export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:${TEI_EMBEDDER_PORT}"
    export DATAPREP_ENDPOINT="http://${host_ip}:${DATAPREP_REDIS_PORT}/v1/dataprep"

    # Start Docker Containers
    docker compose --profile ${compose_profile} up -d > ${LOG_PATH}/start_services_with_compose.log

@@ -34,7 +34,18 @@ function build_docker_images() {

function start_services() {
    cd $WORKPATH/docker_compose/amd/gpu/rocm/
    source set_env_vllm.sh

    export CODEGEN_LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
    export CODEGEN_VLLM_SERVICE_PORT=8028
    export CODEGEN_VLLM_ENDPOINT="http://${ip_address}:${CODEGEN_VLLM_SERVICE_PORT}"
    export CODEGEN_LLM_SERVICE_PORT=9000
    export CODEGEN_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
    export CODEGEN_MEGA_SERVICE_HOST_IP=${ip_address}
    export CODEGEN_LLM_SERVICE_HOST_IP=${ip_address}
    export CODEGEN_BACKEND_SERVICE_PORT=7778
    export CODEGEN_BACKEND_SERVICE_URL="http://${ip_address}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen"
    export CODEGEN_UI_SERVICE_PORT=5173
    export HOST_IP=${ip_address}

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

@@ -22,11 +22,12 @@ This Code Translation use case demonstrates Text Generation Inference across mul

The table below lists the currently available deployment options; each entry details the implementation of this example on the selected hardware.

| Category | Deployment Option | Description |
| ---------------------- | -------------------- | --------------------------------------------------------------------------- |
| On-premise Deployments | Docker compose | [CodeTrans deployment on Xeon](./docker_compose/intel/cpu/xeon/README.md) |
| | | [CodeTrans deployment on Gaudi](./docker_compose/intel/hpu/gaudi/README.md) |
| | | [CodeTrans deployment on AMD ROCm](./docker_compose/amd/gpu/rocm/README.md) |
| | Kubernetes | [Helm Charts](./kubernetes/helm/README.md) |
| | Azure | Work-in-progress |
| | Intel Tiber AI Cloud | Work-in-progress |

| Category | Deployment Option | Description |
| ---------------------- | -------------------- | ----------------------------------------------------------------- |
| On-premise Deployments | Docker compose | [CodeTrans deployment on Xeon](./docker_compose/intel/cpu/xeon) |
| | | [CodeTrans deployment on Gaudi](./docker_compose/intel/hpu/gaudi) |
| | | [CodeTrans deployment on AMD ROCm](./docker_compose/amd/gpu/rocm) |
| | Kubernetes | [Helm Charts](./kubernetes/helm) |
| | | [GMC](./kubernetes/gmc) |
| | Azure | Work-in-progress |
| | Intel Tiber AI Cloud | Work-in-progress |

@@ -44,38 +44,3 @@ Some HuggingFace resources, such as some models, are only accessible if the deve

2. (Docker only) If all microservices work well, check port ${host_ip}:7777; the port may already be allocated by another user, in which case you can modify it in `compose.yaml`.
3. (Docker only) If you get errors like "The container name is in use", change the container name in `compose.yaml`.

## Monitoring OPEA Services with Prometheus and Grafana Dashboard

OPEA microservice deployment can easily be monitored through Grafana dashboards using data collected via Prometheus. Follow the [README](https://github.com/opea-project/GenAIEval/blob/main/evals/benchmark/grafana/README.md) to set up Prometheus and Grafana servers and import dashboards to monitor the OPEA services.

## Tracing with OpenTelemetry and Jaeger

> NOTE: This feature is disabled by default. Please use the compose.telemetry.yaml file to enable this feature.
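
One way to enable it, assuming the telemetry overlay file sits next to the base compose file:

```bash
# Layer the telemetry overlay on top of the base deployment
docker compose -f compose.yaml -f compose.telemetry.yaml up -d
```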

OPEA microservice and [TGI](https://huggingface.co/docs/text-generation-inference/en/index)/[TEI](https://huggingface.co/docs/text-embeddings-inference/en/index) serving can easily be traced through [Jaeger](https://www.jaegertracing.io/) dashboards in conjunction with the [OpenTelemetry](https://opentelemetry.io/) Tracing feature. Follow the [README](https://github.com/opea-project/GenAIComps/tree/main/comps/cores/telemetry#tracing) to trace additional functions if needed.

Tracing data is exported to http://{EXTERNAL_IP}:4318/v1/traces via Jaeger.
Users can also get the external IP via the command below.

```bash
ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+'
```

Access the Jaeger dashboard UI at http://{EXTERNAL_IP}:16686

For TGI serving on Gaudi, users can see different services such as opea, TEI, and TGI.

Here is a screenshot of one trace of a TGI serving request.

There are also OPEA-related traces. Users can understand the time breakdown of each service request by looking into each opea:schedule operation.

There may be asynchronous functions such as `llm/MicroService_asyn_generate`; users need to check the trace of the asynchronous function in another operation like opea:llm_generate_stream.

Binary file not shown.
Before Width: | Height: | Size: 90 KiB → After Width: | Height: | Size: 120 KiB
Binary file not shown.
Before Width: | Height: | Size: 100 KiB
Binary file not shown.
Before Width: | Height: | Size: 414 KiB
@@ -26,7 +26,7 @@ function build_docker_images() {

    popd && sleep 1s

    git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
    git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."

@@ -2,11 +2,7 @@

DocRetriever is the most widely adopted use case for leveraging different methodologies to match a user query against a set of free-text records. DocRetriever is essential to a RAG system, which bridges the knowledge gap by dynamically fetching relevant information from external sources, ensuring that the responses generated remain factual and current. The core of this architecture is the vector database, which is instrumental in enabling efficient and semantic retrieval of information. These databases store data as vectors, allowing RAG to swiftly access the most pertinent documents or data points based on semantic similarity.

_Note:_

As the related Docker images have been published to Docker Hub, you can skip steps 1 and 2 below and quick-start from step 3.

## 1. Build Images for necessary microservices. (Optional)
## 1. Build Images for necessary microservices. (Optional after docker image release)

- Embedding TEI Image

@@ -34,7 +30,7 @@ As the related docker images were published to Docker Hub, you can ignore the be

docker build -t opea/dataprep:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/dataprep/src/Dockerfile .
```

## 2. Build Images for MegaService (Optional)
## 2. Build Images for MegaService

```bash
cd ..

@@ -48,19 +44,6 @@ docker build --no-cache -t opea/doc-index-retriever:latest --build-arg https_pro

```bash
export host_ip="YOUR IP ADDR"
export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
```

Set environment variables with:

```
cd GenAIExamples/DocIndexRetriever/docker_compose/intel/cpu/xeon
source set_env.sh
```

Note: set_env.sh helps set all required variables. Please ensure all required variables like ports (LLM_SERVICE_PORT, MEGA_SERVICE_PORT, etc.) are set if not using defaults from the compose file.
Or set environment variables manually:

```
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:6006"

@@ -40,7 +40,6 @@ services:

      LLM_ENDPOINT: ${LLM_ENDPOINT}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      MAX_INPUT_TOKENS: ${MAX_INPUT_TOKENS}
      MAX_TOTAL_TOKENS: ${MAX_TOTAL_TOKENS}
      DocSum_COMPONENT_NAME: ${DocSum_COMPONENT_NAME}

@@ -40,7 +40,6 @@ services:

      LLM_ENDPOINT: ${LLM_ENDPOINT}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      MAX_INPUT_TOKENS: ${MAX_INPUT_TOKENS}
      MAX_TOTAL_TOKENS: ${MAX_TOTAL_TOKENS}
      DocSum_COMPONENT_NAME: ${DocSum_COMPONENT_NAME}

@@ -45,7 +45,6 @@ services:

      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      MAX_INPUT_TOKENS: ${MAX_INPUT_TOKENS}
      MAX_TOTAL_TOKENS: ${MAX_TOTAL_TOKENS}
      LLM_ENDPOINT: ${LLM_ENDPOINT}

@@ -49,7 +49,6 @@ services:

      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      MAX_INPUT_TOKENS: ${MAX_INPUT_TOKENS}
      MAX_TOTAL_TOKENS: ${MAX_TOTAL_TOKENS}
      LLM_ENDPOINT: ${LLM_ENDPOINT}

@@ -13,7 +13,7 @@ export https_proxy=$https_proxy

export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}

export LLM_ENDPOINT_PORT=8008
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
export MAX_INPUT_TOKENS=1024
export MAX_TOTAL_TOKENS=2048

@@ -50,7 +50,7 @@ function build_docker_images() {

    popd && sleep 1s

    git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
    git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."

@@ -60,7 +60,7 @@ function build_vllm_docker_image() {

    fi
    cd ./vllm-fork

    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
    git checkout ${VLLM_FORK_VER} &> /dev/null
    docker build --no-cache -f Dockerfile.hpu -t $vllm_image --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
    if [ $? -ne 0 ]; then

@@ -30,38 +30,66 @@ The architecture of the SearchQnA Application is illustrated below:

The SearchQnA example is implemented using the component-level microservices defined in [GenAIComps](https://github.com/opea-project/GenAIComps). The flow chart below shows the information flow between different microservices for this example.

```mermaid
%% Orange are microservices from third parties that are 'wrapped' as OPEA components.
---
config:
  flowchart:
    nodeSpacing: 400
    rankSpacing: 100
    curve: linear
  themeVariables:
    fontSize: 50px
---
flowchart LR
    User["User"] --> Nginx["Nginx<br>searchqna-nginx-server"]
    Nginx --> UI["UI<br>searchqna-ui-server"] & Gateway & User
    UI --> Nginx
    Gateway --> Nginx & Embedding
    Embedding --> Retriever
    Retriever --> Reranker
    Reranker --> LLM
    LLM --> Gateway
    LLM <-.-> TGI_Service["LLM<br>tgi-service"]
    Embedding <-.-> TEI_Embedding["TEI Embedding<br>tei-embedding-server"]
    Reranker <-.-> TEI_Reranker["TEI Reranker<br>tei-reranking-server"]
    %% Colors %%
    classDef blue fill:#ADD8E6,stroke:#ADD8E6,stroke-width:2px,fill-opacity:0.5
    classDef orange fill:#FBAA60,stroke:#ADD8E6,stroke-width:2px,fill-opacity:0.5
    classDef orchid fill:#C26DBC,stroke:#ADD8E6,stroke-width:2px,fill-opacity:0.5
    classDef invisible fill:transparent,stroke:transparent;
    style SearchQnA-MegaService stroke:#000000

    TEI_Embedding:::ext
    TEI_Reranker:::ext
    TGI_Service:::ext
    %% Subgraphs %%
    subgraph SearchQnA-MegaService["SearchQnA MegaService "]
        direction LR
        EM([Embedding MicroService]):::blue
        RET([Web Retrieval MicroService]):::blue
        RER([Rerank MicroService]):::blue
        LLM([LLM MicroService]):::blue
    end
    subgraph UserInterface[" User Interface "]
        direction LR
        a([User Input Query]):::orchid
        UI([UI server<br>]):::orchid
    end

    TEI_RER{{Reranking service<br>}}
    TEI_EM{{Embedding service <br>}}
    VDB{{Vector DB<br><br>}}
    R_RET{{Web Retriever service <br>}}
    LLM_gen{{LLM Service <br>}}
    GW([SearchQnA GateWay<br>]):::orange

    %% Questions interaction
    direction LR
    a[User Input Query] --> UI
    UI --> GW
    GW <==> SearchQnA-MegaService
    EM ==> RET
    RET ==> RER
    RER ==> LLM

    %% Embedding service flow
    direction LR
    EM <-.-> TEI_EM
    RET <-.-> R_RET
    RER <-.-> TEI_RER
    LLM <-.-> LLM_gen

    subgraph MegaService["MegaService"]
        LLM["LLM<br>llm-textgen-server"]
        Reranker["Reranker<br>reranking-tei-server"]
        Retriever["Retriever<br>web-retriever-server"]
        Embedding["Embedding<br>embedding-server"]
    end
    subgraph Backend["searchqna-backend-server"]
        direction TB
        MegaService
        Gateway["Backend Endpoint"]
    end
    classDef default fill:#fff,stroke:#000,color:#000
    classDef ext fill:#f9cb9c,stroke:#000,color:#000
    style MegaService margin-top:20px,margin-bottom:20px
    %% Vector DB interaction
    R_RET <-.-> VDB
```

This SearchQnA use case performs Search-augmented Question Answering across multiple platforms. Currently, we provide the example for Intel® Gaudi® 2 and Intel® Xeon® Scalable Processors, and we invite contributions from other hardware vendors to expand the OPEA ecosystem.

@@ -70,8 +98,8 @@ This SearchQnA use case performs Search-augmented Question Answering across mult

The table below lists the available deployment options and their implementation details for different hardware platforms.

| Category | Deployment Option | Description |
| ---------------------- | ---------------------- | --------------------------------------------------------------------------- |
| On-premise Deployments | Docker Compose (Xeon) | [SearchQnA deployment on Xeon](./docker_compose/intel/cpu/xeon/README.md) |
| | Docker Compose (Gaudi) | [SearchQnA deployment on Gaudi](./docker_compose/intel/hpu/gaudi/README.md) |
| | Docker Compose (ROCm) | [SearchQnA deployment on AMD ROCm](./docker_compose/amd/gpu/rocm/README.md) |

| Category | Deployment Option | Description |
| ---------------------- | ---------------------- | ------------------------------------------------------------------ |
| On-premise Deployments | Docker Compose (Xeon) | [SearchQnA deployment on Xeon](./docker_compose/intel/cpu/xeon) |
| | Docker Compose (Gaudi) | [SearchQnA deployment on Gaudi](./docker_compose/intel/hpu/gaudi) |
| | Docker Compose (ROCm) | [SearchQnA deployment on AMD ROCm](./docker_compose/amd/gpu/rocm) |

@@ -170,25 +170,7 @@ services:

      no_proxy: ${no_proxy}
      https_proxy: ${https_proxy}
      http_proxy: ${http_proxy}
    ipc: host
    restart: always
  search-nginx-server:
    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
    container_name: search-nginx-server
    depends_on:
      - search-backend-server
      - search-ui-server
    ports:
      - "${NGINX_PORT:-80}:80"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - FRONTEND_SERVICE_IP=search-ui-server
      - FRONTEND_SERVICE_PORT=5173
      - BACKEND_SERVICE_NAME=search
      - BACKEND_SERVICE_IP=search-backend-server
      - BACKEND_SERVICE_PORT=8888
      BACKEND_BASE_URL: ${SEARCH_BACKEND_SERVICE_ENDPOINT}
    ipc: host
    restart: always

@@ -176,27 +176,10 @@ services:

      no_proxy: ${no_proxy}
      https_proxy: ${https_proxy}
      http_proxy: ${http_proxy}
      BACKEND_BASE_URL: ${SEARCH_BACKEND_SERVICE_ENDPOINT}
    ipc: host
    restart: always
  search-nginx-server:
    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
    container_name: search-nginx-server
    depends_on:
      - search-backend-server
      - search-ui-server
    ports:
      - "${NGINX_PORT:-80}:80"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - FRONTEND_SERVICE_IP=search-ui-server
      - FRONTEND_SERVICE_PORT=5173
      - BACKEND_SERVICE_NAME=search
      - BACKEND_SERVICE_IP=search-backend-server
      - BACKEND_SERVICE_PORT=8888
    ipc: host
    restart: always

networks:
  default:
    driver: bridge

@@ -168,27 +168,10 @@ services:

      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - BACKEND_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
    ipc: host
    restart: always
  searchqna-xeon-nginx-server:
    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
    container_name: searchqna-xeon-nginx-server
    depends_on:
      - searchqna-xeon-backend-server
      - searchqna-xeon-ui-server
    ports:
      - "${NGINX_PORT:-80}:80"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - FRONTEND_SERVICE_IP=searchqna-xeon-ui-server
      - FRONTEND_SERVICE_PORT=5173
      - BACKEND_SERVICE_NAME=searchqna
      - BACKEND_SERVICE_IP=searchqna-xeon-backend-server
      - BACKEND_SERVICE_PORT=8888
    ipc: host
    restart: always

networks:
  default:

@@ -187,25 +187,7 @@ services:

      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
    ipc: host
    restart: always
  searchqna-gaudi-nginx-server:
    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
    container_name: searchqna-gaudi-nginx-server
    depends_on:
      - searchqna-gaudi-backend-server
      - searchqna-gaudi-ui-server
    ports:
      - "${NGINX_PORT:-80}:80"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - FRONTEND_SERVICE_IP=searchqna-gaudi-ui-server
      - FRONTEND_SERVICE_PORT=5173
      - BACKEND_SERVICE_NAME=searchqna
      - BACKEND_SERVICE_IP=searchqna-gaudi-backend-server
      - BACKEND_SERVICE_PORT=8888
      - BACKEND_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
    ipc: host
    restart: always

@@ -46,9 +46,3 @@ services:

      context: GenAIComps
      dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu
    image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest}
  nginx:
    build:
      context: GenAIComps
      dockerfile: comps/third_parties/nginx/src/Dockerfile
    extends: searchqna
    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}

@@ -32,7 +32,7 @@ function build_docker_images() {

    git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen nginx"
    service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen"
    docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6

@@ -20,7 +20,7 @@ function build_docker_images() {

    git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen nginx"
    service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen"
    docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6

@@ -32,7 +32,7 @@ function build_docker_images() {

    git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen nginx"
    service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen"
    docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6

@@ -20,7 +20,7 @@ function build_docker_images() {

    git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen vllm-rocm nginx"
    service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen vllm-rocm"
    docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5

@@ -1 +1 @@

BACKEND_BASE_URL = '/v1/searchqna'
BACKEND_BASE_URL = 'http://backend_address:3008/v1/searchqna'

@@ -38,7 +38,7 @@ export default defineConfig({

    /* Maximum time each action such as `click()` can take. Defaults to 0 (no limit). */
    actionTimeout: 0,
    /* Base URL to use in actions like `await page.goto('/')`. */
    baseURL: "http://localhost:80",
    baseURL: "http://localhost:5173",

    /* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
    trace: "on-first-retry",

@@ -27,7 +27,7 @@ function build_docker_images() {

    popd && sleep 1s

    git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
    git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../

    service_list="visualqna visualqna-ui lvm nginx vllm-gaudi"