Compare commits

...

15 Commits

Author SHA1 Message Date
ZePan110
97fa6b2d73 Reduce the verification delay time to 1s
Signed-off-by: ZePan110 <ze.pan@intel.com>
2025-05-21 14:03:06 +08:00
ZePan110
c70b021689 Integrate CodeGen set_env to ut scripts. (#1976)
Signed-off-by: ZePan110 <ze.pan@intel.com>
Co-authored-by: Ying Hu <ying.hu@intel.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
2025-05-21 12:58:07 +08:00
Ying Hu
26cb531766 Update README.md of model/port change (#1969)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-05-20 14:29:23 +08:00
Ed Lee @ Intel
e9153b82bb Updated SearchQnA to use nginx like ChatQnA (#1769)
Signed-off-by: Ed Lee <16417837+edlee123@users.noreply.github.com>
2025-05-20 14:15:46 +08:00
Letong Han
0890e94a21 Refine CodeTrans README (#1960)
Signed-off-by: letonghan <letong.han@intel.com>
Co-authored-by: Ying Hu <ying.hu@intel.com>
2025-05-20 13:43:24 +08:00
ZePan110
581e954a8d Integrate ChatQnA set_env to ut scripts and add README.md for UT scripts. (#1971)
Signed-off-by: ZePan110 <ze.pan@intel.com>
2025-05-20 13:42:18 +08:00
ZePan110
8a9f3f4351 Organize set_env.sh paths and update README.md (#1920)
Signed-off-by: ZePan110 <ze.pan@intel.com>
Co-authored-by: chen, suyue <suyue.chen@intel.com>
Co-authored-by: Ying Hu <ying.hu@intel.com>
2025-05-20 10:05:00 +08:00
chen, suyue
09d93ecce6 [CICD enhance] CodeGen run CI with latest base image, group logs in GHA outputs. (#1928)
Signed-off-by: chensuyue <suyue.chen@intel.com>
2025-05-19 09:31:56 +08:00
ZePan110
ed918bcef1 Integrate EdgeCraftRAG set_env to ut scripts and add README.md for UT scripts. (#1963)
Signed-off-by: ZePan110 <ze.pan@intel.com>
2025-05-16 15:19:36 +08:00
ZePan110
1c0b1731c5 Integrate VisualQnA set_env to ut scripts. (#1947)
Signed-off-by: ZePan110 <ze.pan@intel.com>
Co-authored-by: chen, suyue <suyue.chen@intel.com>
2025-05-16 15:19:07 +08:00
ZePan110
22174e68a5 Integrate AvatarChatbot set_env to ut scripts. (#1961)
Signed-off-by: ZePan110 <ze.pan@intel.com>
2025-05-16 15:18:06 +08:00
ZePan110
c8abbc4958 Integrate ProductivitySuite set_env to ut scripts and add README.md for UT scripts. (#1966)
Signed-off-by: ZePan110 <ze.pan@intel.com>
Co-authored-by: Ying Hu <ying.hu@intel.com>
2025-05-16 15:16:38 +08:00
chen, suyue
7ee6f3657c [CICD enhance] DocIndexRetriever run CI with latest base image, group logs in GHA outputs. (#1932)
Signed-off-by: chensuyue <suyue.chen@intel.com>
2025-05-16 15:16:02 +08:00
ZePan110
11b04b38db Integrate SearchQnA set_env to ut scripts. (#1950)
Integrate SearchQnA set_env to ut scripts.
Add README.md for UT scripts.

Signed-off-by: ZePan110 <ze.pan@intel.com>
2025-05-16 15:09:07 +08:00
chen, suyue
7f55b5a100 Specify image build list for VisualQnA (#1967)
Signed-off-by: chensuyue <suyue.chen@intel.com>
2025-05-16 14:44:17 +08:00
122 changed files with 1293 additions and 1215 deletions

View File

@@ -204,6 +204,10 @@ jobs:
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid && sleep 1s; fi
echo "Cleaning up images ..."
df -h
sleep 1
docker system df
sleep 1
if [[ "${{ inputs.hardware }}" == "xeon"* ]]; then
docker system prune -a -f
else
@@ -213,7 +217,13 @@ jobs:
docker images --filter reference="opea/comps-base" -q | xargs -r docker rmi && sleep 1s
docker system prune -f
fi
sleep 5
docker images
sleep 1
df -h
sleep 1
docker system df
sleep 1
- name: Publish pipeline artifact
if: ${{ !cancelled() }}

View File

@@ -23,7 +23,7 @@ jobs:
- name: Check the Validity of Hyperlinks
run: |
cd ${{github.workspace}}
delay=15
delay=1
fail="FALSE"
merged_commit=$(git log -1 --format='%H')
changed_files="$(git diff --name-status --diff-filter=ARM ${{ github.event.pull_request.base.sha }} ${merged_commit} | awk '/\.md$/ {print $NF}')"
@@ -80,6 +80,7 @@ jobs:
- name: Checking Relative Path Validity
run: |
cd ${{github.workspace}}
delay=1
fail="FALSE"
repo_name=${{ github.event.pull_request.head.repo.full_name }}
branch="https://github.com/$repo_name/blob/${{ github.event.pull_request.head.ref }}"
@@ -111,14 +112,15 @@ jobs:
if [[ "$png_line" == *#* ]]; then
if [ -n "changed_files" ] && echo "$changed_files" | grep -q "^${refer_path}$"; then
url_dev=$branch$(echo "$real_path" | sed 's|.*/GenAIExamples||')$png_path
sleep $delay
response=$(curl -I -L -s -o /dev/null -w "%{http_code}" "$url_dev")
if [ "$response" -ne 200 ]; then
echo "**********Validation failed, try again**********"
echo "**********Validation failed ($response), try again**********"
response_retry=$(curl -s -o /dev/null -w "%{http_code}" "$url_dev")
if [ "$response_retry" -eq 200 ]; then
echo "*****Retry successfully*****"
else
echo "Invalid path from ${{github.workspace}}/$refer_path: $png_path"
echo "Invalid path ($response_retry) from ${{github.workspace}}/$refer_path: $png_path"
fail="TRUE"
fi
else

View File

@@ -41,7 +41,7 @@ export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
export AUDIO='None'
export FACESIZE=96
export OUTFILE="/outputs/result.mp4"
export OUTFILE="./outputs/result.mp4"
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
export UPSCALE_FACTOR=1
export FPS=10
export FPS=5

View File

@@ -5,3 +5,32 @@
pushd "../../../../../" > /dev/null
source .set_env.sh
popd > /dev/null
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export host_ip=$(hostname -I | awk '{print $1}')
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
export WAV2LIP_ENDPOINT=http://$host_ip:7860
export MEGA_SERVICE_HOST_IP=${host_ip}
export WHISPER_SERVER_HOST_IP=${host_ip}
export WHISPER_SERVER_PORT=7066
export SPEECHT5_SERVER_HOST_IP=${host_ip}
export SPEECHT5_SERVER_PORT=7055
export LLM_SERVER_HOST_IP=${host_ip}
export LLM_SERVER_PORT=3006
export ANIMATION_SERVICE_HOST_IP=${host_ip}
export ANIMATION_SERVICE_PORT=3008
export MEGA_SERVICE_PORT=8888
export DEVICE="cpu"
export WAV2LIP_PORT=7860
export INFERENCE_MODE='wav2lip+gfpgan'
export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
export AUDIO='None'
export FACESIZE=96
export OUTFILE="/outputs/result.mp4"
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
export UPSCALE_FACTOR=1
export FPS=10

View File

@@ -5,3 +5,35 @@
pushd "../../../../../" > /dev/null
source .set_env.sh
popd > /dev/null
export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
export host_ip=$(hostname -I | awk '{print $1}')
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
export WAV2LIP_ENDPOINT=http://$host_ip:7860
export MEGA_SERVICE_HOST_IP=${host_ip}
export WHISPER_SERVER_HOST_IP=${host_ip}
export WHISPER_SERVER_PORT=7066
export SPEECHT5_SERVER_HOST_IP=${host_ip}
export SPEECHT5_SERVER_PORT=7055
export LLM_SERVER_HOST_IP=${host_ip}
export LLM_SERVER_PORT=3006
export ANIMATION_SERVICE_HOST_IP=${host_ip}
export ANIMATION_SERVICE_PORT=3008
export MEGA_SERVICE_PORT=8888
export DEVICE="hpu"
export WAV2LIP_PORT=7860
export INFERENCE_MODE='wav2lip+gfpgan'
export CHECKPOINT_PATH='/usr/local/lib/python3.10/dist-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
export FACE="/home/user/comps/animation/src/assets/img/avatar1.jpg"
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
export AUDIO='None'
export FACESIZE=96
export OUTFILE="/outputs/result.mp4"
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
export UPSCALE_FACTOR=1
export FPS=10

View File

@@ -0,0 +1,27 @@
# AvatarChatbot E2E test scripts
## Set the required environment variable
```bash
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
```
## Run test
On Intel Xeon with TGI:
```bash
bash test_compose_on_xeon.sh
```
On Intel Gaudi with TGI:
```bash
bash test_compose_on_gaudi.sh
```
On AMD ROCm with TGI:
```bash
bash test_compose_on_rocm.sh
```

View File

@@ -45,37 +45,7 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
export host_ip=$(hostname -I | awk '{print $1}')
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
export WAV2LIP_ENDPOINT=http://$host_ip:7860
export MEGA_SERVICE_HOST_IP=${host_ip}
export WHISPER_SERVER_HOST_IP=${host_ip}
export WHISPER_SERVER_PORT=7066
export SPEECHT5_SERVER_HOST_IP=${host_ip}
export SPEECHT5_SERVER_PORT=7055
export LLM_SERVER_HOST_IP=${host_ip}
export LLM_SERVER_PORT=3006
export ANIMATION_SERVICE_HOST_IP=${host_ip}
export ANIMATION_SERVICE_PORT=3008
export MEGA_SERVICE_PORT=8888
export DEVICE="hpu"
export WAV2LIP_PORT=7860
export INFERENCE_MODE='wav2lip+gfpgan'
export CHECKPOINT_PATH='/usr/local/lib/python3.10/dist-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
export FACE="/home/user/comps/animation/src/assets/img/avatar1.jpg"
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
export AUDIO='None'
export FACESIZE=96
export OUTFILE="/outputs/result.mp4"
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
export UPSCALE_FACTOR=1
export FPS=10
source set_env.sh
# Start Docker Containers
docker compose up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -42,48 +42,8 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/amd/gpu/rocm
export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
export OPENAI_API_KEY=$OPENAI_API_KEY
export host_ip=${ip_address}
export TGI_SERVICE_PORT=3006
export TGI_LLM_ENDPOINT=http://${host_ip}:${TGI_SERVICE_PORT}
export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
export ASR_ENDPOINT=http://${host_ip}:7066
export TTS_ENDPOINT=http://${host_ip}:7055
export WAV2LIP_ENDPOINT=http://${host_ip}:7860
export MEGA_SERVICE_HOST_IP=${host_ip}
export ASR_SERVICE_HOST_IP=${host_ip}
export TTS_SERVICE_HOST_IP=${host_ip}
export LLM_SERVICE_HOST_IP=${host_ip}
export ANIMATION_SERVICE_HOST_IP=${host_ip}
export WHISPER_SERVER_HOST_IP=${host_ip}
export WHISPER_SERVER_PORT=7066
export SPEECHT5_SERVER_HOST_IP=${host_ip}
export SPEECHT5_SERVER_PORT=7055
export MEGA_SERVICE_PORT=8888
export ASR_SERVICE_PORT=3001
export TTS_SERVICE_PORT=3002
export LLM_SERVICE_PORT=3006
export ANIMATION_SERVICE_PORT=3008
export DEVICE="cpu"
export WAV2LIP_PORT=7860
export INFERENCE_MODE='wav2lip+gfpgan'
export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
export AUDIO='None'
export FACESIZE=96
export OUTFILE="./outputs/result.mp4"
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
export UPSCALE_FACTOR=1
export FPS=5
source set_env.sh
# Start Docker Containers
docker compose up -d --force-recreate

View File

@@ -45,37 +45,7 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
export host_ip=$(hostname -I | awk '{print $1}')
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
export WAV2LIP_ENDPOINT=http://$host_ip:7860
export MEGA_SERVICE_HOST_IP=${host_ip}
export WHISPER_SERVER_HOST_IP=${host_ip}
export WHISPER_SERVER_PORT=7066
export SPEECHT5_SERVER_HOST_IP=${host_ip}
export SPEECHT5_SERVER_PORT=7055
export LLM_SERVER_HOST_IP=${host_ip}
export LLM_SERVER_PORT=3006
export ANIMATION_SERVICE_HOST_IP=${host_ip}
export ANIMATION_SERVICE_PORT=3008
export MEGA_SERVICE_PORT=8888
export DEVICE="cpu"
export WAV2LIP_PORT=7860
export INFERENCE_MODE='wav2lip+gfpgan'
export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
export AUDIO='None'
export FACESIZE=96
export OUTFILE="/outputs/result.mp4"
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
export UPSCALE_FACTOR=1
export FPS=10
source set_env.sh
# Start Docker Containers
docker compose up -d

View File

@@ -16,7 +16,7 @@ services:
- chatqna-redis-vector-db
- chatqna-tei-embedding-service
ports:
- "${CHATQNA_REDIS_DATAPREP_PORT}:5000"
- "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}

View File

@@ -16,7 +16,7 @@ services:
- chatqna-redis-vector-db
- chatqna-tei-embedding-service
ports:
- "${CHATQNA_REDIS_DATAPREP_PORT}:5000"
- "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}

View File

@@ -16,7 +16,7 @@ services:
- chatqna-redis-vector-db
- chatqna-tei-embedding-service
ports:
- "${CHATQNA_REDIS_DATAPREP_PORT}:5000"
- "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}

View File

@@ -16,7 +16,7 @@ services:
- chatqna-redis-vector-db
- chatqna-tei-embedding-service
ports:
- "${CHATQNA_REDIS_DATAPREP_PORT:-5000}:5000"
- "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}

View File

@@ -2,17 +2,17 @@
# Copyright (C) 2025 Advanced Micro Devices, Inc.
export HOST_IP=''
export HOST_IP_EXTERNAL=''
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
export CHATQNA_BACKEND_SERVICE_PORT=18102
export CHATQNA_FRONTEND_SERVICE_PORT=18101
export CHATQNA_NGINX_PORT=18104
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001

View File

@@ -2,18 +2,18 @@
# Copyright (C) 2025 Advanced Micro Devices, Inc.
export HOST_IP=''
export HOST_IP_EXTERNAL=''
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
export CHATQNA_BACKEND_SERVICE_PORT=18102
export CHATQNA_FRONTEND_SERVICE_PORT=18101
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_LLM_FAQGEN_PORT=18011
export CHATQNA_NGINX_PORT=18104
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001

View File

@@ -2,18 +2,18 @@
# Copyright (C) 2025 Advanced Micro Devices, Inc.
export HOST_IP=''
export HOST_IP_EXTERNAL=''
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
export CHATQNA_BACKEND_SERVICE_PORT=18102
export CHATQNA_FRONTEND_SERVICE_PORT=18101
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_LLM_FAQGEN_PORT=18011
export CHATQNA_NGINX_PORT=18104
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001

View File

@@ -2,17 +2,17 @@
# Copyright (C) 2025 Advanced Micro Devices, Inc.
export HOST_IP=''
export HOST_IP_EXTERNAL=''
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
export CHATQNA_BACKEND_SERVICE_PORT=18102
export CHATQNA_FRONTEND_SERVICE_PORT=18101
export CHATQNA_NGINX_PORT=18104
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001

View File

@@ -1,6 +1,8 @@
# Copyright (C) 2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
rm *.json
if ls *.json 1> /dev/null 2>&1; then
rm *.json
fi
wget https://raw.githubusercontent.com/opea-project/GenAIEval/refs/heads/main/evals/benchmark/grafana/chatqna_megaservice_grafana.json
wget https://raw.githubusercontent.com/opea-project/GenAIEval/refs/heads/main/evals/benchmark/grafana/qdrant_grafana.json
wget https://raw.githubusercontent.com/opea-project/GenAIEval/refs/heads/main/evals/benchmark/grafana/milvus_grafana.json

View File

@@ -7,6 +7,9 @@ pushd "../../../../../" > /dev/null
source .set_env.sh
popd > /dev/null
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export HF_TOKEN=${HF_TOKEN}
export host_ip=${ip_address}
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"

View File

@@ -43,7 +43,7 @@ Some HuggingFace resources, such as some models, are only accessible if you have
### Configure the Deployment Environment
To set up environment variables for deploying ChatQnA services, source the _setup_env.sh_ script in this directory:
To set up environment variables for deploying ChatQnA services, source the _set_env.sh_ script in this directory (if using FAQGen or guardrails, source _set_env_faqgen.sh_ instead):
```
source ./set_env.sh

View File

@@ -4,12 +4,20 @@
# SPDX-License-Identifier: Apache-2.0
# Function to prompt for input and set environment variables
NON_INTERACTIVE=${NON_INTERACTIVE:-false}
prompt_for_env_var() {
local var_name="$1"
local prompt_message="$2"
local default_value="$3"
local mandatory="$4"
if [[ "$NON_INTERACTIVE" == "true" ]]; then
echo "Non-interactive environment detected. Setting $var_name to default: $default_value"
export "$var_name"="$default_value"
return
fi
if [[ "$mandatory" == "true" ]]; then
while [[ -z "$value" ]]; do
read -p "$prompt_message [default: \"${default_value}\"]: " value
@@ -34,7 +42,7 @@ popd > /dev/null
# Prompt the user for each required environment variable
prompt_for_env_var "EMBEDDING_MODEL_ID" "Enter the EMBEDDING_MODEL_ID" "BAAI/bge-base-en-v1.5" false
prompt_for_env_var "HUGGINGFACEHUB_API_TOKEN" "Enter the HUGGINGFACEHUB_API_TOKEN" "" true
prompt_for_env_var "HUGGINGFACEHUB_API_TOKEN" "Enter the HUGGINGFACEHUB_API_TOKEN" "${HF_TOKEN}" true
prompt_for_env_var "RERANK_MODEL_ID" "Enter the RERANK_MODEL_ID" "BAAI/bge-reranker-base" false
prompt_for_env_var "LLM_MODEL_ID" "Enter the LLM_MODEL_ID" "meta-llama/Meta-Llama-3-8B-Instruct" false
prompt_for_env_var "INDEX_NAME" "Enter the INDEX_NAME" "rag-redis" false
@@ -42,34 +50,40 @@ prompt_for_env_var "NUM_CARDS" "Enter the number of Gaudi devices" "1" false
prompt_for_env_var "host_ip" "Enter the host_ip" "$(curl ifconfig.me)" false
#Query for enabling http_proxy
prompt_for_env_var "http_proxy" "Enter the http_proxy." "" false
prompt_for_env_var "http_proxy" "Enter the http_proxy." "${http_proxy}" false
#Query for enabling https_proxy
prompt_for_env_var "https_proxy" "Enter the https_proxy." "" false
prompt_for_env_var "http_proxy" "Enter the http_proxy." "${https_proxy}" false
#Query for enabling no_proxy
prompt_for_env_var "no_proxy" "Enter the no_proxy." "" false
prompt_for_env_var "no_proxy" "Enter the no_proxy." "${no_proxy}" false
# Query for enabling logging
read -p "Enable logging? (yes/no): " logging && logging=$(echo "$logging" | tr '[:upper:]' '[:lower:]')
if [[ "$logging" == "yes" || "$logging" == "y" ]]; then
export LOGFLAG=true
if [[ "$NON_INTERACTIVE" == "true" ]]; then
# Query for enabling logging
prompt_for_env_var "LOGFLAG" "Enable logging? (yes/no): " "true" false
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
telemetry_flag=true
else
export LOGFLAG=false
fi
# Query for enabling OpenTelemetry Tracing Endpoint
read -p "Enable OpenTelemetry Tracing Endpoint? (yes/no): " telemetry && telemetry=$(echo "$telemetry" | tr '[:upper:]' '[:lower:]')
if [[ "$telemetry" == "yes" || "$telemetry" == "y" ]]; then
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
telemetry_flag=true
pushd "grafana/dashboards" > /dev/null
source download_opea_dashboard.sh
popd > /dev/null
else
telemetry_flag=false
# Query for enabling logging
read -p "Enable logging? (yes/no): " logging && logging=$(echo "$logging" | tr '[:upper:]' '[:lower:]')
if [[ "$logging" == "yes" || "$logging" == "y" ]]; then
export LOGFLAG=true
else
export LOGFLAG=false
fi
# Query for enabling OpenTelemetry Tracing Endpoint
read -p "Enable OpenTelemetry Tracing Endpoint? (yes/no): " telemetry && telemetry=$(echo "$telemetry" | tr '[:upper:]' '[:lower:]')
if [[ "$telemetry" == "yes" || "$telemetry" == "y" ]]; then
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
telemetry_flag=true
else
telemetry_flag=false
fi
fi
# Generate the .env file

View File

@@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
pushd "../../../../../" > /dev/null
source .set_env.sh
popd > /dev/null
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export HF_TOKEN=${HF_TOKEN}
export host_ip=${ip_address}
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export NUM_CARDS=1
export VLLM_SKIP_WARMUP=true
export LOGFLAG=True
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-gaudi-backend-server,chatqna-gaudi-ui-server,chatqna-gaudi-nginx-server"
export LLM_ENDPOINT_PORT=8010
export LLM_SERVER_PORT=9001
export CHATQNA_BACKEND_PORT=8888
export CHATQNA_REDIS_VECTOR_PORT=6377
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
export CHATQNA_FRONTEND_SERVICE_PORT=5175
export NGINX_PORT=80
export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM"
export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"

123
ChatQnA/tests/README.md Normal file
View File

@@ -0,0 +1,123 @@
# ChatQnA E2E test scripts
## Set the required environment variable
```bash
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
```
## Run test
On Intel Xeon with TGI:
```bash
bash test_compose_tgi_on_xeon.sh
```
On Intel Xeon with vLLM:
```bash
bash test_compose_on_xeon.sh
```
On Intel Xeon with MariaDB Vector:
```bash
bash test_compose_mariadb_on_xeon.sh
```
On Intel Xeon with Pinecone:
```bash
bash test_compose_pinecone_on_xeon.sh
```
On Intel Xeon with Milvus:
```bash
bash test_compose_milvus_on_xeon.sh
```
On Intel Xeon with Qdrant:
```bash
bash test_compose_qdrant_on_xeon.sh
```
On Intel Xeon without Rerank:
```bash
bash test_compose_without_rerank_on_xeon.sh
```
On Intel Gaudi with TGI:
```bash
bash test_compose_tgi_on_gaudi.sh
```
On Intel Gaudi with vLLM:
```bash
bash test_compose_on_gaudi.sh
```
On Intel Gaudi with Guardrails:
```bash
bash test_compose_guardrails_on_gaudi.sh
```
On Intel Gaudi without Rerank:
```bash
bash test_compose_without_rerank_on_gaudi.sh
```
On AMD ROCm with TGI:
```bash
bash test_compose_on_rocm.sh
```
On AMD ROCm with vLLM:
```bash
bash test_compose_vllm_on_rocm.sh
```
Test FAQ Generation On Intel Xeon with TGI:
```bash
bash test_compose_faqgen_tgi_on_xeon.sh
```
Test FAQ Generation On Intel Xeon with vLLM:
```bash
bash test_compose_faqgen_on_xeon.sh
```
Test FAQ Generation On Intel Gaudi with TGI:
```bash
bash test_compose_faqgen_tgi_on_gaudi.sh
```
Test FAQ Generation On Intel Gaudi with vLLM:
```bash
bash test_compose_faqgen_on_gaudi.sh
```
Test FAQ Generation On AMD ROCm with TGI:
```bash
bash test_compose_faqgen_on_rocm.sh
```
Test FAQ Generation On AMD ROCm with vLLM:
```bash
bash test_compose_faqgen_vllm_on_rocm.sh
```

View File

@@ -36,27 +36,7 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export NUM_CARDS=1
export INDEX_NAME="rag-redis"
export host_ip=${ip_address}
export LLM_ENDPOINT_PORT=8010
export LLM_SERVER_PORT=9001
export CHATQNA_BACKEND_PORT=8888
export CHATQNA_REDIS_VECTOR_PORT=6377
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
export CHATQNA_FRONTEND_SERVICE_PORT=5175
export NGINX_PORT=80
export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM"
export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
export HF_TOKEN=${HF_TOKEN}
export VLLM_SKIP_WARMUP=true
export LOGFLAG=True
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-gaudi-backend-server,chatqna-gaudi-ui-server,chatqna-gaudi-nginx-server"
source set_env_faqgen.sh
# Start Docker Containers
docker compose -f compose_faqgen.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -15,44 +15,7 @@ WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_LLM_FAQGEN_PORT=18011
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
export CHATQNA_REDIS_VECTOR_PORT=6379
export CHATQNA_TEI_EMBEDDING_PORT=18090
export CHATQNA_TEI_RERANKING_PORT=18808
export CHATQNA_TGI_SERVICE_PORT=18008
export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna"
export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP}
export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete"
export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get"
export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest"
export CHATQNA_EMBEDDING_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP}
export CHATQNA_LLM_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_LLM_ENDPOINT="http://${HOST_IP}:${CHATQNA_TGI_SERVICE_PORT}"
export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}"
export CHATQNA_RERANK_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_RETRIEVER_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}"
export CHATQNA_BACKEND_SERVICE_NAME=chatqna
export CHATQNA_INDEX_NAME="rag-redis"
export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
source $WORKPATH/docker_compose/amd/gpu/rocm/set_env_faqgen.sh
export PATH="~/miniconda3/bin:$PATH"

View File

@@ -37,26 +37,16 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export host_ip=${ip_address}
export LLM_ENDPOINT_PORT=8010
export LLM_SERVER_PORT=9001
export CHATQNA_BACKEND_PORT=8888
export CHATQNA_REDIS_VECTOR_PORT=6377
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
export CHATQNA_FRONTEND_SERVICE_PORT=5175
export NGINX_PORT=80
export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM"
export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
export HF_TOKEN=${HF_TOKEN}
export VLLM_SKIP_WARMUP=true
export LOGFLAG=True
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-xeon-backend-server,chatqna-xeon-ui-server,chatqna-xeon-nginx-server"
source set_env.sh
# Start Docker Containers
docker compose -f compose_faqgen.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -33,25 +33,8 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export host_ip=${ip_address}
export LLM_ENDPOINT_PORT=8010
export LLM_SERVER_PORT=9001
export CHATQNA_BACKEND_PORT=8888
export CHATQNA_REDIS_VECTOR_PORT=6377
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
export CHATQNA_FRONTEND_SERVICE_PORT=5175
export NGINX_PORT=80
export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
export HF_TOKEN=${HF_TOKEN}
export LOGFLAG=True
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-gaudi-backend-server,chatqna-gaudi-ui-server,chatqna-gaudi-nginx-server"
source set_env_faqgen.sh
# Start Docker Containers
docker compose -f compose_faqgen_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -37,25 +37,16 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export host_ip=${ip_address}
export LLM_ENDPOINT_PORT=8010
export LLM_SERVER_PORT=9001
export CHATQNA_BACKEND_PORT=8888
export CHATQNA_REDIS_VECTOR_PORT=6377
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
export CHATQNA_FRONTEND_SERVICE_PORT=5175
export NGINX_PORT=80
export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
export HF_TOKEN=${HF_TOKEN}
export LOGFLAG=True
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-xeon-backend-server,chatqna-xeon-ui-server,chatqna-xeon-nginx-server"
source set_env.sh
# Start Docker Containers
docker compose -f compose_faqgen_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -14,41 +14,7 @@ WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_LLM_FAQGEN_PORT=18011
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
export CHATQNA_REDIS_VECTOR_PORT=6379
export CHATQNA_TEI_EMBEDDING_PORT=18090
export CHATQNA_TEI_RERANKING_PORT=18808
export CHATQNA_VLLM_SERVICE_PORT=18008
export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna"
export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP_EXTERNAL}
export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete"
export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get"
export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest"
export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP}
export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}"
export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}"
export LLM_ENDPOINT="http://${HOST_IP}:${CHATQNA_VLLM_SERVICE_PORT}"
export CHATQNA_BACKEND_SERVICE_NAME=chatqna
export CHATQNA_INDEX_NAME="rag-redis"
export CHATQNA_TYPE="CHATQNA_FAQGEN"
export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM"
source $WORKPATH/docker_compose/amd/gpu/rocm/set_env_faqgen_vllm.sh
function build_docker_images() {
opea_branch=${opea_branch:-"main"}

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -36,14 +36,8 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export NUM_CARDS=1
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export host_ip=${ip_address}
export GURADRAILS_MODEL_ID="meta-llama/Meta-Llama-Guard-2-8B"
source set_env_faqgen.sh
# Start Docker Containers
docker compose -f compose_guardrails.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2025 MariaDB Foundation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -39,14 +39,8 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export MARIADB_DATABASE="vectordb"
export MARIADB_USER="chatqna"
export MARIADB_PASSWORD="test"
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export host_ip=${ip_address}
source set_env_mariadb.sh
# Start Docker Containers
docker compose -f compose_mariadb.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -140,7 +134,7 @@ function validate_megaservice() {
function stop_docker() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
docker compose down
docker compose -f compose_mariadb.yaml down
}
function main() {

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -39,11 +39,8 @@ function build_docker_images() {
}
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon/
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export LOGFLAG=true
source set_env.sh
# Start Docker Containers
docker compose -f compose_milvus.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -36,16 +36,10 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export NUM_CARDS=1
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export NON_INTERACTIVE=true
export host_ip=${ip_address}
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
export telemetry=yes
source set_env.sh
# Start Docker Containers
docker compose -f compose.yaml -f compose.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -15,41 +15,7 @@ WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
export CHATQNA_REDIS_VECTOR_PORT=6379
export CHATQNA_TEI_EMBEDDING_PORT=18090
export CHATQNA_TEI_RERANKING_PORT=18808
export CHATQNA_TGI_SERVICE_PORT=18008
export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna"
export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP}
export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete"
export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get"
export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest"
export CHATQNA_EMBEDDING_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP}
export CHATQNA_LLM_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}"
export CHATQNA_RERANK_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_RETRIEVER_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}"
export CHATQNA_BACKEND_SERVICE_NAME=chatqna
export CHATQNA_INDEX_NAME="rag-redis"
source $WORKPATH/docker_compose/amd/gpu/rocm/set_env.sh
export PATH="~/miniconda3/bin:$PATH"

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -40,15 +40,7 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export host_ip=${ip_address}
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
source set_env.sh
# Start Docker Containers
docker compose -f compose.yaml -f compose.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -41,14 +41,11 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon/
export no_proxy=${no_proxy},${ip_address}
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export PINECONE_API_KEY=${PINECONE_KEY_LANGCHAIN_TEST}
export PINECONE_INDEX_NAME="langchain-test"
export INDEX_NAME="langchain-test"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export LOGFLAG=true
source set_env.sh
# Start Docker Containers
docker compose -f compose_pinecone.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -40,11 +40,8 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-qdrant"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
source set_env.sh
sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -32,15 +32,10 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export NUM_CARDS=1
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
export NON_INTERACTIVE=true
export host_ip=${ip_address}
export telemetry=yes
source set_env.sh
# Start Docker Containers
docker compose -f compose_tgi.yaml -f compose_tgi.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -33,14 +33,7 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
source set_env.sh
# Start Docker Containers
docker compose -f compose_tgi.yaml -f compose_tgi.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -14,42 +14,7 @@ WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
export CHATQNA_REDIS_VECTOR_PORT=6379
export CHATQNA_TEI_EMBEDDING_PORT=18090
export CHATQNA_TEI_RERANKING_PORT=18808
export CHATQNA_VLLM_SERVICE_PORT=18008
export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna"
export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP_EXTERNAL}
export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete"
export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get"
export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest"
export CHATQNA_EMBEDDING_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP}
export CHATQNA_LLM_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}"
export CHATQNA_RERANK_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_RETRIEVER_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}"
export CHATQNA_BACKEND_SERVICE_NAME=chatqna
export CHATQNA_INDEX_NAME="rag-redis"
source $WORKPATH/docker_compose/amd/gpu/rocm/set_env_vllm.sh
function build_docker_images() {
opea_branch=${opea_branch:-"main"}

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -36,11 +36,8 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export NUM_CARDS=1
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export NON_INTERACTIVE=true
source set_env.sh
# Start Docker Containers
docker compose -f compose_without_rerank.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -41,10 +41,7 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
source set_env.sh
# Start Docker Containers
docker compose -f compose_without_rerank.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -1,8 +1,9 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
ARG IMAGE_REPO=opea
ARG BASE_TAG=latest
FROM opea/comps-base:$BASE_TAG
FROM $IMAGE_REPO/comps-base:$BASE_TAG
COPY ./codegen.py $HOME/codegen.py

View File

@@ -5,8 +5,8 @@
# SPDX-License-Identifier: Apache-2.0
### The IP address or domain name of the server on which the application is running
export HOST_IP=''
export EXTERNAL_HOST_IP=''
export HOST_IP=${ip_address}
export EXTERNAL_HOST_IP=${ip_address}
### The port of the TGI service. On this port, the TGI service will accept connections
export CODEGEN_TGI_SERVICE_PORT=8028
@@ -27,7 +27,7 @@ export CODEGEN_TGI_LLM_ENDPOINT="http://${HOST_IP}:${CODEGEN_TGI_SERVICE_PORT}"
export CODEGEN_MEGA_SERVICE_HOST_IP=${HOST_IP}
### The port for CodeGen backend service
export CODEGEN_BACKEND_SERVICE_PORT=18150
export CODEGEN_BACKEND_SERVICE_PORT=7778
### The URL of CodeGen backend service, used by the frontend service
export CODEGEN_BACKEND_SERVICE_URL="http://${EXTERNAL_HOST_IP}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen"
@@ -36,4 +36,4 @@ export CODEGEN_BACKEND_SERVICE_URL="http://${EXTERNAL_HOST_IP}:${CODEGEN_BACKEND
export CODEGEN_LLM_SERVICE_HOST_IP=${HOST_IP}
### The CodeGen service UI port
export CODEGEN_UI_SERVICE_PORT=18151
export CODEGEN_UI_SERVICE_PORT=5173

View File

@@ -5,8 +5,8 @@
# SPDX-License-Identifier: Apache-2.0
### The IP address or domain name of the server on which the application is running
export HOST_IP=''
export EXTERNAL_HOST_IP=''
export HOST_IP=${ip_address}
export EXTERNAL_HOST_IP=${ip_address}
### The port of the vLLM service. On this port, the TGI service will accept connections
export CODEGEN_VLLM_SERVICE_PORT=8028
@@ -25,7 +25,7 @@ export CODEGEN_LLM_SERVICE_PORT=9000
export CODEGEN_MEGA_SERVICE_HOST_IP=${HOST_IP}
### The port for CodeGen backend service
export CODEGEN_BACKEND_SERVICE_PORT=18150
export CODEGEN_BACKEND_SERVICE_PORT=7778
### The URL of CodeGen backend service, used by the frontend service
export CODEGEN_BACKEND_SERVICE_URL="http://${EXTERNAL_HOST_IP}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen"
@@ -34,4 +34,4 @@ export CODEGEN_BACKEND_SERVICE_URL="http://${EXTERNAL_HOST_IP}:${CODEGEN_BACKEND
export CODEGEN_LLM_SERVICE_HOST_IP=${HOST_IP}
### The CodeGen service UI port
export CODEGEN_UI_SERVICE_PORT=18151
export CODEGEN_UI_SERVICE_PORT=5173

View File

@@ -6,22 +6,10 @@ This README provides instructions for deploying the CodeGen application using Do
- [Overview](#overview)
- [Prerequisites](#prerequisites)
- [Quick Start](#quick-start)
- [Available Deployment Options](#available-deployment-options)
- [Default: vLLM-based Deployment (`--profile codegen-xeon-vllm`)](#default-vllm-based-deployment---profile-codegen-xeon-vllm)
- [TGI-based Deployment (`--profile codegen-xeon-tgi`)](#tgi-based-deployment---profile-codegen-xeon-tgi)
- [Configuration Parameters](#configuration-parameters)
- [Environment Variables](#environment-variables)
- [Compose Profiles](#compose-profiles)
- [Quick Start Deployment](#quick-start-deployment)
- [Building Custom Images (Optional)](#building-custom-images-optional)
- [Validate Services](#validate-services)
- [Check Container Status](#check-container-status)
- [Run Validation Script/Commands](#run-validation-scriptcommands)
- [Accessing the User Interface (UI)](#accessing-the-user-interface-ui)
- [Gradio UI (Default)](#gradio-ui-default)
- [Svelte UI (Optional)](#svelte-ui-optional)
- [React UI (Optional)](#react-ui-optional)
- [VS Code Extension (Optional)](#vs-code-extension-optional)
- [Troubleshooting](#troubleshooting)
- [Stopping the Application](#stopping-the-application)
- [Next Steps](#next-steps)
@@ -43,27 +31,37 @@ This guide focuses on running the pre-configured CodeGen service using Docker Co
cd GenAIExamples/CodeGen/docker_compose/intel/cpu/xeon
```
## Quick Start
## Quick Start Deployment
This uses the default vLLM-based deployment profile (`codegen-xeon-vllm`).
1. **Configure Environment:**
Set required environment variables in your shell:
```bash
# Replace with your host's external IP address (do not use localhost or 127.0.0.1)
export HOST_IP="your_external_ip_address"
# Replace with your Hugging Face Hub API token
export HUGGINGFACEHUB_API_TOKEN="your_huggingface_token"
```bash
# Replace with your host's external IP address (do not use localhost or 127.0.0.1)
export HOST_IP="your_external_ip_address"
# Replace with your Hugging Face Hub API token
export HUGGINGFACEHUB_API_TOKEN="your_huggingface_token"
# Optional: Configure proxy if needed
# export http_proxy="your_http_proxy"
# export https_proxy="your_https_proxy"
# export no_proxy="localhost,127.0.0.1,${HOST_IP}" # Add other hosts if necessary
source ../../../set_env.sh
```
# Optional: Configure proxy if needed
# export http_proxy="your_http_proxy"
# export https_proxy="your_https_proxy"
# export no_proxy="localhost,127.0.0.1,${HOST_IP}" # Add other hosts if necessary
source ../../set_env.sh
```
_Note: The compose file might read additional variables from a `.env` file or expect them defined elsewhere. Ensure all required variables like ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.) are set if not using defaults from the compose file._
_Note: The compose file might read additional variables from set_env.sh. Ensure all required variables like ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.) are set if not using defaults from the compose file._
For instance, edit the set_env.sh to change the LLM model
```
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
```
can be changed to other model if needed
```
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-32B-Instruct"
```
2. **Start Services (vLLM Profile):**
@@ -74,17 +72,17 @@ This uses the default vLLM-based deployment profile (`codegen-xeon-vllm`).
3. **Validate:**
Wait several minutes for models to download (especially the first time) and services to initialize. Check container logs (`docker compose logs -f <service_name>`) or proceed to the validation steps below.
## Available Deployment Options
### Available Deployment Options
The `compose.yaml` file uses Docker Compose profiles to select the LLM serving backend.
### Default: vLLM-based Deployment (`--profile codegen-xeon-vllm`)
#### Default: vLLM-based Deployment (`--profile codegen-xeon-vllm`)
- **Profile:** `codegen-xeon-vllm`
- **Description:** Uses vLLM optimized for Intel CPUs as the LLM serving engine. This is the default profile used in the Quick Start.
- **Services Deployed:** `codegen-vllm-server`, `codegen-llm-server`, `codegen-tei-embedding-server`, `codegen-retriever-server`, `redis-vector-db`, `codegen-dataprep-server`, `codegen-backend-server`, `codegen-gradio-ui-server`.
### TGI-based Deployment (`--profile codegen-xeon-tgi`)
#### TGI-based Deployment (`--profile codegen-xeon-tgi`)
- **Profile:** `codegen-xeon-tgi`
- **Description:** Uses Hugging Face Text Generation Inference (TGI) optimized for Intel CPUs as the LLM serving engine.
@@ -95,24 +93,24 @@ The `compose.yaml` file uses Docker Compose profiles to select the LLM serving b
docker compose --profile codegen-xeon-tgi up -d
```
## Configuration Parameters
### Configuration Parameters
### Environment Variables
#### Environment Variables
Key parameters are configured via environment variables set before running `docker compose up`.
| Environment Variable | Description | Default (Set Externally) |
| :-------------------------------------- | :------------------------------------------------------------------------------------------------------------------ | :----------------------------------------------------------------------------------------------- |
| `HOST_IP` | External IP address of the host machine. **Required.** | `your_external_ip_address` |
| `HUGGINGFACEHUB_API_TOKEN` | Your Hugging Face Hub token for model access. **Required.** | `your_huggingface_token` |
| `LLM_MODEL_ID` | Hugging Face model ID for the CodeGen LLM (used by TGI/vLLM service). Configured within `compose.yaml` environment. | `Qwen/Qwen2.5-Coder-7B-Instruct` |
| `EMBEDDING_MODEL_ID` | Hugging Face model ID for the embedding model (used by TEI service). Configured within `compose.yaml` environment. | `BAAI/bge-base-en-v1.5` |
| `LLM_ENDPOINT` | Internal URL for the LLM serving endpoint (used by `codegen-llm-server`). Configured in `compose.yaml`. | `http://codegen-tgi-server:80/generate` or `http://codegen-vllm-server:8000/v1/chat/completions` |
| `TEI_EMBEDDING_ENDPOINT` | Internal URL for the Embedding service. Configured in `compose.yaml`. | `http://codegen-tei-embedding-server:80/embed` |
| `DATAPREP_ENDPOINT` | Internal URL for the Data Preparation service. Configured in `compose.yaml`. | `http://codegen-dataprep-server:80/dataprep` |
| `BACKEND_SERVICE_ENDPOINT` | External URL for the CodeGen Gateway (MegaService). Derived from `HOST_IP` and port `7778`. | `http://${HOST_IP}:7778/v1/codegen` |
| `*_PORT` (Internal) | Internal container ports (e.g., `80`, `6379`). Defined in `compose.yaml`. | N/A |
| `http_proxy` / `https_proxy`/`no_proxy` | Network proxy settings (if required). | `""` |
| Environment Variable | Description | Default (Set Externally) |
| :-------------------------------------- | :------------------------------------------------------------------------------------------------------------------ | :--------------------------------------------- | ------------------------------------ |
| `HOST_IP` | External IP address of the host machine. **Required.** | `your_external_ip_address` |
| `HUGGINGFACEHUB_API_TOKEN` | Your Hugging Face Hub token for model access. **Required.** | `your_huggingface_token` |
| `LLM_MODEL_ID` | Hugging Face model ID for the CodeGen LLM (used by TGI/vLLM service). Configured within `compose.yaml` environment. | `Qwen/Qwen2.5-Coder-7B-Instruct` |
| `EMBEDDING_MODEL_ID` | Hugging Face model ID for the embedding model (used by TEI service). Configured within `compose.yaml` environment. | `BAAI/bge-base-en-v1.5` |
| `LLM_ENDPOINT` | Internal URL for the LLM serving endpoint (used by `codegen-llm-server`). Configured in `compose.yaml`. | `http://codegen-vllm | tgi-server:9000/v1/chat/completions` |
| `TEI_EMBEDDING_ENDPOINT` | Internal URL for the Embedding service. Configured in `compose.yaml`. | `http://codegen-tei-embedding-server:80/embed` |
| `DATAPREP_ENDPOINT` | Internal URL for the Data Preparation service. Configured in `compose.yaml`. | `http://codegen-dataprep-server:80/dataprep` |
| `BACKEND_SERVICE_ENDPOINT` | External URL for the CodeGen Gateway (MegaService). Derived from `HOST_IP` and port `7778`. | `http://${HOST_IP}:7778/v1/codegen` |
| `*_PORT` (Internal) | Internal container ports (e.g., `80`, `6379`). Defined in `compose.yaml`. | N/A |
| `http_proxy` / `https_proxy`/`no_proxy` | Network proxy settings (if required). | `""` |
Most of these parameters are in `set_env.sh`, you can either modify this file or overwrite the env variables by setting them.
@@ -120,7 +118,7 @@ Most of these parameters are in `set_env.sh`, you can either modify this file or
source CodeGen/docker_compose/set_env.sh
```
### Compose Profiles
#### Compose Profiles
Docker Compose profiles (`codegen-xeon-vllm`, `codegen-xeon-tgi`) control which LLM serving backend (vLLM or TGI) and its associated dependencies are started. Only one profile should typically be active.
@@ -152,11 +150,11 @@ Check logs for specific services: `docker compose logs <service_name>`
Use `curl` commands to test the main service endpoints. Ensure `HOST_IP` is correctly set in your environment.
1. **Validate LLM Serving Endpoint (Example for vLLM on default port 8000 internally, exposed differently):**
1. **Validate LLM Serving Endpoint (Example for vLLM on default port 9000 internally, exposed differently):**
```bash
# This command structure targets the OpenAI-compatible vLLM endpoint
curl http://${HOST_IP}:8000/v1/chat/completions \
curl http://${HOST_IP}:9000/v1/chat/completions \
-X POST \
-H 'Content-Type: application/json' \
-d '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "Implement a basic Python class"}], "max_tokens":32}'
@@ -179,8 +177,8 @@ Multiple UI options can be configured via the `compose.yaml`.
### Gradio UI (Default)
Access the default Gradio UI by navigating to:
`http://{HOST_IP}:8080`
_(Port `8080` is the default host mapping for `codegen-gradio-ui-server`)_
`http://{HOST_IP}:5173`
_(Port `5173` is the default host mapping for `codegen-gradio-ui-server`)_
![Gradio UI - Code Generation](../../../../assets/img/codegen_gradio_ui_main.png)
![Gradio UI - Resource Management](../../../../assets/img/codegen_gradio_ui_dataprep.png)

View File

@@ -6,23 +6,10 @@ This README provides instructions for deploying the CodeGen application using Do
- [Overview](#overview)
- [Prerequisites](#prerequisites)
- [Quick Start](#quick-start)
- [Available Deployment Options](#available-deployment-options)
- [Default: vLLM-based Deployment (`--profile codegen-gaudi-vllm`)](#default-vllm-based-deployment---profile-codegen-gaudi-vllm)
- [TGI-based Deployment (`--profile codegen-gaudi-tgi`)](#tgi-based-deployment---profile-codegen-gaudi-tgi)
- [Configuration Parameters](#configuration-parameters)
- [Environment Variables](#environment-variables)
- [Compose Profiles](#compose-profiles)
- [Docker Compose Gaudi Configuration](#docker-compose-gaudi-configuration)
- [Quick Start Deployment](#quick-start-deployment)
- [Building Custom Images (Optional)](#building-custom-images-optional)
- [Validate Services](#validate-services)
- [Check Container Status](#check-container-status)
- [Run Validation Script/Commands](#run-validation-scriptcommands)
- [Accessing the User Interface (UI)](#accessing-the-user-interface-ui)
- [Gradio UI (Default)](#gradio-ui-default)
- [Svelte UI (Optional)](#svelte-ui-optional)
- [React UI (Optional)](#react-ui-optional)
- [VS Code Extension (Optional)](#vs-code-extension-optional)
- [Troubleshooting](#troubleshooting)
- [Stopping the Application](#stopping-the-application)
- [Next Steps](#next-steps)
@@ -44,7 +31,7 @@ This guide focuses on running the pre-configured CodeGen service using Docker Co
cd GenAIExamples/CodeGen/docker_compose/intel/hpu/gaudi
```
## Quick Start
## Quick Start Deployment
This uses the default vLLM-based deployment profile (`codegen-gaudi-vllm`).
@@ -61,10 +48,21 @@ This uses the default vLLM-based deployment profile (`codegen-gaudi-vllm`).
# export http_proxy="your_http_proxy"
# export https_proxy="your_https_proxy"
# export no_proxy="localhost,127.0.0.1,${HOST_IP}" # Add other hosts if necessary
source ../../../set_env.sh
source ../../set_env.sh
```
_Note: Ensure all required variables like ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.) are set if not using defaults from the compose file._
_Note: The compose file might read additional variables from set_env.sh. Ensure all required variables like ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.) are set if not using defaults from the compose file._
For instance, edit the set_env.sh to change the LLM model
```
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
```
can be changed to other model if needed
```
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-32B-Instruct"
```
2. **Start Services (vLLM Profile):**
@@ -104,18 +102,18 @@ The `compose.yaml` file uses Docker Compose profiles to select the LLM serving b
Key parameters are configured via environment variables set before running `docker compose up`.
| Environment Variable | Description | Default (Set Externally) |
| :-------------------------------------- | :------------------------------------------------------------------------------------------------------------------ | :----------------------------------------------------------------------------------------------- |
| `HOST_IP` | External IP address of the host machine. **Required.** | `your_external_ip_address` |
| `HUGGINGFACEHUB_API_TOKEN` | Your Hugging Face Hub token for model access. **Required.** | `your_huggingface_token` |
| `LLM_MODEL_ID` | Hugging Face model ID for the CodeGen LLM (used by TGI/vLLM service). Configured within `compose.yaml` environment. | `Qwen/Qwen2.5-Coder-7B-Instruct` |
| `EMBEDDING_MODEL_ID` | Hugging Face model ID for the embedding model (used by TEI service). Configured within `compose.yaml` environment. | `BAAI/bge-base-en-v1.5` |
| `LLM_ENDPOINT` | Internal URL for the LLM serving endpoint (used by `codegen-llm-server`). Configured in `compose.yaml`. | `http://codegen-tgi-server:80/generate` or `http://codegen-vllm-server:8000/v1/chat/completions` |
| `TEI_EMBEDDING_ENDPOINT` | Internal URL for the Embedding service. Configured in `compose.yaml`. | `http://codegen-tei-embedding-server:80/embed` |
| `DATAPREP_ENDPOINT` | Internal URL for the Data Preparation service. Configured in `compose.yaml`. | `http://codegen-dataprep-server:80/dataprep` |
| `BACKEND_SERVICE_ENDPOINT` | External URL for the CodeGen Gateway (MegaService). Derived from `HOST_IP` and port `7778`. | `http://${HOST_IP}:7778/v1/codegen` |
| `*_PORT` (Internal) | Internal container ports (e.g., `80`, `6379`). Defined in `compose.yaml`. | N/A |
| `http_proxy` / `https_proxy`/`no_proxy` | Network proxy settings (if required). | `""` |
| Environment Variable                    | Description                                                                                                         | Default (Set Externally)                                     |
| :-------------------------------------- | :------------------------------------------------------------------------------------------------------------------ | :----------------------------------------------------------- |
| `HOST_IP`                               | External IP address of the host machine. **Required.**                                                              | `your_external_ip_address`                                   |
| `HUGGINGFACEHUB_API_TOKEN`              | Your Hugging Face Hub token for model access. **Required.**                                                         | `your_huggingface_token`                                     |
| `LLM_MODEL_ID`                          | Hugging Face model ID for the CodeGen LLM (used by TGI/vLLM service). Configured within `compose.yaml` environment. | `Qwen/Qwen2.5-Coder-7B-Instruct`                             |
| `EMBEDDING_MODEL_ID`                    | Hugging Face model ID for the embedding model (used by TEI service). Configured within `compose.yaml` environment.  | `BAAI/bge-base-en-v1.5`                                      |
| `LLM_ENDPOINT`                          | Internal URL for the LLM serving endpoint (used by `llm-codegen-vllm-server`). Configured in `compose.yaml`.        | `http://codegen-{vllm\|tgi}-server:9000/v1/chat/completions` |
| `TEI_EMBEDDING_ENDPOINT`                | Internal URL for the Embedding service. Configured in `compose.yaml`.                                               | `http://codegen-tei-embedding-server:80/embed`               |
| `DATAPREP_ENDPOINT`                     | Internal URL for the Data Preparation service. Configured in `compose.yaml`.                                        | `http://codegen-dataprep-server:80/dataprep`                 |
| `BACKEND_SERVICE_ENDPOINT`              | External URL for the CodeGen Gateway (MegaService). Derived from `HOST_IP` and port `7778`.                         | `http://${HOST_IP}:7778/v1/codegen`                          |
| `*_PORT` (Internal)                     | Internal container ports (e.g., `80`, `6379`). Defined in `compose.yaml`.                                           | N/A                                                          |
| `http_proxy` / `https_proxy`/`no_proxy` | Network proxy settings (if required).                                                                               | `""`                                                         |
Most of these parameters are in `set_env.sh`, you can either modify this file or overwrite the env variables by setting them.
@@ -172,11 +170,11 @@ Check logs: `docker compose logs <service_name>`. Pay attention to `vllm-gaudi-s
Use `curl` commands targeting the main service endpoints. Ensure `HOST_IP` is correctly set.
1. **Validate LLM Serving Endpoint (Example for vLLM on default port 8000 internally, exposed differently):**
1. **Validate LLM Serving Endpoint (Example for vLLM on default port 9000 internally, exposed differently):**
```bash
# This command structure targets the OpenAI-compatible vLLM endpoint
curl http://${HOST_IP}:8000/v1/chat/completions \
curl http://${HOST_IP}:9000/v1/chat/completions \
-X POST \
-H 'Content-Type: application/json' \
-d '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "Implement a basic Python class"}], "max_tokens":32}'
@@ -197,8 +195,8 @@ UI options are similar to the Xeon deployment.
### Gradio UI (Default)
Access the default Gradio UI:
`http://{HOST_IP}:8080`
_(Port `8080` is the default host mapping)_
`http://{HOST_IP}:5173`
_(Port `5173` is the default host mapping)_
![Gradio UI](../../../../assets/img/codegen_gradio_ui_main.png)

View File

@@ -0,0 +1,51 @@
#!/usr/bin/env bash
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# Environment setup for the CodeGen Gaudi docker compose deployment.
# This file is meant to be *sourced*, not executed, so the sanity checks
# below only warn (an `exit` here would terminate the caller's shell).

# Load shared repository-level defaults (two directories up).
pushd "../../" > /dev/null
source .set_env.sh
popd > /dev/null

# First IPv4 address of the host; all service endpoints below derive from it.
export HOST_IP="$(hostname -I | awk '{print $1}')"
# Re-export so docker compose sees the token even if the caller set it
# without `export`.
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}

# Sanity checks: report problems on stderr so they are not lost when
# stdout is redirected.
if [ -z "${HUGGINGFACEHUB_API_TOKEN}" ]; then
    echo "Error: HUGGINGFACEHUB_API_TOKEN is not set. Please set HUGGINGFACEHUB_API_TOKEN" >&2
fi

if [ -z "${HOST_IP}" ]; then
    echo "Error: HOST_IP is not set. Please set HOST_IP first." >&2
fi

# Proxy settings: pass through whatever the caller configured.
export no_proxy=${no_proxy},${HOST_IP}
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}

# LLM serving configuration.
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
export LLM_SERVICE_PORT=9000
export LLM_ENDPOINT="http://${HOST_IP}:8028"
export LLM_SERVICE_HOST_IP=${HOST_IP}
export TGI_LLM_ENDPOINT="http://${HOST_IP}:8028"

# CodeGen MegaService (gateway).
export MEGA_SERVICE_PORT=7778
export MEGA_SERVICE_HOST_IP=${HOST_IP}
export BACKEND_SERVICE_ENDPOINT="http://${HOST_IP}:7778/v1/codegen"

# Redis vector store / retriever.
export REDIS_DB_PORT=6379
export REDIS_INSIGHTS_PORT=8001
export REDIS_RETRIEVER_PORT=7000
export REDIS_URL="redis://${HOST_IP}:${REDIS_DB_PORT}"
export RETRIEVAL_SERVICE_HOST_IP=${HOST_IP}
export RETRIEVER_COMPONENT_NAME="OPEA_RETRIEVER_REDIS"
export INDEX_NAME="CodeGen"

# Embedding (TEI) service.
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export EMBEDDER_PORT=6000
export TEI_EMBEDDER_PORT=8090
export TEI_EMBEDDING_HOST_IP=${HOST_IP}
export TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${TEI_EMBEDDER_PORT}"

# Data preparation service.
export DATAPREP_REDIS_PORT=6007
export DATAPREP_ENDPOINT="http://${HOST_IP}:${DATAPREP_REDIS_PORT}/v1/dataprep"

# Misc.
export LOGFLAG=false
# Model cache directory; override by setting $model_cache before sourcing.
export MODEL_CACHE=${model_cache:-"./data"}
export NUM_CARDS=1

View File

@@ -1,50 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# Environment setup for the CodeGen docker compose deployment.
# This file is meant to be *sourced*, not executed, so the sanity checks
# below only warn (an `exit` here would terminate the caller's shell).

# Load shared repository-level defaults (two directories up).
pushd "../../" > /dev/null
source .set_env.sh
popd > /dev/null

# First IPv4 address of the host; all service endpoints below derive from it.
export host_ip="$(hostname -I | awk '{print $1}')"

# Sanity checks: report problems on stderr so they are not lost when
# stdout is redirected.
if [ -z "${HUGGINGFACEHUB_API_TOKEN}" ]; then
    echo "Error: HUGGINGFACEHUB_API_TOKEN is not set. Please set HUGGINGFACEHUB_API_TOKEN" >&2
fi

if [ -z "${host_ip}" ]; then
    echo "Error: host_ip is not set. Please set host_ip first." >&2
fi

# Proxy settings: pass through whatever the caller configured.
export no_proxy=${no_proxy},${host_ip}
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}

# LLM serving configuration.
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-32B-Instruct"
export LLM_SERVICE_PORT=9000
export LLM_ENDPOINT="http://${host_ip}:8028"
export LLM_SERVICE_HOST_IP=${host_ip}
export TGI_LLM_ENDPOINT="http://${host_ip}:8028"

# CodeGen MegaService (gateway).
export MEGA_SERVICE_PORT=7778
export MEGA_SERVICE_HOST_IP=${host_ip}
export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:7778/v1/codegen"

# Redis vector store / retriever.
export REDIS_DB_PORT=6379
export REDIS_INSIGHTS_PORT=8001
export REDIS_RETRIEVER_PORT=7000
export REDIS_URL="redis://${host_ip}:${REDIS_DB_PORT}"
export RETRIEVAL_SERVICE_HOST_IP=${host_ip}
export RETRIEVER_COMPONENT_NAME="OPEA_RETRIEVER_REDIS"
export INDEX_NAME="CodeGen"

# Embedding (TEI) service.
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export EMBEDDER_PORT=6000
export TEI_EMBEDDER_PORT=8090
export TEI_EMBEDDING_HOST_IP=${host_ip}
export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:${TEI_EMBEDDER_PORT}"

# Data preparation service.
export DATAPREP_REDIS_PORT=6007
export DATAPREP_ENDPOINT="http://${host_ip}:${DATAPREP_REDIS_PORT}/v1/dataprep"

# Misc.
export LOGFLAG=false
# Model cache directory (fixed path in this variant).
export MODEL_CACHE="./data"
export NUM_CARDS=1

View File

@@ -5,6 +5,8 @@ services:
codegen:
build:
args:
IMAGE_REPO: ${REGISTRY}
BASE_TAG: ${TAG}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
no_proxy: ${no_proxy}
@@ -39,6 +41,7 @@ services:
build:
context: GenAIComps
dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu
extends: codegen
image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest}
vllm:
build:

33
CodeGen/tests/README.md Normal file
View File

@@ -0,0 +1,33 @@
# CodeGen E2E test scripts
## Set the required environment variable
```bash
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
```
## Run test
On Intel Xeon with TGI:
```bash
bash test_compose_on_xeon.sh
```
On Intel Gaudi with TGI:
```bash
bash test_compose_on_gaudi.sh
```
On AMD ROCm with TGI:
```bash
bash test_compose_on_rocm.sh
```
On AMD ROCm with vLLM:
```bash
bash test_compose_vllm_on_rocm.sh
```

View File

@@ -10,36 +10,20 @@ echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
export MODEL_CACHE=${model_cache:-"./data"}
export REDIS_DB_PORT=6379
export REDIS_INSIGHTS_PORT=8001
export REDIS_RETRIEVER_PORT=7000
export EMBEDDER_PORT=6000
export TEI_EMBEDDER_PORT=8090
export DATAPREP_REDIS_PORT=6007
WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy=${no_proxy},${ip_address}
source $WORKPATH/docker_compose/intel/set_env.sh
function build_docker_images() {
opea_branch=${opea_branch:-"main"}
# If the opea_branch isn't main, replace the git clone branch in Dockerfile.
if [[ "${opea_branch}" != "main" ]]; then
cd $WORKPATH
OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
find . -type f -name "Dockerfile*" | while read -r file; do
echo "Processing file: $file"
sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
done
fi
cd $WORKPATH/docker_image_build
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
# Download Gaudi vllm of latest tag
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
@@ -60,28 +44,6 @@ function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
export LLM_ENDPOINT="http://${ip_address}:8028"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export MEGA_SERVICE_PORT=7778
export MEGA_SERVICE_HOST_IP=${ip_address}
export LLM_SERVICE_HOST_IP=${ip_address}
export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:${MEGA_SERVICE_PORT}/v1/codegen"
export NUM_CARDS=1
export host_ip=${ip_address}
export REDIS_URL="redis://${host_ip}:${REDIS_DB_PORT}"
export RETRIEVAL_SERVICE_HOST_IP=${host_ip}
export RETRIEVER_COMPONENT_NAME="OPEA_RETRIEVER_REDIS"
export INDEX_NAME="CodeGen"
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export TEI_EMBEDDING_HOST_IP=${host_ip}
export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:${TEI_EMBEDDER_PORT}"
export DATAPREP_ENDPOINT="http://${host_ip}:${DATAPREP_REDIS_PORT}/v1/dataprep"
export INDEX_NAME="CodeGen"
# Start Docker Containers
docker compose --profile ${compose_profile} up -d | tee ${LOG_PATH}/start_services_with_compose.log
@@ -250,24 +212,36 @@ function main() {
stop_docker "${docker_compose_profiles[${i}]}"
done
# build docker images
echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "::endgroup::"
# loop all profiles
for ((i = 0; i < len_profiles; i++)); do
echo "Process [${i}]: ${docker_compose_profiles[$i]}, ${docker_llm_container_names[${i}]}"
echo "::group::start_services"
start_services "${docker_compose_profiles[${i}]}" "${docker_llm_container_names[${i}]}"
echo "::endgroup::"
docker ps -a
echo "::group::validate_microservices"
validate_microservices "${docker_llm_container_names[${i}]}"
echo "::endgroup::"
echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"
echo "::group::validate_gradio"
validate_gradio
echo "::endgroup::"
stop_docker "${docker_compose_profiles[${i}]}"
sleep 5s
done
echo y | docker system prune
docker system prune -f
}
main

View File

@@ -17,19 +17,13 @@ ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() {
opea_branch=${opea_branch:-"main"}
# If the opea_branch isn't main, replace the git clone branch in Dockerfile.
if [[ "${opea_branch}" != "main" ]]; then
cd $WORKPATH
OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
find . -type f -name "Dockerfile*" | while read -r file; do
echo "Processing file: $file"
sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
done
fi
cd $WORKPATH/docker_image_build
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="codegen codegen-ui llm-textgen"
@@ -41,18 +35,7 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/amd/gpu/rocm/
export CODEGEN_LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
export CODEGEN_TGI_SERVICE_PORT=8028
export CODEGEN_TGI_LLM_ENDPOINT="http://${ip_address}:${CODEGEN_TGI_SERVICE_PORT}"
export CODEGEN_LLM_SERVICE_PORT=9000
export CODEGEN_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CODEGEN_MEGA_SERVICE_HOST_IP=${ip_address}
export CODEGEN_LLM_SERVICE_HOST_IP=${ip_address}
export CODEGEN_BACKEND_SERVICE_PORT=7778
export CODEGEN_BACKEND_SERVICE_URL="http://${ip_address}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen"
export CODEGEN_UI_SERVICE_PORT=5173
export HOST_IP=${ip_address}
source set_env.sh
sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
@@ -164,18 +147,35 @@ function stop_docker() {
function main() {
echo "::group::stop_docker"
stop_docker
echo "::endgroup::"
echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "::endgroup::"
echo "::group::start_services"
start_services
echo "::endgroup::"
echo "::group::validate_microservices"
validate_microservices
validate_megaservice
validate_frontend
echo "::endgroup::"
echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"
echo "::group::validate_frontend"
validate_frontend
echo "::endgroup::"
echo "::group::stop_docker"
stop_docker
echo y | docker system prune
cd $WORKPATH
echo "::endgroup::"
docker system prune -f
}

View File

@@ -10,36 +10,21 @@ echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
export MODEL_CACHE=${model_cache:-"./data"}
export REDIS_DB_PORT=6379
export REDIS_INSIGHTS_PORT=8001
export REDIS_RETRIEVER_PORT=7000
export EMBEDDER_PORT=6000
export TEI_EMBEDDER_PORT=8090
export DATAPREP_REDIS_PORT=6007
WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy=${no_proxy},${ip_address}
source $WORKPATH/docker_compose/intel/set_env.sh
function build_docker_images() {
opea_branch=${opea_branch:-"main"}
# If the opea_branch isn't main, replace the git clone branch in Dockerfile.
if [[ "${opea_branch}" != "main" ]]; then
cd $WORKPATH
OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
find . -type f -name "Dockerfile*" | while read -r file; do
echo "Processing file: $file"
sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
done
fi
cd $WORKPATH/docker_image_build
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
git clone https://github.com/vllm-project/vllm.git && cd vllm
VLLM_VER="v0.8.3"
@@ -62,25 +47,6 @@ function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon/
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
export LLM_ENDPOINT="http://${ip_address}:8028"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export MEGA_SERVICE_PORT=7778
export MEGA_SERVICE_HOST_IP=${ip_address}
export LLM_SERVICE_HOST_IP=${ip_address}
export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:${MEGA_SERVICE_PORT}/v1/codegen"
export host_ip=${ip_address}
export REDIS_URL="redis://${host_ip}:${REDIS_DB_PORT}"
export RETRIEVAL_SERVICE_HOST_IP=${host_ip}
export RETRIEVER_COMPONENT_NAME="OPEA_RETRIEVER_REDIS"
export INDEX_NAME="CodeGen"
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export TEI_EMBEDDING_HOST_IP=${host_ip}
export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:${TEI_EMBEDDER_PORT}"
export DATAPREP_ENDPOINT="http://${host_ip}:${DATAPREP_REDIS_PORT}/v1/dataprep"
# Start Docker Containers
docker compose --profile ${compose_profile} up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -256,17 +222,28 @@ function main() {
for ((i = 0; i < len_profiles; i++)); do
echo "Process [${i}]: ${docker_compose_profiles[$i]}, ${docker_llm_container_names[${i}]}"
docker ps -a
start_services "${docker_compose_profiles[${i}]}" "${docker_llm_container_names[${i}]}"
echo "::group::start_services"
start_services "${docker_compose_profiles[${i}]}" "${docker_llm_container_names[${i}]}"
echo "::endgroup::"
echo "::group::validate_microservices"
validate_microservices "${docker_llm_container_names[${i}]}"
echo "::endgroup::"
echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"
echo "::group::validate_gradio"
validate_gradio
echo "::endgroup::"
stop_docker "${docker_compose_profiles[${i}]}"
sleep 5s
done
echo y | docker system prune
docker system prune -f
}
main

View File

@@ -17,19 +17,13 @@ ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() {
opea_branch=${opea_branch:-"main"}
# If the opea_branch isn't main, replace the git clone branch in Dockerfile.
if [[ "${opea_branch}" != "main" ]]; then
cd $WORKPATH
OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
find . -type f -name "Dockerfile*" | while read -r file; do
echo "Processing file: $file"
sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
done
fi
cd $WORKPATH/docker_image_build
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="vllm-rocm llm-textgen codegen codegen-ui"
@@ -40,18 +34,7 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/amd/gpu/rocm/
export CODEGEN_LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
export CODEGEN_VLLM_SERVICE_PORT=8028
export CODEGEN_VLLM_ENDPOINT="http://${ip_address}:${CODEGEN_VLLM_SERVICE_PORT}"
export CODEGEN_LLM_SERVICE_PORT=9000
export CODEGEN_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CODEGEN_MEGA_SERVICE_HOST_IP=${ip_address}
export CODEGEN_LLM_SERVICE_HOST_IP=${ip_address}
export CODEGEN_BACKEND_SERVICE_PORT=7778
export CODEGEN_BACKEND_SERVICE_URL="http://${ip_address}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen"
export CODEGEN_UI_SERVICE_PORT=5173
export HOST_IP=${ip_address}
source set_env_vllm.sh
sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
@@ -164,17 +147,35 @@ function stop_docker() {
function main() {
echo "::group::stop_docker"
stop_docker
echo "::endgroup::"
echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "::endgroup::"
echo "::group::start_services"
start_services
echo "::endgroup::"
echo "::group::validate_microservices"
validate_microservices
validate_megaservice
validate_frontend
echo "::endgroup::"
echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"
echo "::group::validate_frontend"
validate_frontend
echo "::endgroup::"
echo "::group::stop_docker"
stop_docker
echo y | docker system prune
cd $WORKPATH
echo "::endgroup::"
docker system prune -f
}

View File

@@ -22,12 +22,11 @@ This Code Translation use case demonstrates Text Generation Inference across mul
The table below lists currently available deployment options. They outline in detail the implementation of this example on selected hardware.
| Category | Deployment Option | Description |
| ---------------------- | -------------------- | ----------------------------------------------------------------- |
| On-premise Deployments | Docker compose | [CodeTrans deployment on Xeon](./docker_compose/intel/cpu/xeon) |
| | | [CodeTrans deployment on Gaudi](./docker_compose/intel/hpu/gaudi) |
| | | [CodeTrans deployment on AMD ROCm](./docker_compose/amd/gpu/rocm) |
| | Kubernetes | [Helm Charts](./kubernetes/helm) |
| | | [GMC](./kubernetes/gmc) |
| | Azure | Work-in-progress |
| | Intel Tiber AI Cloud | Work-in-progress |
| Category | Deployment Option | Description |
| ---------------------- | -------------------- | --------------------------------------------------------------------------- |
| On-premise Deployments | Docker compose | [CodeTrans deployment on Xeon](./docker_compose/intel/cpu/xeon/README.md) |
| | | [CodeTrans deployment on Gaudi](./docker_compose/intel/hpu/gaudi/README.md) |
| | | [CodeTrans deployment on AMD ROCm](./docker_compose/amd/gpu/rocm/README.md) |
| | Kubernetes | [Helm Charts](./kubernetes/helm/README.md) |
| | Azure | Work-in-progress |
| | Intel Tiber AI Cloud | Work-in-progress |

View File

@@ -44,3 +44,38 @@ Some HuggingFace resources, such as some models, are only accessible if the deve
2. (Docker only) If all microservices work well, check the port ${host_ip}:7777, the port may be allocated by other users, you can modify the `compose.yaml`.
3. (Docker only) If you get errors like "The container name is in use", change container name in `compose.yaml`.
## Monitoring OPEA Services with Prometheus and Grafana Dashboard
OPEA microservice deployment can easily be monitored through Grafana dashboards using data collected via Prometheus. Follow the [README](https://github.com/opea-project/GenAIEval/blob/main/evals/benchmark/grafana/README.md) to setup Prometheus and Grafana servers and import dashboards to monitor the OPEA services.
![example dashboards](./assets/img/example_dashboards.png)
![tgi dashboard](./assets/img/tgi_dashboard.png)
## Tracing with OpenTelemetry and Jaeger
> NOTE: This feature is disabled by default. Please use the compose.telemetry.yaml file to enable this feature.
OPEA microservice and [TGI](https://huggingface.co/docs/text-generation-inference/en/index)/[TEI](https://huggingface.co/docs/text-embeddings-inference/en/index) serving can easily be traced through [Jaeger](https://www.jaegertracing.io/) dashboards in conjunction with [OpenTelemetry](https://opentelemetry.io/) Tracing feature. Follow the [README](https://github.com/opea-project/GenAIComps/tree/main/comps/cores/telemetry#tracing) to trace additional functions if needed.
Tracing data is exported to http://{EXTERNAL_IP}:4318/v1/traces via Jaeger.
Users could also get the external IP via below command.
```bash
ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+'
```
Access the Jaeger dashboard UI at http://{EXTERNAL_IP}:16686
For TGI serving on Gaudi, users could see different services like opea, TEI and TGI.
![Screenshot from 2024-12-27 11-58-18](https://github.com/user-attachments/assets/6126fa70-e830-4780-bd3f-83cb6eff064e)
Here is a screenshot for one tracing of TGI serving request.
![Screenshot from 2024-12-27 11-26-25](https://github.com/user-attachments/assets/3a7c51c6-f422-41eb-8e82-c3df52cd48b8)
There are also OPEA related tracings. Users could understand the time breakdown of each service request by looking into each opea:schedule operation.
![image](https://github.com/user-attachments/assets/6137068b-b374-4ff8-b345-993343c0c25f)
There could be asynchronous function such as `llm/MicroService_asyn_generate` and user needs to check the trace of the asynchronous function in another operation like
opea:llm_generate_stream.
![image](https://github.com/user-attachments/assets/a973d283-198f-4ce2-a7eb-58515b77503e)

Binary file not shown.

Before

Width:  |  Height:  |  Size: 120 KiB

After

Width:  |  Height:  |  Size: 90 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 414 KiB

View File

@@ -46,7 +46,7 @@ export http_proxy="Your_HTTP_Proxy" # http proxy if any
export https_proxy="Your_HTTPs_Proxy" # https proxy if any
export no_proxy=localhost,127.0.0.1,$host_ip # additional no proxies if needed
export NGINX_PORT=${your_nginx_port} # your usable port for nginx, 80 for example
source ./set_env.sh
source docker_compose/intel/set_env.sh
```
Consult the section on [CodeTrans Service configuration](#codetrans-configuration) for information on how service specific configuration parameters affect deployments.

View File

@@ -46,7 +46,7 @@ export http_proxy="Your_HTTP_Proxy" # http proxy if any
export https_proxy="Your_HTTPs_Proxy" # https proxy if any
export no_proxy=localhost,127.0.0.1,$host_ip # additional no proxies if needed
export NGINX_PORT=${your_nginx_port} # your usable port for nginx, 80 for example
source ./set_env.sh
source docker_compose/intel/set_env.sh
```
Consult the section on [CodeTrans Service configuration](#codetrans-configuration) for information on how service specific configuration parameters affect deployments.

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
pushd "../../" > /dev/null
pushd "../../../" > /dev/null
source .set_env.sh
popd > /dev/null

View File

@@ -37,12 +37,12 @@ function build_docker_images() {
}
function start_services() {
cd $WORKPATH/docker_compose
cd $WORKPATH/docker_compose/intel
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export NGINX_PORT=80
export host_ip=${ip_address}
source set_env.sh
cd intel/hpu/gaudi
cd hpu/gaudi
sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

View File

@@ -39,13 +39,13 @@ function build_docker_images() {
}
function start_services() {
cd $WORKPATH/docker_compose
cd $WORKPATH/docker_compose/intel
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export NGINX_PORT=80
export host_ip=${ip_address}
source set_env.sh
cd intel/cpu/xeon/
cd cpu/xeon/
sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

View File

@@ -34,13 +34,13 @@ function build_docker_images() {
}
function start_services() {
cd $WORKPATH/docker_compose
cd $WORKPATH/docker_compose/intel
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export NGINX_PORT=80
export host_ip=${ip_address}
source set_env.sh
cd intel/hpu/gaudi/
cd hpu/gaudi/
sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

View File

@@ -34,13 +34,13 @@ function build_docker_images() {
}
function start_services() {
cd $WORKPATH/docker_compose
cd $WORKPATH/docker_compose/intel
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export NGINX_PORT=80
export host_ip=${ip_address}
source set_env.sh
cd intel/cpu/xeon/
cd cpu/xeon/
sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

View File

@@ -1,8 +1,9 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
ARG IMAGE_REPO=opea
ARG BASE_TAG=latest
FROM opea/comps-base:$BASE_TAG
FROM $IMAGE_REPO/comps-base:$BASE_TAG
COPY ./retrieval_tool.py $HOME/retrieval_tool.py

View File

@@ -2,7 +2,11 @@
DocRetriever are the most widely adopted use case for leveraging the different methodologies to match user query against a set of free-text records. DocRetriever is essential to RAG system, which bridges the knowledge gap by dynamically fetching relevant information from external sources, ensuring that responses generated remain factual and current. The core of this architecture are vector databases, which are instrumental in enabling efficient and semantic retrieval of information. These databases store data as vectors, allowing RAG to swiftly access the most pertinent documents or data points based on semantic similarity.
## 1. Build Images for necessary microservices. (Optional after docker image release)
_Note: As the related Docker images have been published to Docker Hub, you can skip steps 1 and 2 below and quick start from step 3._
## 1. Build Images for necessary microservices. (Optional)
- Embedding TEI Image
@@ -30,7 +34,7 @@ DocRetriever are the most widely adopted use case for leveraging the different m
docker build -t opea/dataprep:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/dataprep/src/Dockerfile .
```
## 2. Build Images for MegaService
## 2. Build Images for MegaService (Optional)
```bash
cd ..
@@ -44,6 +48,19 @@ docker build --no-cache -t opea/doc-index-retriever:latest --build-arg https_pro
```bash
export host_ip="YOUR IP ADDR"
export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
```
Set environment variables by
```
cd GenAIExamples/DocIndexRetriever/docker_compose/intel/cpu/xeon
source set_env.sh
```
Note: set_env.sh will help to set all required variables. Please ensure all required variables like ports (LLM_SERVICE_PORT, MEGA_SERVICE_PORT, etc.) are set if not using defaults from the compose file.
or Set environment variables manually
```
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:6006"

View File

@@ -5,6 +5,8 @@ services:
doc-index-retriever:
build:
args:
IMAGE_REPO: ${REGISTRY:-opea}
BASE_TAG: ${TAG:-latest}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
no_proxy: ${no_proxy}

View File

@@ -20,17 +20,15 @@ function build_docker_images() {
if [ ! -d "GenAIComps" ] ; then
git clone --single-branch --branch "${opea_branch:-"main"}" https://github.com/opea-project/GenAIComps.git
fi
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
service_list="dataprep embedding retriever reranking doc-index-retriever"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
docker pull ghcr.io/huggingface/tei-gaudi:1.5.0
docker pull quay.io/coreos/etcd:v3.5.5
docker pull minio/minio:RELEASE.2023-03-20T20-16-18Z
docker pull milvusdb/milvus:v2.4.6
docker images && sleep 1s
echo "Docker images built!"
}
function start_services() {
@@ -112,19 +110,27 @@ function stop_docker() {
function main() {
echo "::group::stop_docker"
stop_docker
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "Dump current docker ps"
docker ps
start_time=$(date +%s)
start_services
end_time=$(date +%s)
duration=$((end_time-start_time))
echo "Mega service start duration is $duration s"
validate_megaservice
echo "::endgroup::"
echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "::endgroup::"
echo "::group::start_services"
start_services
echo "::endgroup::"
echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"
echo "::group::stop_docker"
stop_docker
echo y | docker system prune
echo "::endgroup::"
docker system prune -f
}

View File

@@ -20,16 +20,15 @@ function build_docker_images() {
if [ ! -d "GenAIComps" ] ; then
git clone --single-branch --branch "${opea_branch:-"main"}" https://github.com/opea-project/GenAIComps.git
fi
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
service_list="dataprep embedding retriever reranking doc-index-retriever"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
docker pull quay.io/coreos/etcd:v3.5.5
docker pull minio/minio:RELEASE.2023-03-20T20-16-18Z
docker pull milvusdb/milvus:v2.4.6
docker images && sleep 1s
echo "Docker images built!"
}
function start_services() {
@@ -111,19 +110,27 @@ function stop_docker() {
function main() {
echo "::group::stop_docker"
stop_docker
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "Dump current docker ps"
docker ps
start_time=$(date +%s)
start_services
end_time=$(date +%s)
duration=$((end_time-start_time))
echo "Mega service start duration is $duration s"
validate_megaservice
echo "::endgroup::"
echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "::endgroup::"
echo "::group::start_services"
start_services
echo "::endgroup::"
echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"
echo "::group::stop_docker"
stop_docker
echo y | docker system prune
echo "::endgroup::"
docker system prune -f
}

View File

@@ -21,14 +21,15 @@ function build_docker_images() {
if [ ! -d "GenAIComps" ] ; then
git clone --single-branch --branch "${opea_branch:-"main"}" https://github.com/opea-project/GenAIComps.git
fi
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
docker compose -f build.yaml build --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull redis/redis-stack:7.2.0-v9
docker pull ghcr.io/huggingface/tei-gaudi:1.5.0
docker images && sleep 1s
echo "Docker images built!"
}
function start_services() {
@@ -103,19 +104,27 @@ function stop_docker() {
function main() {
echo "::group::stop_docker"
stop_docker
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "Dump current docker ps"
docker ps
start_time=$(date +%s)
start_services
end_time=$(date +%s)
duration=$((end_time-start_time))
echo "Mega service start duration is $duration s"
validate_megaservice
echo "::endgroup::"
echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "::endgroup::"
echo "::group::start_services"
start_services
echo "::endgroup::"
echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"
echo "::group::stop_docker"
stop_docker
echo y | docker system prune
echo "::endgroup::"
docker system prune -f
}

View File

@@ -21,14 +21,15 @@ function build_docker_images() {
if [ ! -d "GenAIComps" ] ; then
git clone --single-branch --branch "${opea_branch:-"main"}" https://github.com/opea-project/GenAIComps.git
fi
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
service_list="dataprep embedding retriever reranking doc-index-retriever"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
docker pull redis/redis-stack:7.2.0-v9
docker images && sleep 1s
echo "Docker images built!"
}
function start_services() {
@@ -110,20 +111,27 @@ function stop_docker() {
function main() {
echo "::group::stop_docker"
stop_docker
echo "::endgroup::"
echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "Dump current docker ps"
docker ps
echo "::endgroup::"
start_time=$(date +%s)
echo "::group::start_services"
start_services
end_time=$(date +%s)
duration=$((end_time-start_time))
echo "Mega service start duration is $duration s"
validate_megaservice
echo "::endgroup::"
echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"
echo "::group::stop_docker"
stop_docker
echo y | docker system prune
echo "::endgroup::"
docker system prune -f
}

View File

@@ -21,6 +21,11 @@ function build_docker_images() {
if [ ! -d "GenAIComps" ] ; then
git clone --single-branch --branch "${opea_branch:-"main"}" https://github.com/opea-project/GenAIComps.git
fi
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
service_list="dataprep embedding retriever doc-index-retriever"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
@@ -114,19 +119,27 @@ function stop_docker() {
function main() {
echo "::group::stop_docker"
stop_docker
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "Dump current docker ps"
docker ps
start_time=$(date +%s)
start_services
end_time=$(date +%s)
duration=$((end_time-start_time))
echo "Mega service start duration is $duration s"
validate_megaservice
echo "::endgroup::"
echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "::endgroup::"
echo "::group::start_services"
start_services
echo "::endgroup::"
echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"
echo "::group::stop_docker"
stop_docker
echo y | docker system prune
echo "::endgroup::"
docker system prune -f
}

View File

@@ -27,9 +27,9 @@ Clone the GenAIExample repository and access the ChatQnA Intel Xeon platform Doc
```bash
git clone https://github.com/opea-project/GenAIExamples.git
cd GenAIExamples/DocSum/docker_compose
cd GenAIExamples/DocSum/docker_compose/intel
source set_env.sh
cd intel/cpu/xeon/
cd cpu/xeon/
```
NOTE: by default vLLM does "warmup" at start, to optimize its performance for the specified model and the underlying platform, which can take long time. For development (and e.g. autoscaling) it can be skipped with `export VLLM_SKIP_WARMUP=true`.
@@ -49,6 +49,7 @@ Some HuggingFace resources, such as some models, are only accessible if you have
To deploy the DocSum services, execute the `docker compose up` command with the appropriate arguments. For a default deployment, execute:
```bash
cd cpu/xeon/
docker compose up -d
```

View File

@@ -29,9 +29,9 @@ Clone the GenAIExample repository and access the DocSum Intel® Gaudi® platform
```bash
git clone https://github.com/opea-project/GenAIExamples.git
cd GenAIExamples/DocSum/docker_compose
cd GenAIExamples/DocSum/docker_compose/intel
source set_env.sh
cd intel/hpu/gaudi/
cd hpu/gaudi/
```
NOTE: by default vLLM does "warmup" at start, to optimize its performance for the specified model and the underlying platform, which can take long time. For development (and e.g. autoscaling) it can be skipped with `export VLLM_SKIP_WARMUP=true`.
@@ -51,6 +51,7 @@ Some HuggingFace resources, such as some models, are only accessible if you have
To deploy the DocSum services, execute the `docker compose up` command with the appropriate arguments. For a default deployment, execute:
```bash
cd hpu/gaudi/
docker compose up -d
```

View File

@@ -16,7 +16,7 @@ echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
source $WORKPATH/docker_compose/set_env.sh
source $WORKPATH/docker_compose/intel/set_env.sh
export MODEL_CACHE=${model_cache:-"./data"}

View File

@@ -17,7 +17,7 @@ echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
source $WORKPATH/docker_compose/set_env.sh
source $WORKPATH/docker_compose/intel/set_env.sh
export MODEL_CACHE=${model_cache:-"./data"}
export MAX_INPUT_TOKENS=2048

View File

@@ -16,7 +16,7 @@ echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
source $WORKPATH/docker_compose/set_env.sh
source $WORKPATH/docker_compose/intel/set_env.sh
export MODEL_CACHE=${model_cache:-"./data"}
export MAX_INPUT_TOKENS=2048

View File

@@ -16,7 +16,7 @@ echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
source $WORKPATH/docker_compose/set_env.sh
source $WORKPATH/docker_compose/intel/set_env.sh
export MODEL_CACHE=${model_cache:-"./data"}
export MAX_INPUT_TOKENS=2048

View File

@@ -5,3 +5,13 @@
pushd "../../../../../" > /dev/null
source .set_env.sh
popd > /dev/null
export MODEL_PATH=${MODEL_PATH}
export DOC_PATH=${DOC_PATH}
export UI_TMPFILE_PATH=${UI_TMPFILE_PATH}
export HOST_IP=${HOST_IP}
export LLM_MODEL=${LLM_MODEL}
export HF_ENDPOINT=${HF_ENDPOINT}
export vLLM_ENDPOINT=${vLLM_ENDPOINT}
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export no_proxy="localhost, 127.0.0.1, 192.168.1.1"

View File

@@ -0,0 +1,21 @@
# EdgeCraftRAG E2E test scripts
## Set the required environment variable
```bash
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
```
## Run test
On Intel ARC with TGI:
```bash
bash test_compose_on_arc.sh
```
On Intel ARC with vLLM:
```bash
bash test_compose_vllm_on_arc.sh
```

View File

@@ -46,18 +46,10 @@ function build_docker_images() {
}
function start_services() {
export MODEL_PATH=${MODEL_PATH}
export DOC_PATH=${DOC_PATH}
export UI_UPLOAD_PATH=${UI_UPLOAD_PATH}
export HOST_IP=${HOST_IP}
export LLM_MODEL=${LLM_MODEL}
export HF_ENDPOINT=${HF_ENDPOINT}
export vLLM_ENDPOINT=${vLLM_ENDPOINT}
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export no_proxy="localhost, 127.0.0.1, 192.168.1.1"
cd $WORKPATH/docker_compose/intel/gpu/arc
source set_env.sh
# Start Docker Containers
docker compose -f $COMPOSE_FILE up -d > ${LOG_PATH}/start_services_with_compose.log
sleep 20

View File

@@ -53,17 +53,8 @@ function build_docker_images() {
}
function start_services() {
export MODEL_PATH=${MODEL_PATH}
export DOC_PATH=${DOC_PATH}
export UI_TMPFILE_PATH=${UI_TMPFILE_PATH}
export HOST_IP=${HOST_IP}
export LLM_MODEL=${LLM_MODEL}
export HF_ENDPOINT=${HF_ENDPOINT}
export vLLM_ENDPOINT=${vLLM_ENDPOINT}
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export no_proxy="localhost, 127.0.0.1, 192.168.1.1"
cd $WORKPATH/docker_compose/intel/gpu/arc
source set_env.sh
# Start Docker Containers
docker compose -f $COMPOSE_FILE up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -137,7 +137,6 @@ export BACKEND_SERVICE_ENDPOINT_DOCSUM="http://${host_ip}:8890/v1/docsum"
export DATAPREP_SERVICE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/ingest"
export DATAPREP_GET_FILE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/get"
export CHAT_HISTORY_CREATE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/create"
export CHAT_HISTORY_CREATE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/create"
export CHAT_HISTORY_DELETE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/delete"
export CHAT_HISTORY_GET_ENDPOINT="http://${host_ip}:6012/v1/chathistory/get"
export PROMPT_SERVICE_GET_ENDPOINT="http://${host_ip}:6018/v1/prompt/get"

View File

@@ -18,7 +18,6 @@ export BACKEND_SERVICE_ENDPOINT_DOCSUM="http://${host_ip}:8890/v1/docsum"
export DATAPREP_SERVICE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/ingest"
export DATAPREP_GET_FILE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/get"
export CHAT_HISTORY_CREATE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/create"
export CHAT_HISTORY_CREATE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/create"
export CHAT_HISTORY_DELETE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/delete"
export CHAT_HISTORY_GET_ENDPOINT="http://${host_ip}:6012/v1/chathistory/get"
export PROMPT_SERVICE_GET_ENDPOINT="http://${host_ip}:6018/v1/prompt/get"

View File

@@ -0,0 +1,15 @@
# ProductivitySuite E2E test scripts
## Set the required environment variable
```bash
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
```
## Run test
On Intel Xeon with TGI:
```bash
bash test_compose_on_xeon.sh
```

View File

@@ -29,33 +29,12 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon/
export DB_NAME="opea"
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
export LLM_MODEL_ID_CODEGEN="Intel/neural-chat-7b-v3-3"
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export BACKEND_SERVICE_ENDPOINT_CHATQNA="http://${ip_address}:8888/v1/chatqna"
export DATAPREP_DELETE_FILE_ENDPOINT="http://${ip_address}:6007/v1/dataprep/delete"
export BACKEND_SERVICE_ENDPOINT_CODEGEN="http://${ip_address}:7778/v1/codegen"
export BACKEND_SERVICE_ENDPOINT_DOCSUM="http://${ip_address}:8890/v1/docsum"
export DATAPREP_SERVICE_ENDPOINT="http://${ip_address}:6007/v1/dataprep/ingest"
export DATAPREP_GET_FILE_ENDPOINT="http://${ip_address}:6007/v1/dataprep/get"
export CHAT_HISTORY_CREATE_ENDPOINT="http://${ip_address}:6012/v1/chathistory/create"
export CHAT_HISTORY_CREATE_ENDPOINT="http://${ip_address}:6012/v1/chathistory/create"
export CHAT_HISTORY_DELETE_ENDPOINT="http://${ip_address}:6012/v1/chathistory/delete"
export CHAT_HISTORY_GET_ENDPOINT="http://${ip_address}:6012/v1/chathistory/get"
export PROMPT_SERVICE_GET_ENDPOINT="http://${ip_address}:6018/v1/prompt/get"
export PROMPT_SERVICE_CREATE_ENDPOINT="http://${ip_address}:6018/v1/prompt/create"
export PROMPT_SERVICE_DELETE_ENDPOINT="http://${ip_address}:6018/v1/prompt/delete"
export KEYCLOAK_SERVICE_ENDPOINT="http://${ip_address}:8080"
export DocSum_COMPONENT_NAME="OpeaDocSumTgi"
export host_ip=${ip_address}
export LOGFLAG=True
export no_proxy="$no_proxy,tgi_service_codegen,llm_codegen,tei-embedding-service,tei-reranking-service,chatqna-xeon-backend-server,retriever,tgi-service,redis-vector-db,whisper,llm-docsum-tgi,docsum-xeon-backend-server,mongo,codegen"
source set_env.sh
# Start Docker Containers
docker compose up -d > ${LOG_PATH}/start_services_with_compose.log
sleep 30s

View File

@@ -30,66 +30,38 @@ The architecture of the SearchQnA Application is illustrated below:
The SearchQnA example is implemented using the component-level microservices defined in [GenAIComps](https://github.com/opea-project/GenAIComps). The flow chart below shows the information flow between different microservices for this example.
```mermaid
---
config:
flowchart:
nodeSpacing: 400
rankSpacing: 100
curve: linear
themeVariables:
fontSize: 50px
---
%% Orange are microservices from third parties that are 'wrapped' as OPEA components.
flowchart LR
%% Colors %%
classDef blue fill:#ADD8E6,stroke:#ADD8E6,stroke-width:2px,fill-opacity:0.5
classDef orange fill:#FBAA60,stroke:#ADD8E6,stroke-width:2px,fill-opacity:0.5
classDef orchid fill:#C26DBC,stroke:#ADD8E6,stroke-width:2px,fill-opacity:0.5
classDef invisible fill:transparent,stroke:transparent;
style SearchQnA-MegaService stroke:#000000
User["User"] --> Nginx["Nginx<br>searchqna-nginx-server"]
Nginx --> UI["UI<br>searchqna-ui-server"] & Gateway & User
UI --> Nginx
Gateway --> Nginx & Embedding
Embedding --> Retriever
Retriever --> Reranker
Reranker --> LLM
LLM --> Gateway
LLM <-.-> TGI_Service["LLM<br>tgi-service"]
Embedding <-.-> TEI_Embedding["TEI Embedding<br>tei-embedding-server"]
Reranker <-.-> TEI_Reranker["TEI Reranker<br>tei-reranking-server"]
%% Subgraphs %%
subgraph SearchQnA-MegaService["SearchQnA MegaService "]
direction LR
EM([Embedding MicroService]):::blue
RET([Web Retrieval MicroService]):::blue
RER([Rerank MicroService]):::blue
LLM([LLM MicroService]):::blue
end
subgraph UserInterface[" User Interface "]
direction LR
a([User Input Query]):::orchid
UI([UI server<br>]):::orchid
end
TEI_RER{{Reranking service<br>}}
TEI_EM{{Embedding service <br>}}
VDB{{Vector DB<br><br>}}
R_RET{{Web Retriever service <br>}}
LLM_gen{{LLM Service <br>}}
GW([SearchQnA GateWay<br>]):::orange
%% Questions interaction
direction LR
a[User Input Query] --> UI
UI --> GW
GW <==> SearchQnA-MegaService
EM ==> RET
RET ==> RER
RER ==> LLM
%% Embedding service flow
direction LR
EM <-.-> TEI_EM
RET <-.-> R_RET
RER <-.-> TEI_RER
LLM <-.-> LLM_gen
TEI_Embedding:::ext
TEI_Reranker:::ext
TGI_Service:::ext
subgraph MegaService["MegaService"]
LLM["LLM<br>llm-textgen-server"]
Reranker["Reranker<br>reranking-tei-server"]
Retriever["Retriever<br>web-retriever-server"]
Embedding["Embedding<br>embedding-server"]
end
subgraph Backend["searchqna-backend-server"]
direction TB
%% Vector DB interaction
R_RET <-.-> VDB
MegaService
Gateway["Backend Endpoint"]
end
classDef default fill:#fff,stroke:#000,color:#000
classDef ext fill:#f9cb9c,stroke:#000,color:#000
style MegaService margin-top:20px,margin-bottom:20px
```
This SearchQnA use case performs Search-augmented Question Answering across multiple platforms. Currently, we provide the example for Intel® Gaudi® 2 and Intel® Xeon® Scalable Processors, and we invite contributions from other hardware vendors to expand OPEA ecosystem.
@@ -98,8 +70,8 @@ This SearchQnA use case performs Search-augmented Question Answering across mult
The table below lists the available deployment options and their implementation details for different hardware platforms.
| Category | Deployment Option | Description |
| ---------------------- | ---------------------- | -------------------------------------------------------------- |
| On-premise Deployments | Docker Compose (Xeon) | [DocSum deployment on Xeon](./docker_compose/intel/cpu/xeon) |
| | Docker Compose (Gaudi) | [DocSum deployment on Gaudi](./docker_compose/intel/hpu/gaudi) |
| | Docker Compose (ROCm) | [DocSum deployment on AMD ROCm](./docker_compose/amd/gpu/rocm) |
| Category | Deployment Option | Description |
| ---------------------- | ---------------------- | --------------------------------------------------------------------------- |
| On-premise Deployments | Docker Compose (Xeon) | [SearchQnA deployment on Xeon](./docker_compose/intel/cpu/xeon/README.md) |
| | Docker Compose (Gaudi) | [SearchQnA deployment on Gaudi](./docker_compose/intel/hpu/gaudi/README.md) |
| | Docker Compose (ROCm) | [SearchQnA deployment on AMD ROCm](./docker_compose/amd/gpu/rocm/README.md) |

View File

@@ -170,7 +170,25 @@ services:
no_proxy: ${no_proxy}
https_proxy: ${https_proxy}
http_proxy: ${http_proxy}
BACKEND_BASE_URL: ${SEARCH_BACKEND_SERVICE_ENDPOINT}
ipc: host
restart: always
search-nginx-server:
image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
container_name: search-nginx-server
depends_on:
- search-backend-server
- search-ui-server
ports:
- "${NGINX_PORT:-80}:80"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- FRONTEND_SERVICE_IP=search-ui-server
- FRONTEND_SERVICE_PORT=5173
- BACKEND_SERVICE_NAME=search
- BACKEND_SERVICE_IP=search-backend-server
- BACKEND_SERVICE_PORT=8888
ipc: host
restart: always

View File

@@ -176,10 +176,27 @@ services:
no_proxy: ${no_proxy}
https_proxy: ${https_proxy}
http_proxy: ${http_proxy}
BACKEND_BASE_URL: ${SEARCH_BACKEND_SERVICE_ENDPOINT}
ipc: host
restart: always
search-nginx-server:
image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
container_name: search-nginx-server
depends_on:
- search-backend-server
- search-ui-server
ports:
- "${NGINX_PORT:-80}:80"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- FRONTEND_SERVICE_IP=search-ui-server
- FRONTEND_SERVICE_PORT=5173
- BACKEND_SERVICE_NAME=search
- BACKEND_SERVICE_IP=search-backend-server
- BACKEND_SERVICE_PORT=8888
ipc: host
restart: always
networks:
default:
driver: bridge

View File

@@ -2,8 +2,8 @@
# Copyright (C) 2025 Advanced Micro Devices, Inc.
export HOST_IP=''
export EXTERNAL_HOST_IP=''
export HOST_IP=${ip_address}
export EXTERNAL_HOST_IP=${ip_address}
export SEARCH_EMBEDDING_MODEL_ID='BAAI/bge-base-en-v1.5'
export SEARCH_GOOGLE_API_KEY=${GOOGLE_API_KEY}
@@ -12,9 +12,9 @@ export SEARCH_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export SEARCH_LLM_MODEL_ID='Intel/neural-chat-7b-v3-3'
export SEARCH_RERANK_MODEL_ID='BAAI/bge-reranker-base'
export SEARCH_BACKEND_SERVICE_PORT=18142
export SEARCH_BACKEND_SERVICE_PORT=3008
export SEARCH_EMBEDDING_SERVICE_PORT=3002
export SEARCH_FRONTEND_SERVICE_PORT=18143
export SEARCH_FRONTEND_SERVICE_PORT=5173
export SEARCH_LLM_SERVICE_PORT=3007
export SEARCH_RERANK_SERVICE_PORT=3005
export SEARCH_TEI_EMBEDDING_PORT=3001

View File

@@ -2,8 +2,8 @@
# Copyright (C) 2025 Advanced Micro Devices, Inc.
export HOST_IP=''
export EXTERNAL_HOST_IP=''
export HOST_IP=${ip_address}
export EXTERNAL_HOST_IP=${ip_address}
export SEARCH_EMBEDDING_MODEL_ID='BAAI/bge-base-en-v1.5'
export SEARCH_GOOGLE_API_KEY=${GOOGLE_API_KEY}
@@ -12,11 +12,11 @@ export SEARCH_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export SEARCH_LLM_MODEL_ID='Intel/neural-chat-7b-v3-3'
export SEARCH_RERANK_MODEL_ID='BAAI/bge-reranker-base'
export MODEL_PATH="./data"
export MODEL_CACHE="./data"
export SEARCH_BACKEND_SERVICE_PORT=18142
export SEARCH_BACKEND_SERVICE_PORT=3008
export SEARCH_EMBEDDING_SERVICE_PORT=3002
export SEARCH_FRONTEND_SERVICE_PORT=18143
export SEARCH_FRONTEND_SERVICE_PORT=5173
export SEARCH_LLM_SERVICE_PORT=3007
export SEARCH_RERANK_SERVICE_PORT=3005
export SEARCH_TEI_EMBEDDING_PORT=3001

View File

@@ -168,10 +168,27 @@ services:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- BACKEND_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
ipc: host
restart: always
searchqna-xeon-nginx-server:
image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
container_name: searchqna-xeon-nginx-server
depends_on:
- searchqna-xeon-backend-server
- searchqna-xeon-ui-server
ports:
- "${NGINX_PORT:-80}:80"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- FRONTEND_SERVICE_IP=searchqna-xeon-ui-server
- FRONTEND_SERVICE_PORT=5173
- BACKEND_SERVICE_NAME=searchqna
- BACKEND_SERVICE_IP=searchqna-xeon-backend-server
- BACKEND_SERVICE_PORT=8888
ipc: host
restart: always
networks:
default:

View File

@@ -187,7 +187,25 @@ services:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- BACKEND_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
ipc: host
restart: always
searchqna-gaudi-nginx-server:
image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
container_name: searchqna-gaudi-nginx-server
depends_on:
- searchqna-gaudi-backend-server
- searchqna-gaudi-ui-server
ports:
- "${NGINX_PORT:-80}:80"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- FRONTEND_SERVICE_IP=searchqna-gaudi-ui-server
- FRONTEND_SERVICE_PORT=5173
- BACKEND_SERVICE_NAME=searchqna
- BACKEND_SERVICE_IP=searchqna-gaudi-backend-server
- BACKEND_SERVICE_PORT=8888
ipc: host
restart: always

View File

@@ -2,11 +2,13 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
pushd "../../" > /dev/null
pushd "../../../" > /dev/null
source .set_env.sh
popd > /dev/null
export GOOGLE_CSE_ID=$GOOGLE_CSE_ID
export GOOGLE_API_KEY=$GOOGLE_API_KEY
export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
export EMBEDDING_MODEL_ID=BAAI/bge-base-en-v1.5
export TEI_EMBEDDING_ENDPOINT=http://${host_ip}:3001
export RERANK_MODEL_ID=BAAI/bge-reranker-base

View File

@@ -46,3 +46,9 @@ services:
context: GenAIComps
dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu
image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest}
nginx:
build:
context: GenAIComps
dockerfile: comps/third_parties/nginx/src/Dockerfile
extends: searchqna
image: ${REGISTRY:-opea}/nginx:${TAG:-latest}

33
SearchQnA/tests/README.md Normal file
View File

@@ -0,0 +1,33 @@
# SearchQnA E2E test scripts
## Set the required environment variable
```bash
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
```
## Run test
On Intel Xeon with TGI:
```bash
bash test_compose_on_xeon.sh
```
On Intel Gaudi with TGI:
```bash
bash test_compose_on_gaudi.sh
```
On AMD ROCm with TGI:
```bash
bash test_compose_on_rocm.sh
```
On AMD ROCm with vLLM:
```bash
bash test_compose_vllm_on_rocm.sh
```

Some files were not shown because too many files have changed in this diff Show More