Compare commits

10 Commits

Author SHA1 Message Date
ZePan110
52a6b22f3f test
Signed-off-by: ZePan110 <ze.pan@intel.com>
2025-05-21 12:28:44 +08:00
ZePan110
c8259d47f9 Revert "test"
This reverts commit 2f9959f0a5.
2025-05-21 12:28:07 +08:00
ZePan110
b980d6a34c Fix issue
Signed-off-by: ZePan110 <ze.pan@intel.com>
2025-05-21 12:24:24 +08:00
ZePan110
2f9959f0a5 test
Signed-off-by: ZePan110 <ze.pan@intel.com>
2025-05-21 09:54:13 +08:00
ZePan110
51b9d3b975 Update .github/workflows/pr-code-scan.yml
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-05-20 15:32:16 +08:00
ZePan110
d9e7264a81 Fix
Signed-off-by: ZePan110 <ze.pan@intel.com>
2025-05-20 15:24:36 +08:00
Ying Hu
26cb531766 Update README.md of model/port change (#1969)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-05-20 14:29:23 +08:00
Ed Lee @ Intel
e9153b82bb Updated SearchQnA to use nginx like ChatQnA (#1769)
Signed-off-by: Ed Lee <16417837+edlee123@users.noreply.github.com>
2025-05-20 14:15:46 +08:00
Letong Han
0890e94a21 Refine CodeTrans README (#1960)
Signed-off-by: letonghan <letong.han@intel.com>
Co-authored-by: Ying Hu <ying.hu@intel.com>
2025-05-20 13:43:24 +08:00
ZePan110
581e954a8d Integrate ChatQnA set_env to ut scripts and add README.md for UT scripts. (#1971)
Signed-off-by: ZePan110 <ze.pan@intel.com>
2025-05-20 13:42:18 +08:00
72 changed files with 545 additions and 464 deletions

View File

@@ -3,7 +3,8 @@
# This workflow will only test GMC pipeline and will not install GMC any more
name: Single GMC E2e Test For CD Workflow Call
permissions:
contents: read
on:
workflow_call:
inputs:

View File

@@ -2,7 +2,8 @@
# SPDX-License-Identifier: Apache-2.0
name: Build and deploy GMC system on call and manual
permissions:
contents: read
on:
workflow_dispatch:
inputs:

View File

@@ -2,6 +2,8 @@
# SPDX-License-Identifier: Apache-2.0
name: Update Docker Hub Description
permissions:
contents: read
on:
schedule:
- cron: "0 0 * * 0"

View File

@@ -2,6 +2,8 @@
# SPDX-License-Identifier: Apache-2.0
name: Clean up container on manual event
permissions:
contents: read
on:
workflow_dispatch:
inputs:

View File

@@ -2,7 +2,8 @@
# SPDX-License-Identifier: Apache-2.0
name: Freeze OPEA images release tag
permissions:
contents: read
on:
workflow_dispatch:
inputs:

View File

@@ -2,6 +2,8 @@
# SPDX-License-Identifier: Apache-2.0
name: Build specific images on manual event
permissions:
contents: read
on:
workflow_dispatch:
inputs:

View File

@@ -2,6 +2,8 @@
# SPDX-License-Identifier: Apache-2.0
name: Clean up Local Registry on manual event
permissions:
contents: read
on:
workflow_dispatch:
inputs:

View File

@@ -2,7 +2,8 @@
# SPDX-License-Identifier: Apache-2.0
name: Trellix Command Line Scanner
permissions:
contents: read
on:
workflow_dispatch:
schedule:

View File

@@ -2,7 +2,8 @@
# SPDX-License-Identifier: Apache-2.0
name: Nightly build/publish latest docker images
permissions:
contents: read
on:
schedule:
- cron: "30 14 * * 1-5" # UTC time

View File

@@ -2,7 +2,8 @@
# SPDX-License-Identifier: Apache-2.0
name: E2E Test with Helm Charts
permissions:
contents: read
on:
pull_request_target:
branches: [main]

View File

@@ -2,7 +2,8 @@
# SPDX-License-Identifier: Apache-2.0
name: Check Duplicated Images
permissions:
contents: read
on:
pull_request:
branches: [main]

View File

@@ -2,7 +2,9 @@
# SPDX-License-Identifier: Apache-2.0
name: Code Scan
permissions:
contents: read
security-events: write
on:
pull_request:
branches: [main]

View File

@@ -3,6 +3,9 @@
name: E2E test with docker compose
permissions:
contents: read
on:
pull_request_target:
branches: ["main", "*rc"]

View File

@@ -2,7 +2,8 @@
# SPDX-License-Identifier: Apache-2.0
name: Compose file and dockerfile path checking
permissions:
contents: read
on:
pull_request:
branches: [main]

View File

@@ -3,6 +3,9 @@
name: Check hyperlinks and relative path validity
permissions:
contents: read
on:
pull_request:
branches: [main]

View File

@@ -3,6 +3,9 @@
# Test
name: Build latest images on push event
permissions:
contents: read
on:
push:
branches: [ 'main' ]

View File

@@ -3,10 +3,12 @@
name: Check the validity of links in docker_images_list.
permissions:
contents: read
on:
push:
branches: [main]
types: [opened, reopened, ready_for_review, synchronize]
jobs:
check-dockerfile-paths:

View File

@@ -8,6 +8,10 @@ on:
- "**/docker_compose/**/compose*.yaml"
name: Create an issue to GenAIInfra on push
permissions:
contents: read
jobs:
job1:
name: Create issue

View File

@@ -3,13 +3,15 @@
name: Weekly test all examples on multiple HWs
permissions: read-all
on:
schedule:
- cron: "30 2 * * 6" # UTC time
workflow_dispatch:
env:
EXAMPLES: ${{ vars.NIGHTLY_RELEASE_EXAMPLES }}
EXAMPLES: "CodeTrans" #${{ vars.NIGHTLY_RELEASE_EXAMPLES }}
NODES: "gaudi,xeon,rocm,arc"
jobs:

View File

@@ -16,7 +16,7 @@ services:
- chatqna-redis-vector-db
- chatqna-tei-embedding-service
ports:
- "${CHATQNA_REDIS_DATAPREP_PORT}:5000"
- "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}

View File

@@ -16,7 +16,7 @@ services:
- chatqna-redis-vector-db
- chatqna-tei-embedding-service
ports:
- "${CHATQNA_REDIS_DATAPREP_PORT}:5000"
- "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}

View File

@@ -16,7 +16,7 @@ services:
- chatqna-redis-vector-db
- chatqna-tei-embedding-service
ports:
- "${CHATQNA_REDIS_DATAPREP_PORT}:5000"
- "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}

View File

@@ -16,7 +16,7 @@ services:
- chatqna-redis-vector-db
- chatqna-tei-embedding-service
ports:
- "${CHATQNA_REDIS_DATAPREP_PORT:-5000}:5000"
- "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
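These compose changes rely on shell-style parameter expansion: `${CHATQNA_REDIS_DATAPREP_PORT:-18103}` resolves to the variable's value when it is set and non-empty, and falls back to `18103` otherwise, so the host port no longer has to be exported before running `docker compose`. A minimal bash sketch of the same expansion rule:

```bash
#!/usr/bin/env bash
# Illustrates the ${VAR:-default} fallback used in the compose port mappings above.
unset CHATQNA_REDIS_DATAPREP_PORT
echo "unset  -> ${CHATQNA_REDIS_DATAPREP_PORT:-18103}"   # prints 18103

export CHATQNA_REDIS_DATAPREP_PORT=5000
echo "export -> ${CHATQNA_REDIS_DATAPREP_PORT:-18103}"   # prints 5000
```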

View File

@@ -2,17 +2,17 @@
# Copyright (C) 2025 Advanced Micro Devices, Inc.
export HOST_IP=''
export HOST_IP_EXTERNAL=''
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
export CHATQNA_BACKEND_SERVICE_PORT=18102
export CHATQNA_FRONTEND_SERVICE_PORT=18101
export CHATQNA_NGINX_PORT=18104
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001

View File

@@ -2,18 +2,18 @@
# Copyright (C) 2025 Advanced Micro Devices, Inc.
export HOST_IP=''
export HOST_IP_EXTERNAL=''
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
export CHATQNA_BACKEND_SERVICE_PORT=18102
export CHATQNA_FRONTEND_SERVICE_PORT=18101
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_LLM_FAQGEN_PORT=18011
export CHATQNA_NGINX_PORT=18104
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001

View File

@@ -2,18 +2,18 @@
# Copyright (C) 2025 Advanced Micro Devices, Inc.
export HOST_IP=''
export HOST_IP_EXTERNAL=''
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
export CHATQNA_BACKEND_SERVICE_PORT=18102
export CHATQNA_FRONTEND_SERVICE_PORT=18101
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_LLM_FAQGEN_PORT=18011
export CHATQNA_NGINX_PORT=18104
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001

View File

@@ -2,17 +2,17 @@
# Copyright (C) 2025 Advanced Micro Devices, Inc.
export HOST_IP=''
export HOST_IP_EXTERNAL=''
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
export CHATQNA_BACKEND_SERVICE_PORT=18102
export CHATQNA_FRONTEND_SERVICE_PORT=18101
export CHATQNA_NGINX_PORT=18104
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001

View File

@@ -1,6 +1,8 @@
# Copyright (C) 2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
rm *.json
if ls *.json 1> /dev/null 2>&1; then
rm *.json
fi
wget https://raw.githubusercontent.com/opea-project/GenAIEval/refs/heads/main/evals/benchmark/grafana/chatqna_megaservice_grafana.json
wget https://raw.githubusercontent.com/opea-project/GenAIEval/refs/heads/main/evals/benchmark/grafana/qdrant_grafana.json
wget https://raw.githubusercontent.com/opea-project/GenAIEval/refs/heads/main/evals/benchmark/grafana/milvus_grafana.json
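The added `if ls *.json ...` guard keeps this dashboard-download script from failing when no `*.json` files are present yet, which matters once the calling test scripts run under `set -e`. For reference, a shorter equivalent (not what the PR uses) is `rm -f`, which exits 0 even when nothing matches:

```bash
# Alternative cleanup that never fails when no JSON dashboards exist yet.
rm -f ./*.json
```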

View File

@@ -7,6 +7,9 @@ pushd "../../../../../" > /dev/null
source .set_env.sh
popd > /dev/null
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export HF_TOKEN=${HF_TOKEN}
export host_ip=${ip_address}
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"

View File

@@ -43,7 +43,7 @@ Some HuggingFace resources, such as some models, are only accessible if you have
### Configure the Deployment Environment
To set up environment variables for deploying ChatQnA services, source the _setup_env.sh_ script in this directory:
To set up environment variables for deploying ChatQnA services, source the _set_env.sh_ script in this directory (if using FAQ generation or guardrails, source _set_env_faqgen.sh_ instead):
```
source ./set_env.sh

View File

@@ -4,12 +4,20 @@
# SPDX-License-Identifier: Apache-2.0
# Function to prompt for input and set environment variables
NON_INTERACTIVE=${NON_INTERACTIVE:-false}
prompt_for_env_var() {
local var_name="$1"
local prompt_message="$2"
local default_value="$3"
local mandatory="$4"
if [[ "$NON_INTERACTIVE" == "true" ]]; then
echo "Non-interactive environment detected. Setting $var_name to default: $default_value"
export "$var_name"="$default_value"
return
fi
if [[ "$mandatory" == "true" ]]; then
while [[ -z "$value" ]]; do
read -p "$prompt_message [default: \"${default_value}\"]: " value
@@ -34,7 +42,7 @@ popd > /dev/null
# Prompt the user for each required environment variable
prompt_for_env_var "EMBEDDING_MODEL_ID" "Enter the EMBEDDING_MODEL_ID" "BAAI/bge-base-en-v1.5" false
prompt_for_env_var "HUGGINGFACEHUB_API_TOKEN" "Enter the HUGGINGFACEHUB_API_TOKEN" "" true
prompt_for_env_var "HUGGINGFACEHUB_API_TOKEN" "Enter the HUGGINGFACEHUB_API_TOKEN" "${HF_TOKEN}" true
prompt_for_env_var "RERANK_MODEL_ID" "Enter the RERANK_MODEL_ID" "BAAI/bge-reranker-base" false
prompt_for_env_var "LLM_MODEL_ID" "Enter the LLM_MODEL_ID" "meta-llama/Meta-Llama-3-8B-Instruct" false
prompt_for_env_var "INDEX_NAME" "Enter the INDEX_NAME" "rag-redis" false
@@ -42,34 +50,40 @@ prompt_for_env_var "NUM_CARDS" "Enter the number of Gaudi devices" "1" false
prompt_for_env_var "host_ip" "Enter the host_ip" "$(curl ifconfig.me)" false
#Query for enabling http_proxy
prompt_for_env_var "http_proxy" "Enter the http_proxy." "" false
prompt_for_env_var "http_proxy" "Enter the http_proxy." "${http_proxy}" false
#Query for enabling https_proxy
prompt_for_env_var "https_proxy" "Enter the https_proxy." "" false
prompt_for_env_var "http_proxy" "Enter the http_proxy." "${https_proxy}" false
#Query for enabling no_proxy
prompt_for_env_var "no_proxy" "Enter the no_proxy." "" false
prompt_for_env_var "no_proxy" "Enter the no_proxy." "${no_proxy}" false
# Query for enabling logging
read -p "Enable logging? (yes/no): " logging && logging=$(echo "$logging" | tr '[:upper:]' '[:lower:]')
if [[ "$logging" == "yes" || "$logging" == "y" ]]; then
export LOGFLAG=true
if [[ "$NON_INTERACTIVE" == "true" ]]; then
# Query for enabling logging
prompt_for_env_var "LOGFLAG" "Enable logging? (yes/no): " "true" false
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
telemetry_flag=true
else
export LOGFLAG=false
fi
# Query for enabling OpenTelemetry Tracing Endpoint
read -p "Enable OpenTelemetry Tracing Endpoint? (yes/no): " telemetry && telemetry=$(echo "$telemetry" | tr '[:upper:]' '[:lower:]')
if [[ "$telemetry" == "yes" || "$telemetry" == "y" ]]; then
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
telemetry_flag=true
pushd "grafana/dashboards" > /dev/null
source download_opea_dashboard.sh
popd > /dev/null
else
telemetry_flag=false
# Query for enabling logging
read -p "Enable logging? (yes/no): " logging && logging=$(echo "$logging" | tr '[:upper:]' '[:lower:]')
if [[ "$logging" == "yes" || "$logging" == "y" ]]; then
export LOGFLAG=true
else
export LOGFLAG=false
fi
# Query for enabling OpenTelemetry Tracing Endpoint
read -p "Enable OpenTelemetry Tracing Endpoint? (yes/no): " telemetry && telemetry=$(echo "$telemetry" | tr '[:upper:]' '[:lower:]')
if [[ "$telemetry" == "yes" || "$telemetry" == "y" ]]; then
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
telemetry_flag=true
else
telemetry_flag=false
fi
fi
# Generate the .env file
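The refactored `set_env.sh` adds a `NON_INTERACTIVE` switch so the test scripts later in this diff can source it without hitting any `read` prompts. A condensed sketch of the pattern (the real helper also supports a mandatory flag and many more prompts):

```bash
#!/usr/bin/env bash
# Condensed illustration of the NON_INTERACTIVE short-circuit in set_env.sh; not the full script.
NON_INTERACTIVE=${NON_INTERACTIVE:-false}

prompt_for_env_var() {
  local var_name="$1" prompt_message="$2" default_value="$3"
  if [[ "$NON_INTERACTIVE" == "true" ]]; then
    echo "Non-interactive environment detected. Setting $var_name to default: $default_value"
    export "$var_name"="$default_value"
    return
  fi
  read -p "$prompt_message [default: \"${default_value}\"]: " value
  export "$var_name"="${value:-$default_value}"
}

prompt_for_env_var "EMBEDDING_MODEL_ID" "Enter the EMBEDDING_MODEL_ID" "BAAI/bge-base-en-v1.5"
prompt_for_env_var "HUGGINGFACEHUB_API_TOKEN" "Enter the HUGGINGFACEHUB_API_TOKEN" "${HF_TOKEN}"
```

A CI job can then run the whole flow unattended by exporting `NON_INTERACTIVE=true` before `source set_env.sh`, which is exactly what the UT scripts below do.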

View File

@@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
pushd "../../../../../" > /dev/null
source .set_env.sh
popd > /dev/null
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export HF_TOKEN=${HF_TOKEN}
export host_ip=${ip_address}
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export NUM_CARDS=1
export VLLM_SKIP_WARMUP=true
export LOGFLAG=True
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-gaudi-backend-server,chatqna-gaudi-ui-server,chatqna-gaudi-nginx-server"
export LLM_ENDPOINT_PORT=8010
export LLM_SERVER_PORT=9001
export CHATQNA_BACKEND_PORT=8888
export CHATQNA_REDIS_VECTOR_PORT=6377
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
export CHATQNA_FRONTEND_SERVICE_PORT=5175
export NGINX_PORT=80
export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM"
export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
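This new Gaudi `set_env_faqgen.sh` centralizes the FAQ-generation environment that the test scripts previously exported inline; based on the test-script hunks below, it is consumed roughly like this (the leading path is assumed from the repository layout):

```bash
# Sketch of how the FAQ-generation environment file is used by the Gaudi test flow.
cd ChatQnA/docker_compose/intel/hpu/gaudi
source set_env_faqgen.sh
docker compose -f compose_faqgen.yaml up -d
```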

ChatQnA/tests/README.md Normal file
View File

@@ -0,0 +1,123 @@
# ChatQnA E2E test scripts
## Set the required environment variable
```bash
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
```
## Run test
On Intel Xeon with TGI:
```bash
bash test_compose_tgi_on_xeon.sh
```
On Intel Xeon with vLLM:
```bash
bash test_compose_on_xeon.sh
```
On Intel Xeon with MariaDB Vector:
```bash
bash test_compose_mariadb_on_xeon.sh
```
On Intel Xeon with Pinecone:
```bash
bash test_compose_pinecone_on_xeon.sh
```
On Intel Xeon with Milvus
```bash
bash test_compose_milvus_on_xeon.sh
```
On Intel Xeon with Qdrant
```bash
bash test_compose_qdrant_on_xeon.sh
```
On Intel Xeon without Rerank:
```bash
bash test_compose_without_rerank_on_xeon.sh
```
On Intel Gaudi with TGI:
```bash
bash test_compose_tgi_on_gaudi.sh
```
On Intel Gaudi with vLLM:
```bash
bash test_compose_on_gaudi.sh
```
On Intel Gaudi with Guardrails:
```bash
bash test_compose_guardrails_on_gaudi.sh
```
On Intel Gaudi without Rerank:
```bash
bash test_compose_without_rerank_on_gaudi.sh
```
On AMD ROCm with TGI:
```bash
bash test_compose_on_rocm.sh
```
On AMD ROCm with vLLM:
```bash
bash test_compose_vllm_on_rocm.sh
```
Test FAQ Generation On Intel Xeon with TGI:
```bash
bash test_compose_faqgen_tgi_on_xeon.sh
```
Test FAQ Generation On Intel Xeon with vLLM:
```bash
bash test_compose_faqgen_on_xeon.sh
```
Test FAQ Generation On Intel Gaudi with TGI:
```bash
bash test_compose_faqgen_tgi_on_gaudi.sh
```
Test FAQ Generation On Intel Gaudi with vLLM:
```bash
bash test_compose_faqgen_on_gaudi.sh
```
Test FAQ Generation On AMD ROCm with TGI:
```bash
bash test_compose_faqgen_on_rocm.sh
```
Test FAQ Generation On AMD ROCm with vLLM:
```bash
bash test_compose_faqgen_vllm_on_rocm.sh
```
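Because the refactored environment scripts in this PR default `HUGGINGFACEHUB_API_TOKEN` from `HF_TOKEN` (and some export `HF_TOKEN` directly), it may be safest to set both before invoking a test; a hedged example:

```bash
# Both token variables point at the same Hugging Face token (assumption based on the set_env changes above).
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
export HF_TOKEN="${HUGGINGFACEHUB_API_TOKEN}"
bash test_compose_on_xeon.sh
```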

View File

@@ -36,27 +36,7 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export NUM_CARDS=1
export INDEX_NAME="rag-redis"
export host_ip=${ip_address}
export LLM_ENDPOINT_PORT=8010
export LLM_SERVER_PORT=9001
export CHATQNA_BACKEND_PORT=8888
export CHATQNA_REDIS_VECTOR_PORT=6377
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
export CHATQNA_FRONTEND_SERVICE_PORT=5175
export NGINX_PORT=80
export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM"
export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
export HF_TOKEN=${HF_TOKEN}
export VLLM_SKIP_WARMUP=true
export LOGFLAG=True
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-gaudi-backend-server,chatqna-gaudi-ui-server,chatqna-gaudi-nginx-server"
source set_env_faqgen.sh
# Start Docker Containers
docker compose -f compose_faqgen.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -15,44 +15,7 @@ WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_LLM_FAQGEN_PORT=18011
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
export CHATQNA_REDIS_VECTOR_PORT=6379
export CHATQNA_TEI_EMBEDDING_PORT=18090
export CHATQNA_TEI_RERANKING_PORT=18808
export CHATQNA_TGI_SERVICE_PORT=18008
export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna"
export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP}
export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete"
export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get"
export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest"
export CHATQNA_EMBEDDING_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP}
export CHATQNA_LLM_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_LLM_ENDPOINT="http://${HOST_IP}:${CHATQNA_TGI_SERVICE_PORT}"
export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}"
export CHATQNA_RERANK_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_RETRIEVER_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}"
export CHATQNA_BACKEND_SERVICE_NAME=chatqna
export CHATQNA_INDEX_NAME="rag-redis"
export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
source $WORKPATH/docker_compose/amd/gpu/rocm/set_env_faqgen.sh
export PATH="~/miniconda3/bin:$PATH"

View File

@@ -37,26 +37,16 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export host_ip=${ip_address}
export LLM_ENDPOINT_PORT=8010
export LLM_SERVER_PORT=9001
export CHATQNA_BACKEND_PORT=8888
export CHATQNA_REDIS_VECTOR_PORT=6377
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
export CHATQNA_FRONTEND_SERVICE_PORT=5175
export NGINX_PORT=80
export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM"
export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
export HF_TOKEN=${HF_TOKEN}
export VLLM_SKIP_WARMUP=true
export LOGFLAG=True
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-xeon-backend-server,chatqna-xeon-ui-server,chatqna-xeon-nginx-server"
source set_env.sh
# Start Docker Containers
docker compose -f compose_faqgen.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -33,25 +33,8 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export host_ip=${ip_address}
export LLM_ENDPOINT_PORT=8010
export LLM_SERVER_PORT=9001
export CHATQNA_BACKEND_PORT=8888
export CHATQNA_REDIS_VECTOR_PORT=6377
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
export CHATQNA_FRONTEND_SERVICE_PORT=5175
export NGINX_PORT=80
export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
export HF_TOKEN=${HF_TOKEN}
export LOGFLAG=True
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-gaudi-backend-server,chatqna-gaudi-ui-server,chatqna-gaudi-nginx-server"
source set_env_faqgen.sh
# Start Docker Containers
docker compose -f compose_faqgen_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -37,25 +37,16 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export host_ip=${ip_address}
export LLM_ENDPOINT_PORT=8010
export LLM_SERVER_PORT=9001
export CHATQNA_BACKEND_PORT=8888
export CHATQNA_REDIS_VECTOR_PORT=6377
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
export CHATQNA_FRONTEND_SERVICE_PORT=5175
export NGINX_PORT=80
export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
export HF_TOKEN=${HF_TOKEN}
export LOGFLAG=True
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-xeon-backend-server,chatqna-xeon-ui-server,chatqna-xeon-nginx-server"
source set_env.sh
# Start Docker Containers
docker compose -f compose_faqgen_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -14,41 +14,7 @@ WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_LLM_FAQGEN_PORT=18011
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
export CHATQNA_REDIS_VECTOR_PORT=6379
export CHATQNA_TEI_EMBEDDING_PORT=18090
export CHATQNA_TEI_RERANKING_PORT=18808
export CHATQNA_VLLM_SERVICE_PORT=18008
export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna"
export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP_EXTERNAL}
export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete"
export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get"
export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest"
export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP}
export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}"
export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}"
export LLM_ENDPOINT="http://${HOST_IP}:${CHATQNA_VLLM_SERVICE_PORT}"
export CHATQNA_BACKEND_SERVICE_NAME=chatqna
export CHATQNA_INDEX_NAME="rag-redis"
export CHATQNA_TYPE="CHATQNA_FAQGEN"
export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM"
source $WORKPATH/docker_compose/amd/gpu/rocm/set_env_faqgen_vllm.sh
function build_docker_images() {
opea_branch=${opea_branch:-"main"}

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -36,14 +36,8 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export NUM_CARDS=1
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export host_ip=${ip_address}
export GURADRAILS_MODEL_ID="meta-llama/Meta-Llama-Guard-2-8B"
source set_env_faqgen.sh
# Start Docker Containers
docker compose -f compose_guardrails.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
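Several test scripts in this diff switch from `set -e` to `set -xe`; the added `-x` makes bash trace each command to stderr before running it, while `-e` still aborts on the first failure. A tiny self-contained illustration:

```bash
#!/usr/bin/env bash
# -x: print each command before executing it; -e: exit immediately when a command fails.
set -xe
echo "building images"   # traced as: + echo 'building images'
false                    # traced, returns non-zero, and the script stops here
echo "never reached"
```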

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2025 MariaDB Foundation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -39,14 +39,8 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export MARIADB_DATABASE="vectordb"
export MARIADB_USER="chatqna"
export MARIADB_PASSWORD="test"
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export host_ip=${ip_address}
source set_env_mariadb.sh
# Start Docker Containers
docker compose -f compose_mariadb.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -39,11 +39,8 @@ function build_docker_images() {
}
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon/
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export LOGFLAG=true
source set_env.sh
# Start Docker Containers
docker compose -f compose_milvus.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -36,16 +36,10 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export NUM_CARDS=1
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export NON_INTERACTIVE=true
export host_ip=${ip_address}
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
export telemetry=yes
source set_env.sh
# Start Docker Containers
docker compose -f compose.yaml -f compose.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -15,41 +15,7 @@ WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
export CHATQNA_REDIS_VECTOR_PORT=6379
export CHATQNA_TEI_EMBEDDING_PORT=18090
export CHATQNA_TEI_RERANKING_PORT=18808
export CHATQNA_TGI_SERVICE_PORT=18008
export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna"
export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP}
export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete"
export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get"
export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest"
export CHATQNA_EMBEDDING_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP}
export CHATQNA_LLM_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}"
export CHATQNA_RERANK_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_RETRIEVER_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}"
export CHATQNA_BACKEND_SERVICE_NAME=chatqna
export CHATQNA_INDEX_NAME="rag-redis"
source $WORKPATH/docker_compose/amd/gpu/rocm/set_env.sh
export PATH="~/miniconda3/bin:$PATH"

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -40,15 +40,7 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export host_ip=${ip_address}
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
source set_env.sh
# Start Docker Containers
docker compose -f compose.yaml -f compose.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -41,14 +41,11 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon/
export no_proxy=${no_proxy},${ip_address}
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export PINECONE_API_KEY=${PINECONE_KEY_LANGCHAIN_TEST}
export PINECONE_INDEX_NAME="langchain-test"
export INDEX_NAME="langchain-test"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export LOGFLAG=true
source set_env.sh
# Start Docker Containers
docker compose -f compose_pinecone.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -40,11 +40,8 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-qdrant"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
source set_env.sh
sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -32,15 +32,10 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export NUM_CARDS=1
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
export NON_INTERACTIVE=true
export host_ip=${ip_address}
export telemetry=yes
source set_env.sh
# Start Docker Containers
docker compose -f compose_tgi.yaml -f compose_tgi.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -33,14 +33,7 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
source set_env.sh
# Start Docker Containers
docker compose -f compose_tgi.yaml -f compose_tgi.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -14,42 +14,7 @@ WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
export CHATQNA_REDIS_VECTOR_PORT=6379
export CHATQNA_TEI_EMBEDDING_PORT=18090
export CHATQNA_TEI_RERANKING_PORT=18808
export CHATQNA_VLLM_SERVICE_PORT=18008
export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna"
export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP_EXTERNAL}
export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete"
export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get"
export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest"
export CHATQNA_EMBEDDING_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP}
export CHATQNA_LLM_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}"
export CHATQNA_RERANK_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_RETRIEVER_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}"
export CHATQNA_BACKEND_SERVICE_NAME=chatqna
export CHATQNA_INDEX_NAME="rag-redis"
source $WORKPATH/docker_compose/amd/gpu/rocm/set_env_vllm.sh
function build_docker_images() {
opea_branch=${opea_branch:-"main"}

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -36,11 +36,8 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export NUM_CARDS=1
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export NON_INTERACTIVE=true
source set_env.sh
# Start Docker Containers
docker compose -f compose_without_rerank.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -41,10 +41,7 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
source set_env.sh
# Start Docker Containers
docker compose -f compose_without_rerank.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

View File

@@ -52,18 +52,29 @@ This uses the default vLLM-based deployment profile (`codegen-xeon-vllm`).
```bash
# Replace with your host's external IP address (do not use localhost or 127.0.0.1)
export HOST_IP="your_external_ip_address"
export host_ip="your_external_ip_address"
# Replace with your Hugging Face Hub API token
export HUGGINGFACEHUB_API_TOKEN="your_huggingface_token"
# Optional: Configure proxy if needed
# export http_proxy="your_http_proxy"
# export https_proxy="your_https_proxy"
# export no_proxy="localhost,127.0.0.1,${HOST_IP}" # Add other hosts if necessary
# export no_proxy="localhost,127.0.0.1,${host_ip}" # Add other hosts if necessary
source ../../../set_env.sh
```
_Note: The compose file might read additional variables from a `.env` file or expect them defined elsewhere. Ensure all required variables like ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.) are set if not using defaults from the compose file._
_Note: The compose file might read additional variables from set_env.sh. Ensure all required variables like ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.) are set if not using defaults from the compose file._
For example, the default
```
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-32B-Instruct"
```
can be changed to a smaller model if needed:
```
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
```
2. **Start Services (vLLM Profile):**
@@ -91,7 +102,7 @@ The `compose.yaml` file uses Docker Compose profiles to select the LLM serving b
- **Services Deployed:** `codegen-tgi-server`, `codegen-llm-server`, `codegen-tei-embedding-server`, `codegen-retriever-server`, `redis-vector-db`, `codegen-dataprep-server`, `codegen-backend-server`, `codegen-gradio-ui-server`.
- **To Run:**
```bash
# Ensure environment variables (HOST_IP, HUGGINGFACEHUB_API_TOKEN) are set
# Ensure environment variables (host_ip, HUGGINGFACEHUB_API_TOKEN) are set
docker compose --profile codegen-xeon-tgi up -d
```
@@ -103,14 +114,14 @@ Key parameters are configured via environment variables set before running `dock
| Environment Variable | Description | Default (Set Externally) |
| :-------------------------------------- | :------------------------------------------------------------------------------------------------------------------ | :----------------------------------------------------------------------------------------------- |
| `HOST_IP` | External IP address of the host machine. **Required.** | `your_external_ip_address` |
| `host_ip` | External IP address of the host machine. **Required.** | `your_external_ip_address` |
| `HUGGINGFACEHUB_API_TOKEN` | Your Hugging Face Hub token for model access. **Required.** | `your_huggingface_token` |
| `LLM_MODEL_ID` | Hugging Face model ID for the CodeGen LLM (used by TGI/vLLM service). Configured within `compose.yaml` environment. | `Qwen/Qwen2.5-Coder-7B-Instruct` |
| `EMBEDDING_MODEL_ID` | Hugging Face model ID for the embedding model (used by TEI service). Configured within `compose.yaml` environment. | `BAAI/bge-base-en-v1.5` |
| `LLM_ENDPOINT` | Internal URL for the LLM serving endpoint (used by `codegen-llm-server`). Configured in `compose.yaml`. | `http://codegen-tgi-server:80/generate` or `http://codegen-vllm-server:8000/v1/chat/completions` |
| `TEI_EMBEDDING_ENDPOINT` | Internal URL for the Embedding service. Configured in `compose.yaml`. | `http://codegen-tei-embedding-server:80/embed` |
| `DATAPREP_ENDPOINT` | Internal URL for the Data Preparation service. Configured in `compose.yaml`. | `http://codegen-dataprep-server:80/dataprep` |
| `BACKEND_SERVICE_ENDPOINT` | External URL for the CodeGen Gateway (MegaService). Derived from `HOST_IP` and port `7778`. | `http://${HOST_IP}:7778/v1/codegen` |
| `BACKEND_SERVICE_ENDPOINT` | External URL for the CodeGen Gateway (MegaService). Derived from `host_ip` and port `7778`. | `http://${host_ip}:7778/v1/codegen` |
| `*_PORT` (Internal) | Internal container ports (e.g., `80`, `6379`). Defined in `compose.yaml`. | N/A |
| `http_proxy` / `https_proxy`/`no_proxy` | Network proxy settings (if required). | `""` |
@@ -150,23 +161,23 @@ Check logs for specific services: `docker compose logs <service_name>`
### Run Validation Script/Commands
Use `curl` commands to test the main service endpoints. Ensure `HOST_IP` is correctly set in your environment.
Use `curl` commands to test the main service endpoints. Ensure `host_ip` is correctly set in your environment.
1. **Validate LLM Serving Endpoint (Example for vLLM on default port 8000 internally, exposed differently):**
1. **Validate LLM Serving Endpoint (Example for vLLM on default port 9000 internally, exposed differently):**
```bash
# This command structure targets the OpenAI-compatible vLLM endpoint
curl http://${HOST_IP}:8000/v1/chat/completions \
curl http://${host_ip}:9000/v1/chat/completions \
-X POST \
-H 'Content-Type: application/json' \
-d '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "Implement a basic Python class"}], "max_tokens":32}'
-d '{"model": "Qwen/Qwen2.5-Coder-32B-Instruct", "messages": [{"role": "user", "content": "Implement a basic Python class"}], "max_tokens":32}'
```
- **Expected Output:** A JSON response with generated code in `choices[0].message.content`.
2. **Validate CodeGen Gateway (MegaService on default port 7778):**
```bash
curl http://${HOST_IP}:7778/v1/codegen \
curl http://${host_ip}:7778/v1/codegen \
-H "Content-Type: application/json" \
-d '{"messages": "Write a Python function that adds two numbers."}'
```
@@ -179,8 +190,8 @@ Multiple UI options can be configured via the `compose.yaml`.
### Gradio UI (Default)
Access the default Gradio UI by navigating to:
`http://{HOST_IP}:8080`
_(Port `8080` is the default host mapping for `codegen-gradio-ui-server`)_
`http://{host_ip}:5173`
_(Port `5173` is the default host mapping for `codegen-gradio-ui-server`)_
![Gradio UI - Code Generation](../../../../assets/img/codegen_gradio_ui_main.png)
![Gradio UI - Resource Management](../../../../assets/img/codegen_gradio_ui_dataprep.png)
@@ -189,7 +200,7 @@ _(Port `8080` is the default host mapping for `codegen-gradio-ui-server`)_
1. Modify `compose.yaml`: Comment out the `codegen-gradio-ui-server` service and uncomment/add the `codegen-xeon-ui-server` (Svelte) service definition, ensuring the port mapping is correct (e.g., `"- 5173:5173"`).
2. Restart Docker Compose: `docker compose --profile <profile_name> up -d`
3. Access: `http://{HOST_IP}:5173` (or the host port you mapped).
3. Access: `http://{host_ip}:5173` (or the host port you mapped).
![Svelte UI Init](../../../../assets/img/codeGen_ui_init.jpg)
@@ -197,7 +208,7 @@ _(Port `8080` is the default host mapping for `codegen-gradio-ui-server`)_
1. Modify `compose.yaml`: Comment out the default UI service and uncomment/add the `codegen-xeon-react-ui-server` definition, ensuring correct port mapping (e.g., `"- 5174:80"`).
2. Restart Docker Compose: `docker compose --profile <profile_name> up -d`
3. Access: `http://{HOST_IP}:5174` (or the host port you mapped).
3. Access: `http://{host_ip}:5174` (or the host port you mapped).
![React UI](../../../../assets/img/codegen_react.png)
@@ -207,7 +218,7 @@ Users can interact with the backend service using the `Neural Copilot` VS Code e
1. **Install:** Find and install `Neural Copilot` from the VS Code Marketplace.
![Install Copilot](../../../../assets/img/codegen_copilot.png)
2. **Configure:** Set the "Service URL" in the extension settings to your CodeGen backend endpoint: `http://${HOST_IP}:7778/v1/codegen` (use the correct port if changed).
2. **Configure:** Set the "Service URL" in the extension settings to your CodeGen backend endpoint: `http://${host_ip}:7778/v1/codegen` (use the correct port if changed).
![Configure Endpoint](../../../../assets/img/codegen_endpoint.png)
3. **Usage:**
- **Inline Suggestion:** Type a comment describing the code you want (e.g., `# Python function to read a file`) and wait for suggestions.
@@ -218,7 +229,7 @@ Users can interact with the backend service using the `Neural Copilot` VS Code e
## Troubleshooting
- **Model Download Issues:** Check `HUGGINGFACEHUB_API_TOKEN`. Ensure internet connectivity or correct proxy settings. Check logs of `tgi-service`/`vllm-service` and `tei-embedding-server`. Gated models need prior Hugging Face access.
- **Connection Errors:** Verify `HOST_IP` is correct and accessible. Check `docker ps` for port mappings. Ensure `no_proxy` includes `HOST_IP` if using a proxy. Check logs of the service failing to connect (e.g., `codegen-backend-server` logs if it can't reach `codegen-llm-server`).
- **Connection Errors:** Verify `host_ip` is correct and accessible. Check `docker ps` for port mappings. Ensure `no_proxy` includes `host_ip` if using a proxy. Check logs of the service failing to connect (e.g., `codegen-backend-server` logs if it can't reach `codegen-llm-server`).
- **"Container name is in use"**: Stop existing containers (`docker compose down`) or change `container_name` in `compose.yaml`.
- **Resource Issues:** CodeGen models can be memory-intensive. Monitor host RAM usage. Increase Docker resources if needed.

View File

@@ -53,18 +53,29 @@ This uses the default vLLM-based deployment profile (`codegen-gaudi-vllm`).
```bash
# Replace with your host's external IP address (do not use localhost or 127.0.0.1)
export HOST_IP="your_external_ip_address"
export host_ip="your_external_ip_address"
# Replace with your Hugging Face Hub API token
export HUGGINGFACEHUB_API_TOKEN="your_huggingface_token"
# Optional: Configure proxy if needed
# export http_proxy="your_http_proxy"
# export https_proxy="your_https_proxy"
# export no_proxy="localhost,127.0.0.1,${HOST_IP}" # Add other hosts if necessary
# export no_proxy="localhost,127.0.0.1,${host_ip}" # Add other hosts if necessary
source ../../../set_env.sh
```
_Note: Ensure all required variables like ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.) are set if not using defaults from the compose file._
_Note: The compose file might read additional variables from set_env.sh. Ensure all required variables like ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.) are set if not using defaults from the compose file._
For example, the default
```
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-32B-Instruct"
```
can be changed to a smaller model if needed:
```
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
```
2. **Start Services (vLLM Profile):**
@@ -94,7 +105,7 @@ The `compose.yaml` file uses Docker Compose profiles to select the LLM serving b
- **Other Services:** Same CPU-based services as the vLLM profile.
- **To Run:**
```bash
# Ensure environment variables (HOST_IP, HUGGINGFACEHUB_API_TOKEN) are set
# Ensure environment variables (host_ip, HUGGINGFACEHUB_API_TOKEN) are set
docker compose --profile codegen-gaudi-tgi up -d
```
@@ -106,14 +117,14 @@ Key parameters are configured via environment variables set before running `dock
| Environment Variable | Description | Default (Set Externally) |
| :-------------------------------------- | :------------------------------------------------------------------------------------------------------------------ | :----------------------------------------------------------------------------------------------- |
| `HOST_IP` | External IP address of the host machine. **Required.** | `your_external_ip_address` |
| `host_ip` | External IP address of the host machine. **Required.** | `your_external_ip_address` |
| `HUGGINGFACEHUB_API_TOKEN` | Your Hugging Face Hub token for model access. **Required.** | `your_huggingface_token` |
| `LLM_MODEL_ID` | Hugging Face model ID for the CodeGen LLM (used by TGI/vLLM service). Configured within `compose.yaml` environment. | `Qwen/Qwen2.5-Coder-7B-Instruct` |
| `LLM_MODEL_ID` | Hugging Face model ID for the CodeGen LLM (used by TGI/vLLM service). Configured within `compose.yaml` environment. | `Qwen/Qwen2.5-Coder-32B-Instruct` |
| `EMBEDDING_MODEL_ID` | Hugging Face model ID for the embedding model (used by TEI service). Configured within `compose.yaml` environment. | `BAAI/bge-base-en-v1.5` |
| `LLM_ENDPOINT` | Internal URL for the LLM serving endpoint (used by `codegen-llm-server`). Configured in `compose.yaml`. | `http://codegen-tgi-server:80/generate` or `http://codegen-vllm-server:8000/v1/chat/completions` |
| `TEI_EMBEDDING_ENDPOINT` | Internal URL for the Embedding service. Configured in `compose.yaml`. | `http://codegen-tei-embedding-server:80/embed` |
| `DATAPREP_ENDPOINT` | Internal URL for the Data Preparation service. Configured in `compose.yaml`. | `http://codegen-dataprep-server:80/dataprep` |
| `BACKEND_SERVICE_ENDPOINT` | External URL for the CodeGen Gateway (MegaService). Derived from `HOST_IP` and port `7778`. | `http://${HOST_IP}:7778/v1/codegen` |
| `BACKEND_SERVICE_ENDPOINT` | External URL for the CodeGen Gateway (MegaService). Derived from `host_ip` and port `7778`. | `http://${host_ip}:7778/v1/codegen` |
| `*_PORT` (Internal) | Internal container ports (e.g., `80`, `6379`). Defined in `compose.yaml`. | N/A |
| `http_proxy` / `https_proxy`/`no_proxy` | Network proxy settings (if required). | `""` |
@@ -170,21 +181,21 @@ Check logs: `docker compose logs <service_name>`. Pay attention to `vllm-gaudi-s
### Run Validation Script/Commands
Use `curl` commands targeting the main service endpoints. Ensure `HOST_IP` is correctly set.
Use `curl` commands targeting the main service endpoints. Ensure `host_ip` is correctly set.
1. **Validate LLM Serving Endpoint (Example for vLLM on default port 8000 internally, exposed differently):**
1. **Validate LLM Serving Endpoint (Example for vLLM on default port 9000 internally, exposed differently):**
```bash
# This command structure targets the OpenAI-compatible vLLM endpoint
curl http://${HOST_IP}:8000/v1/chat/completions \
curl http://${host_ip}:9000/v1/chat/completions \
-X POST \
-H 'Content-Type: application/json' \
-d '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "Implement a basic Python class"}], "max_tokens":32}'
-d '{"model": "Qwen/Qwen2.5-Coder-32B-Instruct", "messages": [{"role": "user", "content": "Implement a basic Python class"}], "max_tokens":32}'
```
2. **Validate CodeGen Gateway (MegaService, default host port 7778):**
```bash
curl http://${HOST_IP}:7778/v1/codegen \
curl http://${host_ip}:7778/v1/codegen \
-H "Content-Type: application/json" \
-d '{"messages": "Implement a sorting algorithm in Python."}'
```
@@ -197,8 +208,8 @@ UI options are similar to the Xeon deployment.
### Gradio UI (Default)
Access the default Gradio UI:
`http://{HOST_IP}:8080`
_(Port `8080` is the default host mapping)_
`http://{host_ip}:5173`
_(Port `5173` is the default host mapping)_
![Gradio UI](../../../../assets/img/codegen_gradio_ui_main.png)
@@ -206,17 +217,17 @@ _(Port `8080` is the default host mapping)_
1. Modify `compose.yaml`: Swap Gradio service for Svelte (`codegen-gaudi-ui-server`), check port map (e.g., `5173:5173`).
2. Restart: `docker compose --profile <profile_name> up -d`
3. Access: `http://{HOST_IP}:5173`
3. Access: `http://{host_ip}:5173`
### React UI (Optional)
1. Modify `compose.yaml`: Swap Gradio service for React (`codegen-gaudi-react-ui-server`), check port map (e.g., `5174:80`).
2. Restart: `docker compose --profile <profile_name> up -d`
3. Access: `http://{HOST_IP}:5174`
3. Access: `http://{host_ip}:5174`
### VS Code Extension (Optional)
Use the `Neural Copilot` extension configured with the CodeGen backend URL: `http://${HOST_IP}:7778/v1/codegen`. (See Xeon README for detailed setup screenshots).
Use the `Neural Copilot` extension configured with the CodeGen backend URL: `http://${host_ip}:7778/v1/codegen`. (See Xeon README for detailed setup screenshots).
## Troubleshooting
@@ -226,7 +237,7 @@ Use the `Neural Copilot` extension configured with the CodeGen backend URL: `htt
- Verify `runtime: habana` and volume mounts in `compose.yaml`.
- Gaudi initialization can take significant time and memory. Monitor resource usage.
- **Model Download Issues:** Check `HUGGINGFACEHUB_API_TOKEN`, internet access, proxy settings. Check LLM service logs.
- **Connection Errors:** Verify `HOST_IP`, ports, and proxy settings. Use `docker ps` and check service logs.
- **Connection Errors:** Verify `host_ip`, ports, and proxy settings. Use `docker ps` and check service logs.
## Stopping the Application

View File

@@ -22,12 +22,11 @@ This Code Translation use case demonstrates Text Generation Inference across mul
The table below lists currently available deployment options. They outline in detail the implementation of this example on selected hardware.
| Category               | Deployment Option    | Description                                                                  |
| ---------------------- | -------------------- | ---------------------------------------------------------------------------- |
| On-premise Deployments | Docker compose       | [CodeTrans deployment on Xeon](./docker_compose/intel/cpu/xeon/README.md)    |
|                        |                      | [CodeTrans deployment on Gaudi](./docker_compose/intel/hpu/gaudi/README.md)  |
|                        |                      | [CodeTrans deployment on AMD ROCm](./docker_compose/amd/gpu/rocm/README.md)  |
|                        | Kubernetes           | [Helm Charts](./kubernetes/helm/README.md)                                   |
|                        | Azure                | Work-in-progress                                                             |
|                        | Intel Tiber AI Cloud | Work-in-progress                                                             |


@@ -44,3 +44,38 @@ Some HuggingFace resources, such as some models, are only accessible if the deve
2. (Docker only) If all microservices are working, check port ${host_ip}:7777; the port may already be taken by another user, in which case you can modify it in `compose.yaml`.
3. (Docker only) If you get errors like "The container name is in use", change the container name in `compose.yaml`.
## Monitoring OPEA Services with Prometheus and Grafana Dashboard
OPEA microservice deployments can easily be monitored through Grafana dashboards using data collected via Prometheus. Follow the [README](https://github.com/opea-project/GenAIEval/blob/main/evals/benchmark/grafana/README.md) to set up Prometheus and Grafana servers and import dashboards to monitor the OPEA services.
![example dashboards](./assets/img/example_dashboards.png)
![tgi dashboard](./assets/img/tgi_dashboard.png)
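As a quick check that Prometheus is actually scraping the OPEA services, its targets API can be queried directly. Port 9090 below is Prometheus's default and an assumption; adjust it to match your Prometheus deployment:
```bash
# Summarize the health of all scrape targets
curl -s http://${host_ip}:9090/api/v1/targets | grep -o '"health":"[^"]*"' | sort | uniq -c
```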
## Tracing with OpenTelemetry and Jaeger
> NOTE: This feature is disabled by default. Please use the compose.telemetry.yaml file to enable this feature.
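A minimal sketch of enabling it, assuming `compose.telemetry.yaml` sits next to `compose.yaml` in the deployment folder:
```bash
docker compose -f compose.yaml -f compose.telemetry.yaml up -d
```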
OPEA microservice and [TGI](https://huggingface.co/docs/text-generation-inference/en/index)/[TEI](https://huggingface.co/docs/text-embeddings-inference/en/index) serving can easily be traced through [Jaeger](https://www.jaegertracing.io/) dashboards in conjunction with [OpenTelemetry](https://opentelemetry.io/) Tracing feature. Follow the [README](https://github.com/opea-project/GenAIComps/tree/main/comps/cores/telemetry#tracing) to trace additional functions if needed.
Tracing data is exported to Jaeger at http://{EXTERNAL_IP}:4318/v1/traces (the OTLP HTTP endpoint).
Users can get the external IP with the command below.
```bash
ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+'
```
Access the Jaeger dashboard UI at http://{EXTERNAL_IP}:16686
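To confirm traces are arriving without opening the UI, Jaeger's standard query API can be polled (same host and port as the dashboard above):
```bash
# Lists the services that have reported spans so far
curl -s http://${EXTERNAL_IP}:16686/api/services
```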
For TGI serving on Gaudi, users can see the different services, such as opea, TEI, and TGI.
![Screenshot from 2024-12-27 11-58-18](https://github.com/user-attachments/assets/6126fa70-e830-4780-bd3f-83cb6eff064e)
Here is a screenshot of one trace of a TGI serving request.
![Screenshot from 2024-12-27 11-26-25](https://github.com/user-attachments/assets/3a7c51c6-f422-41eb-8e82-c3df52cd48b8)
There are also OPEA-related traces. Users can understand the time breakdown of each service request by looking into each opea:schedule operation.
![image](https://github.com/user-attachments/assets/6137068b-b374-4ff8-b345-993343c0c25f)
Some functions are asynchronous, such as `llm/MicroService_asyn_generate`; the trace of an asynchronous function appears under a separate operation such as opea:llm_generate_stream.
![image](https://github.com/user-attachments/assets/a973d283-198f-4ce2-a7eb-58515b77503e)


@@ -2,7 +2,11 @@
DocRetriever is the most widely adopted use case for leveraging different methodologies to match a user query against a set of free-text records. DocRetriever is essential to a RAG system, which bridges the knowledge gap by dynamically fetching relevant information from external sources, ensuring that generated responses remain factual and current. At the core of this architecture are vector databases, which are instrumental in enabling efficient and semantic retrieval of information. These databases store data as vectors, allowing RAG to swiftly access the most pertinent documents or data points based on semantic similarity.
_Note: As the related Docker images have been published to Docker Hub, you can skip steps 1 and 2 below and start from step 3._

## 1. Build Images for necessary microservices. (Optional)
- Embedding TEI Image
@@ -30,7 +34,7 @@ DocRetriever are the most widely adopted use case for leveraging the different m
docker build -t opea/dataprep:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/dataprep/src/Dockerfile .
```
## 2. Build Images for MegaService (Optional)
```bash
cd ..
@@ -44,6 +48,19 @@ docker build --no-cache -t opea/doc-index-retriever:latest --build-arg https_pro
```bash
export host_ip="YOUR IP ADDR"
export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
```
Set the environment variables by running:
```bash
cd GenAIExamples/DocIndexRetriever/docker_compose/intel/cpu/xeon
source set_env.sh
```
Note: set_env.sh sets all required variables. If you are not using the defaults from the compose file, ensure that required variables such as the ports (LLM_SERVICE_PORT, MEGA_SERVICE_PORT, etc.) are set.
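As a quick sanity check after sourcing the script, echo a few of the variables used in this guide (names taken from the manual list below; a sketch only, the full variable list is longer):
```bash
echo "host_ip=${host_ip}"
echo "EMBEDDING_MODEL_ID=${EMBEDDING_MODEL_ID}"
echo "TEI_EMBEDDING_ENDPOINT=${TEI_EMBEDDING_ENDPOINT}"
```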
Or set the environment variables manually:
```bash
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:6006"


@@ -30,66 +30,38 @@ The architecture of the SearchQnA Application is illustrated below:
The SearchQnA example is implemented using the component-level microservices defined in [GenAIComps](https://github.com/opea-project/GenAIComps). The flow chart below shows the information flow between different microservices for this example.
```mermaid
---
config:
flowchart:
nodeSpacing: 400
rankSpacing: 100
curve: linear
themeVariables:
fontSize: 50px
---
%% Orange are microservices from third parties that are 'wrapped' as OPEA components.
flowchart LR
%% Colors %%
classDef blue fill:#ADD8E6,stroke:#ADD8E6,stroke-width:2px,fill-opacity:0.5
classDef orange fill:#FBAA60,stroke:#ADD8E6,stroke-width:2px,fill-opacity:0.5
classDef orchid fill:#C26DBC,stroke:#ADD8E6,stroke-width:2px,fill-opacity:0.5
classDef invisible fill:transparent,stroke:transparent;
style SearchQnA-MegaService stroke:#000000
User["User"] --> Nginx["Nginx<br>searchqna-nginx-server"]
Nginx --> UI["UI<br>searchqna-ui-server"] & Gateway & User
UI --> Nginx
Gateway --> Nginx & Embedding
Embedding --> Retriever
Retriever --> Reranker
Reranker --> LLM
LLM --> Gateway
LLM <-.-> TGI_Service["LLM<br>tgi-service"]
Embedding <-.-> TEI_Embedding["TEI Embedding<br>tei-embedding-server"]
Reranker <-.-> TEI_Reranker["TEI Reranker<br>tei-reranking-server"]
%% Subgraphs %%
subgraph SearchQnA-MegaService["SearchQnA MegaService "]
direction LR
EM([Embedding MicroService]):::blue
RET([Web Retrieval MicroService]):::blue
RER([Rerank MicroService]):::blue
LLM([LLM MicroService]):::blue
end
subgraph UserInterface[" User Interface "]
direction LR
a([User Input Query]):::orchid
UI([UI server<br>]):::orchid
end
TEI_RER{{Reranking service<br>}}
TEI_EM{{Embedding service <br>}}
VDB{{Vector DB<br><br>}}
R_RET{{Web Retriever service <br>}}
LLM_gen{{LLM Service <br>}}
GW([SearchQnA GateWay<br>]):::orange
%% Questions interaction
direction LR
a[User Input Query] --> UI
UI --> GW
GW <==> SearchQnA-MegaService
EM ==> RET
RET ==> RER
RER ==> LLM
%% Embedding service flow
direction LR
EM <-.-> TEI_EM
RET <-.-> R_RET
RER <-.-> TEI_RER
LLM <-.-> LLM_gen
TEI_Embedding:::ext
TEI_Reranker:::ext
TGI_Service:::ext
subgraph MegaService["MegaService"]
LLM["LLM<br>llm-textgen-server"]
Reranker["Reranker<br>reranking-tei-server"]
Retriever["Retriever<br>web-retriever-server"]
Embedding["Embedding<br>embedding-server"]
end
subgraph Backend["searchqna-backend-server"]
direction TB
%% Vector DB interaction
R_RET <-.-> VDB
MegaService
Gateway["Backend Endpoint"]
end
classDef default fill:#fff,stroke:#000,color:#000
classDef ext fill:#f9cb9c,stroke:#000,color:#000
style MegaService margin-top:20px,margin-bottom:20px
```
This SearchQnA use case performs Search-augmented Question Answering across multiple platforms. Currently, we provide the example for Intel® Gaudi® 2 and Intel® Xeon® Scalable Processors, and we invite contributions from other hardware vendors to expand the OPEA ecosystem.
@@ -98,8 +70,8 @@ This SearchQnA use case performs Search-augmented Question Answering across mult
The table below lists the available deployment options and their implementation details for different hardware platforms.
| Category               | Deployment Option      | Description                                                                  |
| ---------------------- | ---------------------- | ---------------------------------------------------------------------------- |
| On-premise Deployments | Docker Compose (Xeon)  | [SearchQnA deployment on Xeon](./docker_compose/intel/cpu/xeon/README.md)    |
|                        | Docker Compose (Gaudi) | [SearchQnA deployment on Gaudi](./docker_compose/intel/hpu/gaudi/README.md)  |
|                        | Docker Compose (ROCm)  | [SearchQnA deployment on AMD ROCm](./docker_compose/amd/gpu/rocm/README.md)  |


@@ -170,7 +170,25 @@ services:
no_proxy: ${no_proxy}
https_proxy: ${https_proxy}
http_proxy: ${http_proxy}
BACKEND_BASE_URL: ${SEARCH_BACKEND_SERVICE_ENDPOINT}
ipc: host
restart: always
search-nginx-server:
image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
container_name: search-nginx-server
depends_on:
- search-backend-server
- search-ui-server
ports:
- "${NGINX_PORT:-80}:80"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- FRONTEND_SERVICE_IP=search-ui-server
- FRONTEND_SERVICE_PORT=5173
- BACKEND_SERVICE_NAME=search
- BACKEND_SERVICE_IP=search-backend-server
- BACKEND_SERVICE_PORT=8888
ipc: host
restart: always


@@ -176,10 +176,27 @@ services:
no_proxy: ${no_proxy}
https_proxy: ${https_proxy}
http_proxy: ${http_proxy}
BACKEND_BASE_URL: ${SEARCH_BACKEND_SERVICE_ENDPOINT}
ipc: host
restart: always
search-nginx-server:
image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
container_name: search-nginx-server
depends_on:
- search-backend-server
- search-ui-server
ports:
- "${NGINX_PORT:-80}:80"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- FRONTEND_SERVICE_IP=search-ui-server
- FRONTEND_SERVICE_PORT=5173
- BACKEND_SERVICE_NAME=search
- BACKEND_SERVICE_IP=search-backend-server
- BACKEND_SERVICE_PORT=8888
ipc: host
restart: always
networks:
default:
driver: bridge


@@ -168,10 +168,27 @@ services:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- BACKEND_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
ipc: host
restart: always
searchqna-xeon-nginx-server:
image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
container_name: searchqna-xeon-nginx-server
depends_on:
- searchqna-xeon-backend-server
- searchqna-xeon-ui-server
ports:
- "${NGINX_PORT:-80}:80"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- FRONTEND_SERVICE_IP=searchqna-xeon-ui-server
- FRONTEND_SERVICE_PORT=5173
- BACKEND_SERVICE_NAME=searchqna
- BACKEND_SERVICE_IP=searchqna-xeon-backend-server
- BACKEND_SERVICE_PORT=8888
ipc: host
restart: always
networks:
default:


@@ -187,7 +187,25 @@ services:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- BACKEND_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
ipc: host
restart: always
searchqna-gaudi-nginx-server:
image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
container_name: searchqna-gaudi-nginx-server
depends_on:
- searchqna-gaudi-backend-server
- searchqna-gaudi-ui-server
ports:
- "${NGINX_PORT:-80}:80"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- FRONTEND_SERVICE_IP=searchqna-gaudi-ui-server
- FRONTEND_SERVICE_PORT=5173
- BACKEND_SERVICE_NAME=searchqna
- BACKEND_SERVICE_IP=searchqna-gaudi-backend-server
- BACKEND_SERVICE_PORT=8888
ipc: host
restart: always


@@ -46,3 +46,9 @@ services:
context: GenAIComps
dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu
image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest}
nginx:
build:
context: GenAIComps
dockerfile: comps/third_parties/nginx/src/Dockerfile
extends: searchqna
image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
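Before running the full test script, the new `nginx` target can be checked against the build file (file and service names taken from the snippets in this change):
```bash
# Should list nginx among the buildable services
docker compose -f build.yaml config --services | grep nginx
```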


@@ -32,7 +32,7 @@ function build_docker_images() {
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen"
service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen nginx"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6


@@ -20,7 +20,7 @@ function build_docker_images() {
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen"
service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen nginx"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6


@@ -32,7 +32,7 @@ function build_docker_images() {
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen"
service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen nginx"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6


@@ -20,7 +20,7 @@ function build_docker_images() {
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen vllm-rocm"
service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen vllm-rocm nginx"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5


@@ -1 +1 @@
BACKEND_BASE_URL = 'http://backend_address:3008/v1/searchqna'
BACKEND_BASE_URL = '/v1/searchqna'
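With a relative path, the UI depends on the nginx server added above to proxy requests to the backend. A hedged end-to-end check through nginx (the path follows the UI setting above, the port default comes from the compose snippets, and the payload shape is assumed to match the SearchQnA gateway):
```bash
curl http://${host_ip}:${NGINX_PORT:-80}/v1/searchqna \
  -H "Content-Type: application/json" \
  -d '{"messages": "What is the latest news about OPEA?"}'
```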


@@ -38,7 +38,7 @@ export default defineConfig({
/* Maximum time each action such as `click()` can take. Defaults to 0 (no limit). */
actionTimeout: 0,
/* Base URL to use in actions like `await page.goto('/')`. */
baseURL: "http://localhost:5173",
baseURL: "http://localhost:80",
/* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
trace: "on-first-retry",