Compare commits
22 commits
yao531441/...Fix-sec
| Author | SHA1 | Date |
|---|---|---|
|  | 52a6b22f3f |  |
|  | c8259d47f9 |  |
|  | b980d6a34c |  |
|  | 2f9959f0a5 |  |
|  | 51b9d3b975 |  |
|  | d9e7264a81 |  |
|  | 26cb531766 |  |
|  | e9153b82bb |  |
|  | 0890e94a21 |  |
|  | 581e954a8d |  |
|  | 8a9f3f4351 |  |
|  | 09d93ecce6 |  |
|  | ed918bcef1 |  |
|  | 1c0b1731c5 |  |
|  | 22174e68a5 |  |
|  | c8abbc4958 |  |
|  | 7ee6f3657c |  |
|  | 11b04b38db |  |
|  | 7f55b5a100 |  |
|  | bb9ec6e5d2 |  |
|  | 3fb59a9769 |  |
|  | 410df80925 |  |
.github/workflows/_gmc-e2e.yml (vendored, 3 changes)

@@ -3,7 +3,8 @@
# This workflow will only test GMC pipeline and will not install GMC any more
name: Single GMC E2e Test For CD Workflow Call

permissions:
  contents: read
on:
  workflow_call:
    inputs:
.github/workflows/_gmc-workflow.yml (vendored, 3 changes)

@@ -2,7 +2,8 @@
# SPDX-License-Identifier: Apache-2.0

name: Build and deploy GMC system on call and manual

permissions:
  contents: read
on:
  workflow_dispatch:
    inputs:
.github/workflows/_run-docker-compose.yml (vendored, 10 changes)

@@ -204,6 +204,10 @@ jobs:
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid && sleep 1s; fi

echo "Cleaning up images ..."
df -h
sleep 1
docker system df
sleep 1
if [[ "${{ inputs.hardware }}" == "xeon"* ]]; then
docker system prune -a -f
else

@@ -213,7 +217,13 @@
docker images --filter reference="opea/comps-base" -q | xargs -r docker rmi && sleep 1s
docker system prune -f
fi
sleep 5
docker images
sleep 1
df -h
sleep 1
docker system df
sleep 1

- name: Publish pipeline artifact
  if: ${{ !cancelled() }}
.github/workflows/dockerhub-description.yml (vendored, 2 changes)

@@ -2,6 +2,8 @@
# SPDX-License-Identifier: Apache-2.0

name: Update Docker Hub Description
permissions:
  contents: read
on:
  schedule:
    - cron: "0 0 * * 0"
.github/workflows/manual-docker-clean.yml (vendored, 2 changes)

@@ -2,6 +2,8 @@
# SPDX-License-Identifier: Apache-2.0

name: Clean up container on manual event
permissions:
  contents: read
on:
  workflow_dispatch:
    inputs:
.github/workflows/manual-freeze-tag.yml (vendored, 3 changes)

@@ -2,7 +2,8 @@
# SPDX-License-Identifier: Apache-2.0

name: Freeze OPEA images release tag

permissions:
  contents: read
on:
  workflow_dispatch:
    inputs:
.github/workflows/manual-image-build.yml (vendored, 2 changes)

@@ -2,6 +2,8 @@
# SPDX-License-Identifier: Apache-2.0

name: Build specific images on manual event
permissions:
  contents: read
on:
  workflow_dispatch:
    inputs:
@@ -2,6 +2,8 @@
# SPDX-License-Identifier: Apache-2.0

name: Clean up Local Registry on manual event
permissions:
  contents: read
on:
  workflow_dispatch:
    inputs:
.github/workflows/mix-trellix.yml (vendored, 3 changes)

@@ -2,7 +2,8 @@
# SPDX-License-Identifier: Apache-2.0

name: Trellix Command Line Scanner

permissions:
  contents: read
on:
  workflow_dispatch:
  schedule:
@@ -2,7 +2,8 @@
# SPDX-License-Identifier: Apache-2.0

name: Nightly build/publish latest docker images

permissions:
  contents: read
on:
  schedule:
    - cron: "30 14 * * 1-5" # UTC time
.github/workflows/pr-chart-e2e.yml (vendored, 3 changes)

@@ -2,7 +2,8 @@
# SPDX-License-Identifier: Apache-2.0

name: E2E Test with Helm Charts

permissions:
  contents: read
on:
  pull_request_target:
    branches: [main]
@@ -2,7 +2,8 @@
# SPDX-License-Identifier: Apache-2.0

name: Check Duplicated Images

permissions:
  contents: read
on:
  pull_request:
    branches: [main]
.github/workflows/pr-code-scan.yml (vendored, 4 changes)

@@ -2,7 +2,9 @@
# SPDX-License-Identifier: Apache-2.0

name: Code Scan

permissions:
  contents: read
  security-events: write
on:
  pull_request:
    branches: [main]
.github/workflows/pr-docker-compose-e2e.yml (vendored, 3 changes)

@@ -3,6 +3,9 @@
name: E2E test with docker compose

permissions:
  contents: read

on:
  pull_request_target:
    branches: ["main", "*rc"]
@@ -2,7 +2,8 @@
# SPDX-License-Identifier: Apache-2.0

name: Compose file and dockerfile path checking

permissions:
  contents: read
on:
  pull_request:
    branches: [main]
.github/workflows/pr-link-path-scan.yml (vendored, 9 changes)

@@ -3,6 +3,9 @@
name: Check hyperlinks and relative path validity

permissions:
  contents: read

on:
  pull_request:
    branches: [main]

@@ -80,6 +83,7 @@ jobs:
- name: Checking Relative Path Validity
  run: |
    cd ${{github.workspace}}
    delay=15
    fail="FALSE"
    repo_name=${{ github.event.pull_request.head.repo.full_name }}
    branch="https://github.com/$repo_name/blob/${{ github.event.pull_request.head.ref }}"

@@ -111,14 +115,15 @@
if [[ "$png_line" == *#* ]]; then
  if [ -n "changed_files" ] && echo "$changed_files" | grep -q "^${refer_path}$"; then
    url_dev=$branch$(echo "$real_path" | sed 's|.*/GenAIExamples||')$png_path
    sleep $delay
    response=$(curl -I -L -s -o /dev/null -w "%{http_code}" "$url_dev")
    if [ "$response" -ne 200 ]; then
      echo "**********Validation failed, try again**********"
      echo "**********Validation failed ($response), try again**********"
      response_retry=$(curl -s -o /dev/null -w "%{http_code}" "$url_dev")
      if [ "$response_retry" -eq 200 ]; then
        echo "*****Retry successfully*****"
      else
        echo "Invalid path from ${{github.workspace}}/$refer_path: $png_path"
        echo "Invalid path ($response_retry) from ${{github.workspace}}/$refer_path: $png_path"
        fail="TRUE"
      fi
    else
.github/workflows/push-image-build.yml (vendored, 3 changes)

@@ -3,6 +3,9 @@
# Test
name: Build latest images on push event

permissions:
  contents: read

on:
  push:
    branches: [ 'main' ]
@@ -3,10 +3,12 @@
name: Check the validity of links in docker_images_list.

permissions:
  contents: read

on:
  push:
    branches: [main]
    types: [opened, reopened, ready_for_review, synchronize]

jobs:
  check-dockerfile-paths:
@@ -8,6 +8,10 @@ on:
- "**/docker_compose/**/compose*.yaml"

name: Create an issue to GenAIInfra on push

permissions:
  contents: read

jobs:
  job1:
    name: Create issue
.github/workflows/weekly-example-test.yml (vendored, 4 changes)

@@ -3,13 +3,15 @@
name: Weekly test all examples on multiple HWs

permissions: read-all

on:
  schedule:
    - cron: "30 2 * * 6" # UTC time
  workflow_dispatch:

env:
  EXAMPLES: ${{ vars.NIGHTLY_RELEASE_EXAMPLES }}
  EXAMPLES: "CodeTrans" #${{ vars.NIGHTLY_RELEASE_EXAMPLES }}
  NODES: "gaudi,xeon,rocm,arc"

jobs:
@@ -1,8 +1,9 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

ARG IMAGE_REPO=opea
ARG BASE_TAG=latest
FROM opea/comps-base:$BASE_TAG
FROM $IMAGE_REPO/comps-base:$BASE_TAG

COPY ./avatarchatbot.py $HOME/avatarchatbot.py
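The hunk above parameterizes the base image repository. A minimal sketch of exercising the new build args directly with docker build; the registry name and tag below are illustrative placeholders, not values taken from this change set:

```bash
# Build the AvatarChatbot image against a custom base-image repo (placeholder values).
docker build \
  --build-arg IMAGE_REPO=my-registry.example.com/opea \
  --build-arg BASE_TAG=latest \
  -t my-registry.example.com/opea/avatarchatbot:latest \
  -f Dockerfile .
```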
@@ -41,7 +41,7 @@ export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
export AUDIO='None'
export FACESIZE=96
export OUTFILE="/outputs/result.mp4"
export OUTFILE="./outputs/result.mp4"
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
export UPSCALE_FACTOR=1
export FPS=10
export FPS=5
@@ -5,3 +5,32 @@
pushd "../../../../../" > /dev/null
source .set_env.sh
popd > /dev/null

export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export host_ip=$(hostname -I | awk '{print $1}')
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
export WAV2LIP_ENDPOINT=http://$host_ip:7860
export MEGA_SERVICE_HOST_IP=${host_ip}
export WHISPER_SERVER_HOST_IP=${host_ip}
export WHISPER_SERVER_PORT=7066
export SPEECHT5_SERVER_HOST_IP=${host_ip}
export SPEECHT5_SERVER_PORT=7055
export LLM_SERVER_HOST_IP=${host_ip}
export LLM_SERVER_PORT=3006
export ANIMATION_SERVICE_HOST_IP=${host_ip}
export ANIMATION_SERVICE_PORT=3008

export MEGA_SERVICE_PORT=8888

export DEVICE="cpu"
export WAV2LIP_PORT=7860
export INFERENCE_MODE='wav2lip+gfpgan'
export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
export AUDIO='None'
export FACESIZE=96
export OUTFILE="/outputs/result.mp4"
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
export UPSCALE_FACTOR=1
export FPS=10
@@ -5,3 +5,35 @@
pushd "../../../../../" > /dev/null
source .set_env.sh
popd > /dev/null

export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
export host_ip=$(hostname -I | awk '{print $1}')

export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3

export WAV2LIP_ENDPOINT=http://$host_ip:7860

export MEGA_SERVICE_HOST_IP=${host_ip}
export WHISPER_SERVER_HOST_IP=${host_ip}
export WHISPER_SERVER_PORT=7066
export SPEECHT5_SERVER_HOST_IP=${host_ip}
export SPEECHT5_SERVER_PORT=7055
export LLM_SERVER_HOST_IP=${host_ip}
export LLM_SERVER_PORT=3006
export ANIMATION_SERVICE_HOST_IP=${host_ip}
export ANIMATION_SERVICE_PORT=3008

export MEGA_SERVICE_PORT=8888

export DEVICE="hpu"
export WAV2LIP_PORT=7860
export INFERENCE_MODE='wav2lip+gfpgan'
export CHECKPOINT_PATH='/usr/local/lib/python3.10/dist-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
export FACE="/home/user/comps/animation/src/assets/img/avatar1.jpg"
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
export AUDIO='None'
export FACESIZE=96
export OUTFILE="/outputs/result.mp4"
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
export UPSCALE_FACTOR=1
export FPS=10
@@ -5,6 +5,8 @@ services:
  avatarchatbot:
    build:
      args:
        IMAGE_REPO: ${REGISTRY:-opea}
        BASE_TAG: ${TAG:-latest}
        http_proxy: ${http_proxy}
        https_proxy: ${https_proxy}
        no_proxy: ${no_proxy}
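For reference, a sketch of how the new IMAGE_REPO/BASE_TAG build args are typically driven from the environment when building through compose; the registry and tag values are placeholders and the command assumes it is run from the directory holding this compose file:

```bash
# Forward registry/tag into the build args shown above (placeholder values).
REGISTRY=my-registry.example.com/opea TAG=latest docker compose build avatarchatbot
```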
AvatarChatbot/tests/README.md (new file, 27 lines)

@@ -0,0 +1,27 @@
# AvatarChatbot E2E test scripts

## Set the required environment variable

```bash
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
```

## Run test

On Intel Xeon with TGI:

```bash
bash test_compose_on_xeon.sh
```

On Intel Gaudi with TGI:

```bash
bash test_compose_on_gaudi.sh
```

On AMD ROCm with TGI:

```bash
bash test_compose_on_rocm.sh
```
@@ -24,19 +24,13 @@ ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
opea_branch=${opea_branch:-"main"}
# If the opea_branch isn't main, replace the git clone branch in Dockerfile.
if [[ "${opea_branch}" != "main" ]]; then
cd $WORKPATH
OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
find . -type f -name "Dockerfile*" | while read -r file; do
echo "Processing file: $file"
sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
done
fi

cd $WORKPATH/docker_image_build
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s

echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="avatarchatbot whisper-gaudi speecht5-gaudi wav2lip-gaudi animation"

@@ -51,37 +45,7 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi

export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
export host_ip=$(hostname -I | awk '{print $1}')
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
export WAV2LIP_ENDPOINT=http://$host_ip:7860
export MEGA_SERVICE_HOST_IP=${host_ip}
export WHISPER_SERVER_HOST_IP=${host_ip}
export WHISPER_SERVER_PORT=7066
export SPEECHT5_SERVER_HOST_IP=${host_ip}
export SPEECHT5_SERVER_PORT=7055
export LLM_SERVER_HOST_IP=${host_ip}
export LLM_SERVER_PORT=3006
export ANIMATION_SERVICE_HOST_IP=${host_ip}
export ANIMATION_SERVICE_PORT=3008
export MEGA_SERVICE_PORT=8888
export DEVICE="hpu"
export WAV2LIP_PORT=7860
export INFERENCE_MODE='wav2lip+gfpgan'
export CHECKPOINT_PATH='/usr/local/lib/python3.10/dist-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
export FACE="/home/user/comps/animation/src/assets/img/avatar1.jpg"
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
export AUDIO='None'
export FACESIZE=96
export OUTFILE="/outputs/result.mp4"
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
export UPSCALE_FACTOR=1
export FPS=10
source set_env.sh

# Start Docker Containers
docker compose up -d > ${LOG_PATH}/start_services_with_compose.log

@@ -128,19 +92,29 @@ function stop_docker() {

function main() {
echo "::group::stop_docker"
stop_docker
echo y | docker builder prune --all
echo y | docker image prune
echo "::endgroup::"
docker builder prune --all -f
docker image prune -f

echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
start_services
# validate_microservices
validate_megaservice
# validate_frontend
echo "::endgroup::"

echo "::group::start_services"
start_services
echo "::endgroup::"

echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"

echo "::group::stop_docker"
stop_docker
echo y | docker builder prune --all
echo y | docker image prune
echo "::endgroup::"
docker builder prune --all -f
docker image prune -f
}
@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"

@@ -25,6 +25,10 @@ ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() {
cd $WORKPATH/docker_image_build
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s

echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="avatarchatbot whisper asr speecht5 tts wav2lip animation"

@@ -38,48 +42,8 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/amd/gpu/rocm

export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
export OPENAI_API_KEY=$OPENAI_API_KEY
export host_ip=${ip_address}

export TGI_SERVICE_PORT=3006
export TGI_LLM_ENDPOINT=http://${host_ip}:${TGI_SERVICE_PORT}
export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"

export ASR_ENDPOINT=http://${host_ip}:7066
export TTS_ENDPOINT=http://${host_ip}:7055
export WAV2LIP_ENDPOINT=http://${host_ip}:7860

export MEGA_SERVICE_HOST_IP=${host_ip}
export ASR_SERVICE_HOST_IP=${host_ip}
export TTS_SERVICE_HOST_IP=${host_ip}
export LLM_SERVICE_HOST_IP=${host_ip}
export ANIMATION_SERVICE_HOST_IP=${host_ip}
export WHISPER_SERVER_HOST_IP=${host_ip}
export WHISPER_SERVER_PORT=7066
export SPEECHT5_SERVER_HOST_IP=${host_ip}
export SPEECHT5_SERVER_PORT=7055

export MEGA_SERVICE_PORT=8888
export ASR_SERVICE_PORT=3001
export TTS_SERVICE_PORT=3002
export LLM_SERVICE_PORT=3006
export ANIMATION_SERVICE_PORT=3008

export DEVICE="cpu"
export WAV2LIP_PORT=7860
export INFERENCE_MODE='wav2lip+gfpgan'
export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
export AUDIO='None'
export FACESIZE=96
export OUTFILE="./outputs/result.mp4"
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
export UPSCALE_FACTOR=1
export FPS=5
source set_env.sh

# Start Docker Containers
docker compose up -d --force-recreate

@@ -138,11 +102,6 @@ function validate_megaservice() {
}

#function validate_frontend() {
#}

function stop_docker() {
cd $WORKPATH/docker_compose/amd/gpu/rocm
docker compose down && docker compose rm -f

@@ -151,19 +110,27 @@ function stop_docker() {
function main() {

echo $OPENAI_API_KEY
echo $OPENAI_KEY

echo "::group::stop_docker"
stop_docker
echo "::endgroup::"

echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
start_services
# validate_microservices
sleep 30
validate_megaservice
# validate_frontend
stop_docker
echo "::endgroup::"

echo y | docker system prune
echo "::group::start_services"
start_services
echo "::endgroup::"

echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"

echo "::group::stop_docker"
stop_docker
echo "::endgroup::"

docker system prune -f
}
@@ -24,19 +24,13 @@ ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
opea_branch=${opea_branch:-"main"}
# If the opea_branch isn't main, replace the git clone branch in Dockerfile.
if [[ "${opea_branch}" != "main" ]]; then
cd $WORKPATH
OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
find . -type f -name "Dockerfile*" | while read -r file; do
echo "Processing file: $file"
sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
done
fi

cd $WORKPATH/docker_image_build
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s

echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="avatarchatbot whisper speecht5 wav2lip animation"

@@ -51,37 +45,7 @@ function build_docker_images() {
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon

export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
export host_ip=$(hostname -I | awk '{print $1}')
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
export WAV2LIP_ENDPOINT=http://$host_ip:7860
export MEGA_SERVICE_HOST_IP=${host_ip}
export WHISPER_SERVER_HOST_IP=${host_ip}
export WHISPER_SERVER_PORT=7066
export SPEECHT5_SERVER_HOST_IP=${host_ip}
export SPEECHT5_SERVER_PORT=7055
export LLM_SERVER_HOST_IP=${host_ip}
export LLM_SERVER_PORT=3006
export ANIMATION_SERVICE_HOST_IP=${host_ip}
export ANIMATION_SERVICE_PORT=3008
export MEGA_SERVICE_PORT=8888
export DEVICE="cpu"
export WAV2LIP_PORT=7860
export INFERENCE_MODE='wav2lip+gfpgan'
export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
export AUDIO='None'
export FACESIZE=96
export OUTFILE="/outputs/result.mp4"
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
export UPSCALE_FACTOR=1
export FPS=10
source set_env.sh

# Start Docker Containers
docker compose up -d

@@ -127,16 +91,28 @@ function stop_docker() {

function main() {
stop_docker
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
start_services
# validate_microservices
validate_megaservice
# validate_frontend
stop_docker

echo y | docker builder prune --all
echo y | docker image prune
echo "::group::stop_docker"
stop_docker
echo "::endgroup::"

echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "::endgroup::"

echo "::group::start_services"
start_services
echo "::endgroup::"

echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"

echo "::group::stop_docker"
stop_docker
echo "::endgroup::"

docker system prune -f
}
@@ -16,7 +16,7 @@ services:
- chatqna-redis-vector-db
- chatqna-tei-embedding-service
ports:
- "${CHATQNA_REDIS_DATAPREP_PORT}:5000"
- "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}

@@ -16,7 +16,7 @@ services:
- chatqna-redis-vector-db
- chatqna-tei-embedding-service
ports:
- "${CHATQNA_REDIS_DATAPREP_PORT}:5000"
- "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}

@@ -16,7 +16,7 @@ services:
- chatqna-redis-vector-db
- chatqna-tei-embedding-service
ports:
- "${CHATQNA_REDIS_DATAPREP_PORT}:5000"
- "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}

@@ -16,7 +16,7 @@ services:
- chatqna-redis-vector-db
- chatqna-tei-embedding-service
ports:
- "${CHATQNA_REDIS_DATAPREP_PORT:-5000}:5000"
- "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000"
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
@@ -2,17 +2,17 @@
# Copyright (C) 2025 Advanced Micro Devices, Inc.

export HOST_IP=''
export HOST_IP_EXTERNAL=''
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}

export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"

export CHATQNA_BACKEND_SERVICE_PORT=18102
export CHATQNA_FRONTEND_SERVICE_PORT=18101
export CHATQNA_NGINX_PORT=18104
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
@@ -2,18 +2,18 @@
# Copyright (C) 2025 Advanced Micro Devices, Inc.

export HOST_IP=''
export HOST_IP_EXTERNAL=''
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}

export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"

export CHATQNA_BACKEND_SERVICE_PORT=18102
export CHATQNA_FRONTEND_SERVICE_PORT=18101
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_LLM_FAQGEN_PORT=18011
export CHATQNA_NGINX_PORT=18104
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
@@ -2,18 +2,18 @@
# Copyright (C) 2025 Advanced Micro Devices, Inc.

export HOST_IP=''
export HOST_IP_EXTERNAL=''
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}

export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"

export CHATQNA_BACKEND_SERVICE_PORT=18102
export CHATQNA_FRONTEND_SERVICE_PORT=18101
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_LLM_FAQGEN_PORT=18011
export CHATQNA_NGINX_PORT=18104
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
@@ -2,17 +2,17 @@
# Copyright (C) 2025 Advanced Micro Devices, Inc.

export HOST_IP=''
export HOST_IP_EXTERNAL=''
export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}

export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"

export CHATQNA_BACKEND_SERVICE_PORT=18102
export CHATQNA_FRONTEND_SERVICE_PORT=18101
export CHATQNA_NGINX_PORT=18104
export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
@@ -1,6 +1,8 @@
# Copyright (C) 2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
rm *.json
if ls *.json 1> /dev/null 2>&1; then
rm *.json
fi
wget https://raw.githubusercontent.com/opea-project/GenAIEval/refs/heads/main/evals/benchmark/grafana/chatqna_megaservice_grafana.json
wget https://raw.githubusercontent.com/opea-project/GenAIEval/refs/heads/main/evals/benchmark/grafana/qdrant_grafana.json
wget https://raw.githubusercontent.com/opea-project/GenAIEval/refs/heads/main/evals/benchmark/grafana/milvus_grafana.json
@@ -7,6 +7,9 @@ pushd "../../../../../" > /dev/null
source .set_env.sh
popd > /dev/null

export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export HF_TOKEN=${HF_TOKEN}
export host_ip=${ip_address}
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
@@ -43,7 +43,7 @@ Some HuggingFace resources, such as some models, are only accessible if you have

### Configure the Deployment Environment

To set up environment variables for deploying ChatQnA services, source the _setup_env.sh_ script in this directory:
To set up environment variables for deploying ChatQnA services, source the _setup_env.sh_ script in this directory (If using faqgen or guardrails, source the _set_env_faqgen.sh_):

```
source ./set_env.sh
@@ -4,12 +4,20 @@
# SPDX-License-Identifier: Apache-2.0

# Function to prompt for input and set environment variables
NON_INTERACTIVE=${NON_INTERACTIVE:-false}

prompt_for_env_var() {
local var_name="$1"
local prompt_message="$2"
local default_value="$3"
local mandatory="$4"

if [[ "$NON_INTERACTIVE" == "true" ]]; then
echo "Non-interactive environment detected. Setting $var_name to default: $default_value"
export "$var_name"="$default_value"
return
fi

if [[ "$mandatory" == "true" ]]; then
while [[ -z "$value" ]]; do
read -p "$prompt_message [default: \"${default_value}\"]: " value

@@ -34,7 +42,7 @@ popd > /dev/null

# Prompt the user for each required environment variable
prompt_for_env_var "EMBEDDING_MODEL_ID" "Enter the EMBEDDING_MODEL_ID" "BAAI/bge-base-en-v1.5" false
prompt_for_env_var "HUGGINGFACEHUB_API_TOKEN" "Enter the HUGGINGFACEHUB_API_TOKEN" "" true
prompt_for_env_var "HUGGINGFACEHUB_API_TOKEN" "Enter the HUGGINGFACEHUB_API_TOKEN" "${HF_TOKEN}" true
prompt_for_env_var "RERANK_MODEL_ID" "Enter the RERANK_MODEL_ID" "BAAI/bge-reranker-base" false
prompt_for_env_var "LLM_MODEL_ID" "Enter the LLM_MODEL_ID" "meta-llama/Meta-Llama-3-8B-Instruct" false
prompt_for_env_var "INDEX_NAME" "Enter the INDEX_NAME" "rag-redis" false

@@ -42,34 +50,40 @@ prompt_for_env_var "NUM_CARDS" "Enter the number of Gaudi devices" "1" false
prompt_for_env_var "host_ip" "Enter the host_ip" "$(curl ifconfig.me)" false

#Query for enabling http_proxy
prompt_for_env_var "http_proxy" "Enter the http_proxy." "" false
prompt_for_env_var "http_proxy" "Enter the http_proxy." "${http_proxy}" false

#Query for enabling https_proxy
prompt_for_env_var "https_proxy" "Enter the https_proxy." "" false
prompt_for_env_var "http_proxy" "Enter the http_proxy." "${https_proxy}" false

#Query for enabling no_proxy
prompt_for_env_var "no_proxy" "Enter the no_proxy." "" false
prompt_for_env_var "no_proxy" "Enter the no_proxy." "${no_proxy}" false

# Query for enabling logging
read -p "Enable logging? (yes/no): " logging && logging=$(echo "$logging" | tr '[:upper:]' '[:lower:]')
if [[ "$logging" == "yes" || "$logging" == "y" ]]; then
export LOGFLAG=true
if [[ "$NON_INTERACTIVE" == "true" ]]; then
# Query for enabling logging
prompt_for_env_var "LOGFLAG" "Enable logging? (yes/no): " "true" false
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
telemetry_flag=true
else
export LOGFLAG=false
fi

# Query for enabling OpenTelemetry Tracing Endpoint
read -p "Enable OpenTelemetry Tracing Endpoint? (yes/no): " telemetry && telemetry=$(echo "$telemetry" | tr '[:upper:]' '[:lower:]')
if [[ "$telemetry" == "yes" || "$telemetry" == "y" ]]; then
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
telemetry_flag=true
pushd "grafana/dashboards" > /dev/null
source download_opea_dashboard.sh
popd > /dev/null
else
telemetry_flag=false
# Query for enabling logging
read -p "Enable logging? (yes/no): " logging && logging=$(echo "$logging" | tr '[:upper:]' '[:lower:]')
if [[ "$logging" == "yes" || "$logging" == "y" ]]; then
export LOGFLAG=true
else
export LOGFLAG=false
fi
# Query for enabling OpenTelemetry Tracing Endpoint
read -p "Enable OpenTelemetry Tracing Endpoint? (yes/no): " telemetry && telemetry=$(echo "$telemetry" | tr '[:upper:]' '[:lower:]')
if [[ "$telemetry" == "yes" || "$telemetry" == "y" ]]; then
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
telemetry_flag=true
else
telemetry_flag=false
fi
fi

# Generate the .env file
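A minimal sketch of how the NON_INTERACTIVE switch introduced above is meant to be used in CI, assuming the script is sourced from its own directory (this mirrors the usage visible in the Gaudi test script later in this diff):

```bash
# Skip every interactive prompt and take the defaults; per the non-interactive
# branch above, logging defaults to true and the telemetry endpoints are exported.
export NON_INTERACTIVE=true
source ./set_env.sh
```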
ChatQnA/docker_compose/intel/hpu/gaudi/set_env_faqgen.sh (new executable file, 32 lines)

@@ -0,0 +1,32 @@
#!/usr/bin/env bash

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

pushd "../../../../../" > /dev/null
source .set_env.sh
popd > /dev/null

export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export HF_TOKEN=${HF_TOKEN}
export host_ip=${ip_address}
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export NUM_CARDS=1
export VLLM_SKIP_WARMUP=true
export LOGFLAG=True
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-gaudi-backend-server,chatqna-gaudi-ui-server,chatqna-gaudi-nginx-server"

export LLM_ENDPOINT_PORT=8010
export LLM_SERVER_PORT=9001
export CHATQNA_BACKEND_PORT=8888
export CHATQNA_REDIS_VECTOR_PORT=6377
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
export CHATQNA_FRONTEND_SERVICE_PORT=5175
export NGINX_PORT=80
export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM"
export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
ChatQnA/tests/README.md (new file, 123 lines)

@@ -0,0 +1,123 @@
# ChatQnA E2E test scripts

## Set the required environment variable

```bash
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
```

## Run test

On Intel Xeon with TGI:

```bash
bash test_compose_tgi_on_xeon.sh
```

On Intel Xeon with vLLM:

```bash
bash test_compose_on_xeon.sh
```

On Intel Xeon with MariaDB Vector:

```bash
bash test_compose_mariadb_on_xeon.sh
```

On Intel Xeon with Pinecone:

```bash
bash test_compose_pinecone_on_xeon.sh
```

On Intel Xeon with Milvus

```bash
bash test_compose_milvus_on_xeon.sh
```

On Intel Xeon with Qdrant

```bash
bash test_compose_qdrant_on_xeon.sh
```

On Intel Xeon without Rerank:

```bash
bash test_compose_without_rerank_on_xeon.sh
```

On Intel Gaudi with TGI:

```bash
bash test_compose_tgi_on_gaudi.sh
```

On Intel Gaudi with vLLM:

```bash
bash test_compose_on_gaudi.sh
```

On Intel Gaudi with Guardrails:

```bash
bash test_compose_guardrails_on_gaudi.sh
```

On Intel Gaudi without Rerank:

```bash
bash test_compose_without_rerank_on_gaudi.sh
```

On AMD ROCm with TGI:

```bash
bash test_compose_on_rocm.sh
```

On AMD ROCm with vLLM:

```bash
bash test_compose_vllm_on_rocm.sh
```

Test FAQ Generation On Intel Xeon with TGI:

```bash
bash test_compose_faqgen_tgi_on_xeon.sh
```

Test FAQ Generation On Intel Xeon with vLLM:

```bash
bash test_compose_faqgen_on_xeon.sh
```

Test FAQ Generation On Intel Gaudi with TGI:

```bash
bash test_compose_faqgen_tgi_on_gaudi.sh
```

Test FAQ Generation On Intel Gaudi with vLLM:

```bash
bash test_compose_faqgen_on_gaudi.sh
```

Test FAQ Generation On AMD ROCm with TGI:

```bash
bash test_compose_faqgen_on_rocm.sh
```

Test FAQ Generation On AMD ROCm with vLLM:

```bash
bash test_compose_faqgen_vllm_on_rocm.sh
```
@@ -36,27 +36,7 @@ function build_docker_images() {

function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export NUM_CARDS=1
export INDEX_NAME="rag-redis"
export host_ip=${ip_address}
export LLM_ENDPOINT_PORT=8010
export LLM_SERVER_PORT=9001
export CHATQNA_BACKEND_PORT=8888
export CHATQNA_REDIS_VECTOR_PORT=6377
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
export CHATQNA_FRONTEND_SERVICE_PORT=5175
export NGINX_PORT=80
export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM"
export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
export HF_TOKEN=${HF_TOKEN}
export VLLM_SKIP_WARMUP=true
export LOGFLAG=True
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-gaudi-backend-server,chatqna-gaudi-ui-server,chatqna-gaudi-nginx-server"
source set_env_faqgen.sh

# Start Docker Containers
docker compose -f compose_faqgen.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -15,44 +15,7 @@ WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}

export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"

export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_LLM_FAQGEN_PORT=18011
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
export CHATQNA_REDIS_VECTOR_PORT=6379
export CHATQNA_TEI_EMBEDDING_PORT=18090
export CHATQNA_TEI_RERANKING_PORT=18808
export CHATQNA_TGI_SERVICE_PORT=18008

export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna"
export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP}
export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete"
export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get"
export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest"
export CHATQNA_EMBEDDING_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP}
export CHATQNA_LLM_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_LLM_ENDPOINT="http://${HOST_IP}:${CHATQNA_TGI_SERVICE_PORT}"
export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}"
export CHATQNA_RERANK_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_RETRIEVER_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}"

export CHATQNA_BACKEND_SERVICE_NAME=chatqna
export CHATQNA_INDEX_NAME="rag-redis"
export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
source $WORKPATH/docker_compose/amd/gpu/rocm/set_env_faqgen.sh

export PATH="~/miniconda3/bin:$PATH"
@@ -37,26 +37,16 @@ function build_docker_images() {

function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export host_ip=${ip_address}
export LLM_ENDPOINT_PORT=8010
export LLM_SERVER_PORT=9001
export CHATQNA_BACKEND_PORT=8888
export CHATQNA_REDIS_VECTOR_PORT=6377
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
export CHATQNA_FRONTEND_SERVICE_PORT=5175
export NGINX_PORT=80
export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM"
export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
export HF_TOKEN=${HF_TOKEN}
export VLLM_SKIP_WARMUP=true
export LOGFLAG=True
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-xeon-backend-server,chatqna-xeon-ui-server,chatqna-xeon-nginx-server"
source set_env.sh

# Start Docker Containers
docker compose -f compose_faqgen.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -33,25 +33,8 @@ function build_docker_images() {

function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export host_ip=${ip_address}
export LLM_ENDPOINT_PORT=8010
export LLM_SERVER_PORT=9001
export CHATQNA_BACKEND_PORT=8888
export CHATQNA_REDIS_VECTOR_PORT=6377
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
export CHATQNA_FRONTEND_SERVICE_PORT=5175
export NGINX_PORT=80
export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
export HF_TOKEN=${HF_TOKEN}
export LOGFLAG=True
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-gaudi-backend-server,chatqna-gaudi-ui-server,chatqna-gaudi-nginx-server"
source set_env_faqgen.sh

# Start Docker Containers
docker compose -f compose_faqgen_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -37,25 +37,16 @@ function build_docker_images() {

function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export INDEX_NAME="rag-redis"
export host_ip=${ip_address}
export LLM_ENDPOINT_PORT=8010
export LLM_SERVER_PORT=9001
export CHATQNA_BACKEND_PORT=8888
export CHATQNA_REDIS_VECTOR_PORT=6377
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006
export CHATQNA_FRONTEND_SERVICE_PORT=5175
export NGINX_PORT=80
export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
export HF_TOKEN=${HF_TOKEN}
export LOGFLAG=True
export http_proxy=${http_proxy}
export https_proxy=${https_proxy}
export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-xeon-backend-server,chatqna-xeon-ui-server,chatqna-xeon-nginx-server"
source set_env.sh

# Start Docker Containers
docker compose -f compose_faqgen_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -14,41 +14,7 @@ WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}

export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"

export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_LLM_FAQGEN_PORT=18011
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
export CHATQNA_REDIS_VECTOR_PORT=6379
export CHATQNA_TEI_EMBEDDING_PORT=18090
export CHATQNA_TEI_RERANKING_PORT=18808
export CHATQNA_VLLM_SERVICE_PORT=18008

export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna"
export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP_EXTERNAL}
export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete"
export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get"
export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest"
export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP}
export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}"
export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}"
export LLM_ENDPOINT="http://${HOST_IP}:${CHATQNA_VLLM_SERVICE_PORT}"

export CHATQNA_BACKEND_SERVICE_NAME=chatqna
export CHATQNA_INDEX_NAME="rag-redis"
export CHATQNA_TYPE="CHATQNA_FAQGEN"
export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM"
source $WORKPATH/docker_compose/amd/gpu/rocm/set_env_faqgen_vllm.sh

function build_docker_images() {
opea_branch=${opea_branch:-"main"}
@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"

@@ -36,14 +36,8 @@ function build_docker_images() {

function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export NUM_CARDS=1
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export host_ip=${ip_address}
export GURADRAILS_MODEL_ID="meta-llama/Meta-Llama-Guard-2-8B"
source set_env_faqgen.sh

# Start Docker Containers
docker compose -f compose_guardrails.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -2,7 +2,7 @@
# Copyright (C) 2025 MariaDB Foundation
# SPDX-License-Identifier: Apache-2.0

set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"

@@ -39,14 +39,8 @@ function build_docker_images() {

function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export MARIADB_DATABASE="vectordb"
export MARIADB_USER="chatqna"
export MARIADB_PASSWORD="test"
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export host_ip=${ip_address}
source set_env_mariadb.sh

# Start Docker Containers
docker compose -f compose_mariadb.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

@@ -140,7 +134,7 @@ function validate_megaservice() {

function stop_docker() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
docker compose down
docker compose -f compose_mariadb.yaml down
}

function main() {
@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"

@@ -39,11 +39,8 @@ function build_docker_images() {
}
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon/
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export LOGFLAG=true
source set_env.sh

# Start Docker Containers
docker compose -f compose_milvus.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set -e
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"

@@ -36,16 +36,10 @@ function build_docker_images() {

function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export NUM_CARDS=1
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export NON_INTERACTIVE=true
export host_ip=${ip_address}
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
export telemetry=yes
source set_env.sh

# Start Docker Containers
docker compose -f compose.yaml -f compose.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
@@ -15,41 +15,7 @@ WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

export HOST_IP=${ip_address}
export HOST_IP_EXTERNAL=${ip_address}

export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"

export CHATQNA_BACKEND_SERVICE_PORT=8888
export CHATQNA_FRONTEND_SERVICE_PORT=5173
export CHATQNA_NGINX_PORT=80
export CHATQNA_REDIS_DATAPREP_PORT=18103
export CHATQNA_REDIS_RETRIEVER_PORT=7000
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
export CHATQNA_REDIS_VECTOR_PORT=6379
export CHATQNA_TEI_EMBEDDING_PORT=18090
export CHATQNA_TEI_RERANKING_PORT=18808
export CHATQNA_TGI_SERVICE_PORT=18008

export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna"
export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP}
export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete"
export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get"
export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest"
export CHATQNA_EMBEDDING_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP}
export CHATQNA_LLM_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}"
export CHATQNA_RERANK_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_RETRIEVER_SERVICE_HOST_IP=${HOST_IP}
export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}"

export CHATQNA_BACKEND_SERVICE_NAME=chatqna
export CHATQNA_INDEX_NAME="rag-redis"
source $WORKPATH/docker_compose/amd/gpu/rocm/set_env.sh

export PATH="~/miniconda3/bin:$PATH"
@@ -2,7 +2,7 @@
|
||||
# Copyright (C) 2024 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
set -xe
|
||||
IMAGE_REPO=${IMAGE_REPO:-"opea"}
|
||||
IMAGE_TAG=${IMAGE_TAG:-"latest"}
|
||||
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
|
||||
@@ -40,15 +40,7 @@ function build_docker_images() {
|
||||
function start_services() {
|
||||
cd $WORKPATH/docker_compose/intel/cpu/xeon
|
||||
|
||||
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
|
||||
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
|
||||
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
|
||||
export INDEX_NAME="rag-redis"
|
||||
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
|
||||
export host_ip=${ip_address}
|
||||
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
|
||||
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
|
||||
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
|
||||
source set_env.sh
|
||||
|
||||
# Start Docker Containers
|
||||
docker compose -f compose.yaml -f compose.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# Copyright (C) 2024 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
set -xe
|
||||
IMAGE_REPO=${IMAGE_REPO:-"opea"}
|
||||
IMAGE_TAG=${IMAGE_TAG:-"latest"}
|
||||
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
|
||||
@@ -41,14 +41,11 @@ function build_docker_images() {
|
||||
function start_services() {
|
||||
cd $WORKPATH/docker_compose/intel/cpu/xeon/
|
||||
export no_proxy=${no_proxy},${ip_address}
|
||||
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
|
||||
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
|
||||
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
|
||||
export PINECONE_API_KEY=${PINECONE_KEY_LANGCHAIN_TEST}
|
||||
export PINECONE_INDEX_NAME="langchain-test"
|
||||
export INDEX_NAME="langchain-test"
|
||||
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
|
||||
export LOGFLAG=true
|
||||
source set_env.sh
|
||||
|
||||
# Start Docker Containers
|
||||
docker compose -f compose_pinecone.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# Copyright (C) 2024 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
set -xe
|
||||
IMAGE_REPO=${IMAGE_REPO:-"opea"}
|
||||
IMAGE_TAG=${IMAGE_TAG:-"latest"}
|
||||
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
|
||||
@@ -40,11 +40,8 @@ function build_docker_images() {
|
||||
function start_services() {
|
||||
cd $WORKPATH/docker_compose/intel/cpu/xeon
|
||||
|
||||
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
|
||||
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
|
||||
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
|
||||
export INDEX_NAME="rag-qdrant"
|
||||
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
|
||||
source set_env.sh
|
||||
|
||||
sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# Copyright (C) 2024 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
set -xe
|
||||
IMAGE_REPO=${IMAGE_REPO:-"opea"}
|
||||
IMAGE_TAG=${IMAGE_TAG:-"latest"}
|
||||
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
|
||||
@@ -32,15 +32,10 @@ function build_docker_images() {
|
||||
|
||||
function start_services() {
|
||||
cd $WORKPATH/docker_compose/intel/hpu/gaudi
|
||||
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
|
||||
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
|
||||
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
|
||||
export NUM_CARDS=1
|
||||
export INDEX_NAME="rag-redis"
|
||||
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
|
||||
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
|
||||
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
|
||||
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
|
||||
export NON_INTERACTIVE=true
|
||||
export host_ip=${ip_address}
|
||||
export telemetry=yes
|
||||
source set_env.sh
|
||||
|
||||
# Start Docker Containers
|
||||
docker compose -f compose_tgi.yaml -f compose_tgi.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# Copyright (C) 2024 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
set -xe
|
||||
IMAGE_REPO=${IMAGE_REPO:-"opea"}
|
||||
IMAGE_TAG=${IMAGE_TAG:-"latest"}
|
||||
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
|
||||
@@ -33,14 +33,7 @@ function build_docker_images() {
|
||||
function start_services() {
|
||||
cd $WORKPATH/docker_compose/intel/cpu/xeon
|
||||
|
||||
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
|
||||
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
|
||||
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
|
||||
export INDEX_NAME="rag-redis"
|
||||
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
|
||||
export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
|
||||
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317
|
||||
export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces
|
||||
source set_env.sh
|
||||
|
||||
# Start Docker Containers
|
||||
docker compose -f compose_tgi.yaml -f compose_tgi.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# Copyright (C) 2024 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
set -xe
|
||||
IMAGE_REPO=${IMAGE_REPO:-"opea"}
|
||||
IMAGE_TAG=${IMAGE_TAG:-"latest"}
|
||||
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
|
||||
@@ -14,42 +14,7 @@ WORKPATH=$(dirname "$PWD")
|
||||
LOG_PATH="$WORKPATH/tests"
|
||||
ip_address=$(hostname -I | awk '{print $1}')
|
||||
|
||||
export HOST_IP=${ip_address}
|
||||
export HOST_IP_EXTERNAL=${ip_address}
|
||||
|
||||
export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
|
||||
export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
|
||||
export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
|
||||
export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base"
|
||||
|
||||
export CHATQNA_BACKEND_SERVICE_PORT=8888
|
||||
export CHATQNA_FRONTEND_SERVICE_PORT=5173
|
||||
export CHATQNA_NGINX_PORT=80
|
||||
export CHATQNA_REDIS_DATAPREP_PORT=18103
|
||||
export CHATQNA_REDIS_RETRIEVER_PORT=7000
|
||||
export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001
|
||||
export CHATQNA_REDIS_VECTOR_PORT=6379
|
||||
export CHATQNA_TEI_EMBEDDING_PORT=18090
|
||||
export CHATQNA_TEI_RERANKING_PORT=18808
|
||||
export CHATQNA_VLLM_SERVICE_PORT=18008
|
||||
|
||||
export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna"
|
||||
export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP_EXTERNAL}
|
||||
export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete"
|
||||
export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get"
|
||||
export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest"
|
||||
export CHATQNA_EMBEDDING_SERVICE_HOST_IP=${HOST_IP}
|
||||
export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP}
|
||||
export CHATQNA_LLM_SERVICE_HOST_IP=${HOST_IP}
|
||||
export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP}
|
||||
export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}"
|
||||
export CHATQNA_RERANK_SERVICE_HOST_IP=${HOST_IP}
|
||||
export CHATQNA_RETRIEVER_SERVICE_HOST_IP=${HOST_IP}
|
||||
export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}"
|
||||
|
||||
export CHATQNA_BACKEND_SERVICE_NAME=chatqna
|
||||
export CHATQNA_INDEX_NAME="rag-redis"
|
||||
|
||||
source $WORKPATH/docker_compose/amd/gpu/rocm/set_env_vllm.sh
|
||||
|
||||
function build_docker_images() {
|
||||
opea_branch=${opea_branch:-"main"}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# Copyright (C) 2024 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
set -xe
|
||||
IMAGE_REPO=${IMAGE_REPO:-"opea"}
|
||||
IMAGE_TAG=${IMAGE_TAG:-"latest"}
|
||||
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
|
||||
@@ -36,11 +36,8 @@ function build_docker_images() {
|
||||
|
||||
function start_services() {
|
||||
cd $WORKPATH/docker_compose/intel/hpu/gaudi
|
||||
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
|
||||
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
|
||||
export NUM_CARDS=1
|
||||
export INDEX_NAME="rag-redis"
|
||||
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
|
||||
export NON_INTERACTIVE=true
|
||||
source set_env.sh
|
||||
|
||||
# Start Docker Containers
|
||||
docker compose -f compose_without_rerank.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# Copyright (C) 2024 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
set -xe
|
||||
IMAGE_REPO=${IMAGE_REPO:-"opea"}
|
||||
IMAGE_TAG=${IMAGE_TAG:-"latest"}
|
||||
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
|
||||
@@ -41,10 +41,7 @@ function build_docker_images() {
|
||||
function start_services() {
|
||||
cd $WORKPATH/docker_compose/intel/cpu/xeon
|
||||
|
||||
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
|
||||
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
|
||||
export INDEX_NAME="rag-redis"
|
||||
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
|
||||
source set_env.sh
|
||||
|
||||
# Start Docker Containers
|
||||
docker compose -f compose_without_rerank.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
# Copyright (C) 2024 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
ARG IMAGE_REPO=opea
|
||||
ARG BASE_TAG=latest
|
||||
FROM opea/comps-base:$BASE_TAG
|
||||
FROM $IMAGE_REPO/comps-base:$BASE_TAG
|
||||
|
||||
COPY ./codegen.py $HOME/codegen.py
|
||||
|
||||
|
||||
@@ -52,18 +52,29 @@ This uses the default vLLM-based deployment profile (`codegen-xeon-vllm`).
|
||||
|
||||
```bash
|
||||
# Replace with your host's external IP address (do not use localhost or 127.0.0.1)
|
||||
export HOST_IP="your_external_ip_address"
|
||||
export host_ip="your_external_ip_address"
|
||||
# Replace with your Hugging Face Hub API token
|
||||
export HUGGINGFACEHUB_API_TOKEN="your_huggingface_token"
|
||||
|
||||
# Optional: Configure proxy if needed
|
||||
# export http_proxy="your_http_proxy"
|
||||
# export https_proxy="your_https_proxy"
|
||||
# export no_proxy="localhost,127.0.0.1,${HOST_IP}" # Add other hosts if necessary
|
||||
# export no_proxy="localhost,127.0.0.1,${host_ip}" # Add other hosts if necessary
|
||||
source ../../../set_env.sh
|
||||
```
|
||||
|
||||
_Note: The compose file might read additional variables from a `.env` file or expect them defined elsewhere. Ensure all required variables like ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.) are set if not using defaults from the compose file._
|
||||
_Note: The compose file might read additional variables from `set_env.sh`. Ensure all required variables, such as ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.), are set if not using the defaults from the compose file._
|
||||
For example:
|
||||
|
||||
```
|
||||
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-32B-Instruct"
|
||||
```
|
||||
|
||||
This can be changed to a smaller model if needed:
|
||||
|
||||
```
|
||||
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
|
||||
```
|
||||
|
||||
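In the same spirit, the port variables named in the note above can be set explicitly if the defaults clash with other services on the host. A minimal sketch, assuming the variable names consumed by `compose.yaml`/`set_env.sh` match those in the note; the values are placeholders:

```bash
# Hypothetical port overrides - pick free ports on your host
export LLM_SERVICE_PORT=9000
export MEGA_SERVICE_PORT=7778
```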
2. **Start Services (vLLM Profile):**
|
||||
|
||||
@@ -91,7 +102,7 @@ The `compose.yaml` file uses Docker Compose profiles to select the LLM serving b
|
||||
- **Services Deployed:** `codegen-tgi-server`, `codegen-llm-server`, `codegen-tei-embedding-server`, `codegen-retriever-server`, `redis-vector-db`, `codegen-dataprep-server`, `codegen-backend-server`, `codegen-gradio-ui-server`.
|
||||
- **To Run:**
|
||||
```bash
|
||||
# Ensure environment variables (HOST_IP, HUGGINGFACEHUB_API_TOKEN) are set
|
||||
# Ensure environment variables (host_ip, HUGGINGFACEHUB_API_TOKEN) are set
|
||||
docker compose --profile codegen-xeon-tgi up -d
|
||||
```
|
||||
|
||||
@@ -103,14 +114,14 @@ Key parameters are configured via environment variables set before running `dock
|
||||
|
||||
| Environment Variable | Description | Default (Set Externally) |
|
||||
| :-------------------------------------- | :------------------------------------------------------------------------------------------------------------------ | :----------------------------------------------------------------------------------------------- |
|
||||
| `HOST_IP` | External IP address of the host machine. **Required.** | `your_external_ip_address` |
|
||||
| `host_ip` | External IP address of the host machine. **Required.** | `your_external_ip_address` |
|
||||
| `HUGGINGFACEHUB_API_TOKEN` | Your Hugging Face Hub token for model access. **Required.** | `your_huggingface_token` |
|
||||
| `LLM_MODEL_ID` | Hugging Face model ID for the CodeGen LLM (used by TGI/vLLM service). Configured within `compose.yaml` environment. | `Qwen/Qwen2.5-Coder-7B-Instruct` |
|
||||
| `EMBEDDING_MODEL_ID` | Hugging Face model ID for the embedding model (used by TEI service). Configured within `compose.yaml` environment. | `BAAI/bge-base-en-v1.5` |
|
||||
| `LLM_ENDPOINT` | Internal URL for the LLM serving endpoint (used by `codegen-llm-server`). Configured in `compose.yaml`. | `http://codegen-tgi-server:80/generate` or `http://codegen-vllm-server:8000/v1/chat/completions` |
|
||||
| `TEI_EMBEDDING_ENDPOINT` | Internal URL for the Embedding service. Configured in `compose.yaml`. | `http://codegen-tei-embedding-server:80/embed` |
|
||||
| `DATAPREP_ENDPOINT` | Internal URL for the Data Preparation service. Configured in `compose.yaml`. | `http://codegen-dataprep-server:80/dataprep` |
|
||||
| `BACKEND_SERVICE_ENDPOINT` | External URL for the CodeGen Gateway (MegaService). Derived from `HOST_IP` and port `7778`. | `http://${HOST_IP}:7778/v1/codegen` |
|
||||
| `BACKEND_SERVICE_ENDPOINT` | External URL for the CodeGen Gateway (MegaService). Derived from `host_ip` and port `7778`. | `http://${host_ip}:7778/v1/codegen` |
|
||||
| `*_PORT` (Internal) | Internal container ports (e.g., `80`, `6379`). Defined in `compose.yaml`. | N/A |
|
||||
| `http_proxy` / `https_proxy`/`no_proxy` | Network proxy settings (if required). | `""` |
|
||||
|
||||
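As an illustration, a minimal override of these settings before bringing the stack up could look like the following. This is a sketch only; confirm the exact variable names against `compose.yaml` and `set_env.sh`, and replace the placeholder values with your own:

```bash
# Placeholder values - replace with your own host IP, token, and model choice
export host_ip="192.168.1.10"
export HUGGINGFACEHUB_API_TOKEN="your_huggingface_token"
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"

docker compose --profile codegen-xeon-vllm up -d
```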
@@ -150,23 +161,23 @@ Check logs for specific services: `docker compose logs <service_name>`
|
||||
|
||||
### Run Validation Script/Commands
|
||||
|
||||
Use `curl` commands to test the main service endpoints. Ensure `HOST_IP` is correctly set in your environment.
|
||||
Use `curl` commands to test the main service endpoints. Ensure `host_ip` is correctly set in your environment.
|
||||
|
||||
1. **Validate LLM Serving Endpoint (Example for vLLM on default port 8000 internally, exposed differently):**
|
||||
1. **Validate LLM Serving Endpoint (Example for vLLM on default port 9000 internally, exposed differently):**
|
||||
|
||||
```bash
|
||||
# This command structure targets the OpenAI-compatible vLLM endpoint
|
||||
curl http://${HOST_IP}:8000/v1/chat/completions \
|
||||
curl http://${host_ip}:9000/v1/chat/completions \
|
||||
-X POST \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "Implement a basic Python class"}], "max_tokens":32}'
|
||||
-d '{"model": "Qwen/Qwen2.5-Coder-32B-Instruct", "messages": [{"role": "user", "content": "Implement a basic Python class"}], "max_tokens":32}'
|
||||
```
|
||||
|
||||
- **Expected Output:** A JSON response with the generated code in `choices[0].message.content` (see the `jq` example after these validation steps).
|
||||
|
||||
2. **Validate CodeGen Gateway (MegaService on default port 7778):**
|
||||
```bash
|
||||
curl http://${HOST_IP}:7778/v1/codegen \
|
||||
curl http://${host_ip}:7778/v1/codegen \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"messages": "Write a Python function that adds two numbers."}'
|
||||
```
|
||||
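If `jq` is available on the host, the generated text from the LLM serving endpoint can be extracted directly. This is a sketch; the field path assumes the OpenAI-compatible response shape noted above:

```bash
# Re-run the validation request and print only the generated text
curl -s http://${host_ip}:9000/v1/chat/completions \
  -X POST \
  -H 'Content-Type: application/json' \
  -d '{"model": "Qwen/Qwen2.5-Coder-32B-Instruct", "messages": [{"role": "user", "content": "Implement a basic Python class"}], "max_tokens":32}' \
  | jq -r '.choices[0].message.content'
```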
@@ -179,8 +190,8 @@ Multiple UI options can be configured via the `compose.yaml`.
|
||||
### Gradio UI (Default)
|
||||
|
||||
Access the default Gradio UI by navigating to:
|
||||
`http://{HOST_IP}:8080`
|
||||
_(Port `8080` is the default host mapping for `codegen-gradio-ui-server`)_
|
||||
`http://{host_ip}:5173`
|
||||
_(Port `5173` is the default host mapping for `codegen-gradio-ui-server`)_
|
||||
|
||||

|
||||

|
||||
@@ -189,7 +200,7 @@ _(Port `8080` is the default host mapping for `codegen-gradio-ui-server`)_
|
||||
|
||||
1. Modify `compose.yaml`: Comment out the `codegen-gradio-ui-server` service and uncomment/add the `codegen-xeon-ui-server` (Svelte) service definition, ensuring the port mapping is correct (e.g., `"- 5173:5173"`).
|
||||
2. Restart Docker Compose: `docker compose --profile <profile_name> up -d`
|
||||
3. Access: `http://{HOST_IP}:5173` (or the host port you mapped).
|
||||
3. Access: `http://{host_ip}:5173` (or the host port you mapped).
|
||||
|
||||

|
||||
|
||||
@@ -197,7 +208,7 @@ _(Port `8080` is the default host mapping for `codegen-gradio-ui-server`)_
|
||||
|
||||
1. Modify `compose.yaml`: Comment out the default UI service and uncomment/add the `codegen-xeon-react-ui-server` definition, ensuring correct port mapping (e.g., `"- 5174:80"`).
|
||||
2. Restart Docker Compose: `docker compose --profile <profile_name> up -d`
|
||||
3. Access: `http://{HOST_IP}:5174` (or the host port you mapped).
|
||||
3. Access: `http://{host_ip}:5174` (or the host port you mapped).
|
||||
|
||||

|
||||
|
||||
@@ -207,7 +218,7 @@ Users can interact with the backend service using the `Neural Copilot` VS Code e
|
||||
|
||||
1. **Install:** Find and install `Neural Copilot` from the VS Code Marketplace.
|
||||

|
||||
2. **Configure:** Set the "Service URL" in the extension settings to your CodeGen backend endpoint: `http://${HOST_IP}:7778/v1/codegen` (use the correct port if changed).
|
||||
2. **Configure:** Set the "Service URL" in the extension settings to your CodeGen backend endpoint: `http://${host_ip}:7778/v1/codegen` (use the correct port if changed).
|
||||

|
||||
3. **Usage:**
|
||||
- **Inline Suggestion:** Type a comment describing the code you want (e.g., `# Python function to read a file`) and wait for suggestions.
|
||||
@@ -218,7 +229,7 @@ Users can interact with the backend service using the `Neural Copilot` VS Code e
|
||||
## Troubleshooting
|
||||
|
||||
- **Model Download Issues:** Check `HUGGINGFACEHUB_API_TOKEN`. Ensure internet connectivity or correct proxy settings. Check logs of `tgi-service`/`vllm-service` and `tei-embedding-server`. Gated models need prior Hugging Face access.
|
||||
- **Connection Errors:** Verify `HOST_IP` is correct and accessible. Check `docker ps` for port mappings. Ensure `no_proxy` includes `HOST_IP` if using a proxy. Check logs of the service failing to connect (e.g., `codegen-backend-server` logs if it can't reach `codegen-llm-server`).
|
||||
- **Connection Errors:** Verify `host_ip` is correct and accessible. Check `docker ps` for port mappings. Ensure `no_proxy` includes `host_ip` if using a proxy. Check logs of the service failing to connect (e.g., `codegen-backend-server` logs if it can't reach `codegen-llm-server`); see the quick checks after this list.
|
||||
- **"Container name is in use"**: Stop existing containers (`docker compose down`) or change `container_name` in `compose.yaml`.
|
||||
- **Resource Issues:** CodeGen models can be memory-intensive. Monitor host RAM usage. Increase Docker resources if needed.
|
||||
|
||||
|
||||
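For connection issues, a couple of quick checks along these lines can help narrow things down. This is a sketch; adjust the ports if you changed the defaults:

```bash
# Confirm containers are up and ports are mapped as expected
docker ps --format 'table {{.Names}}\t{{.Ports}}\t{{.Status}}'

# Confirm the CodeGen gateway answers on its default port
curl -sf http://${host_ip}:7778/v1/codegen \
  -H "Content-Type: application/json" \
  -d '{"messages": "ping"}' > /dev/null && echo "gateway reachable" || echo "gateway not reachable"
```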
@@ -53,18 +53,29 @@ This uses the default vLLM-based deployment profile (`codegen-gaudi-vllm`).
|
||||
|
||||
```bash
|
||||
# Replace with your host's external IP address (do not use localhost or 127.0.0.1)
|
||||
export HOST_IP="your_external_ip_address"
|
||||
export host_ip="your_external_ip_address"
|
||||
# Replace with your Hugging Face Hub API token
|
||||
export HUGGINGFACEHUB_API_TOKEN="your_huggingface_token"
|
||||
|
||||
# Optional: Configure proxy if needed
|
||||
# export http_proxy="your_http_proxy"
|
||||
# export https_proxy="your_https_proxy"
|
||||
# export no_proxy="localhost,127.0.0.1,${HOST_IP}" # Add other hosts if necessary
|
||||
# export no_proxy="localhost,127.0.0.1,${host_ip}" # Add other hosts if necessary
|
||||
source ../../../set_env.sh
|
||||
```
|
||||
|
||||
_Note: Ensure all required variables like ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.) are set if not using defaults from the compose file._
|
||||
_Note: The compose file might read additional variables from `set_env.sh`. Ensure all required variables, such as ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.), are set if not using the defaults from the compose file._
|
||||
For example:
|
||||
|
||||
```
|
||||
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-32B-Instruct"
|
||||
```
|
||||
|
||||
This can be changed to a smaller model if needed:
|
||||
|
||||
```
|
||||
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
|
||||
```
|
||||
|
||||
2. **Start Services (vLLM Profile):**
|
||||
|
||||
@@ -94,7 +105,7 @@ The `compose.yaml` file uses Docker Compose profiles to select the LLM serving b
|
||||
- **Other Services:** Same CPU-based services as the vLLM profile.
|
||||
- **To Run:**
|
||||
```bash
|
||||
# Ensure environment variables (HOST_IP, HUGGINGFACEHUB_API_TOKEN) are set
|
||||
# Ensure environment variables (host_ip, HUGGINGFACEHUB_API_TOKEN) are set
|
||||
docker compose --profile codegen-gaudi-tgi up -d
|
||||
```
|
||||
|
||||
@@ -106,14 +117,14 @@ Key parameters are configured via environment variables set before running `dock
|
||||
|
||||
| Environment Variable | Description | Default (Set Externally) |
|
||||
| :-------------------------------------- | :------------------------------------------------------------------------------------------------------------------ | :----------------------------------------------------------------------------------------------- |
|
||||
| `HOST_IP` | External IP address of the host machine. **Required.** | `your_external_ip_address` |
|
||||
| `host_ip` | External IP address of the host machine. **Required.** | `your_external_ip_address` |
|
||||
| `HUGGINGFACEHUB_API_TOKEN` | Your Hugging Face Hub token for model access. **Required.** | `your_huggingface_token` |
|
||||
| `LLM_MODEL_ID` | Hugging Face model ID for the CodeGen LLM (used by TGI/vLLM service). Configured within `compose.yaml` environment. | `Qwen/Qwen2.5-Coder-7B-Instruct` |
|
||||
| `LLM_MODEL_ID` | Hugging Face model ID for the CodeGen LLM (used by TGI/vLLM service). Configured within `compose.yaml` environment. | `Qwen/Qwen2.5-Coder-32B-Instruct` |
|
||||
| `EMBEDDING_MODEL_ID` | Hugging Face model ID for the embedding model (used by TEI service). Configured within `compose.yaml` environment. | `BAAI/bge-base-en-v1.5` |
|
||||
| `LLM_ENDPOINT` | Internal URL for the LLM serving endpoint (used by `codegen-llm-server`). Configured in `compose.yaml`. | `http://codegen-tgi-server:80/generate` or `http://codegen-vllm-server:8000/v1/chat/completions` |
|
||||
| `TEI_EMBEDDING_ENDPOINT` | Internal URL for the Embedding service. Configured in `compose.yaml`. | `http://codegen-tei-embedding-server:80/embed` |
|
||||
| `DATAPREP_ENDPOINT` | Internal URL for the Data Preparation service. Configured in `compose.yaml`. | `http://codegen-dataprep-server:80/dataprep` |
|
||||
| `BACKEND_SERVICE_ENDPOINT` | External URL for the CodeGen Gateway (MegaService). Derived from `HOST_IP` and port `7778`. | `http://${HOST_IP}:7778/v1/codegen` |
|
||||
| `BACKEND_SERVICE_ENDPOINT` | External URL for the CodeGen Gateway (MegaService). Derived from `host_ip` and port `7778`. | `http://${host_ip}:7778/v1/codegen` |
|
||||
| `*_PORT` (Internal) | Internal container ports (e.g., `80`, `6379`). Defined in `compose.yaml`. | N/A |
|
||||
| `http_proxy` / `https_proxy`/`no_proxy` | Network proxy settings (if required). | `""` |
|
||||
|
||||
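For example, switching between the two serving backends is a matter of which Compose profile is selected. A sketch, using the profile names referenced elsewhere in this guide:

```bash
# vLLM backend (default profile on Gaudi)
docker compose --profile codegen-gaudi-vllm up -d

# TGI backend (alternative profile)
# docker compose --profile codegen-gaudi-tgi up -d
```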
@@ -170,21 +181,21 @@ Check logs: `docker compose logs <service_name>`. Pay attention to `vllm-gaudi-s
|
||||
|
||||
### Run Validation Script/Commands
|
||||
|
||||
Use `curl` commands targeting the main service endpoints. Ensure `HOST_IP` is correctly set.
|
||||
Use `curl` commands targeting the main service endpoints. Ensure `host_ip` is correctly set.
|
||||
|
||||
1. **Validate LLM Serving Endpoint (Example for vLLM on default port 8000 internally, exposed differently):**
|
||||
1. **Validate LLM Serving Endpoint (Example for vLLM on default port 9000 internally, exposed differently):**
|
||||
|
||||
```bash
|
||||
# This command structure targets the OpenAI-compatible vLLM endpoint
|
||||
curl http://${HOST_IP}:8000/v1/chat/completions \
|
||||
curl http://${host_ip}:9000/v1/chat/completions \
|
||||
-X POST \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "Implement a basic Python class"}], "max_tokens":32}'
|
||||
-d '{"model": "Qwen/Qwen2.5-Coder-32B-Instruct", "messages": [{"role": "user", "content": "Implement a basic Python class"}], "max_tokens":32}'
|
||||
```
|
||||
|
||||
2. **Validate CodeGen Gateway (MegaService, default host port 7778):**
|
||||
```bash
|
||||
curl http://${HOST_IP}:7778/v1/codegen \
|
||||
curl http://${host_ip}:7778/v1/codegen \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"messages": "Implement a sorting algorithm in Python."}'
|
||||
```
|
||||
@@ -197,8 +208,8 @@ UI options are similar to the Xeon deployment.
|
||||
### Gradio UI (Default)
|
||||
|
||||
Access the default Gradio UI:
|
||||
`http://{HOST_IP}:8080`
|
||||
_(Port `8080` is the default host mapping)_
|
||||
`http://{host_ip}:5173`
|
||||
_(Port `5173` is the default host mapping)_
|
||||
|
||||

|
||||
|
||||
@@ -206,17 +217,17 @@ _(Port `8080` is the default host mapping)_
|
||||
|
||||
1. Modify `compose.yaml`: Swap Gradio service for Svelte (`codegen-gaudi-ui-server`), check port map (e.g., `5173:5173`).
|
||||
2. Restart: `docker compose --profile <profile_name> up -d`
|
||||
3. Access: `http://{HOST_IP}:5173`
|
||||
3. Access: `http://{host_ip}:5173`
|
||||
|
||||
### React UI (Optional)
|
||||
|
||||
1. Modify `compose.yaml`: Swap Gradio service for React (`codegen-gaudi-react-ui-server`), check port map (e.g., `5174:80`).
|
||||
2. Restart: `docker compose --profile <profile_name> up -d`
|
||||
3. Access: `http://{HOST_IP}:5174`
|
||||
3. Access: `http://{host_ip}:5174`
|
||||
|
||||
### VS Code Extension (Optional)
|
||||
|
||||
Use the `Neural Copilot` extension configured with the CodeGen backend URL: `http://${HOST_IP}:7778/v1/codegen`. (See Xeon README for detailed setup screenshots).
|
||||
Use the `Neural Copilot` extension configured with the CodeGen backend URL: `http://${host_ip}:7778/v1/codegen`. (See Xeon README for detailed setup screenshots).
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
@@ -226,7 +237,7 @@ Use the `Neural Copilot` extension configured with the CodeGen backend URL: `htt
|
||||
- Verify `runtime: habana` and volume mounts in `compose.yaml` (see the runtime check at the end of this section).
|
||||
- Gaudi initialization can take significant time and memory. Monitor resource usage.
|
||||
- **Model Download Issues:** Check `HUGGINGFACEHUB_API_TOKEN`, internet access, proxy settings. Check LLM service logs.
|
||||
- **Connection Errors:** Verify `HOST_IP`, ports, and proxy settings. Use `docker ps` and check service logs.
|
||||
- **Connection Errors:** Verify `host_ip`, ports, and proxy settings. Use `docker ps` and check service logs.
|
||||
|
||||
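A quick way to confirm the Habana runtime is visible might look like this. This is a sketch; `hl-smi` is assumed to be provided by the host's Habana driver installation:

```bash
# Check that Docker has the habana runtime registered
docker info 2>/dev/null | grep -i habana

# Check that Gaudi devices are visible on the host (requires Habana drivers)
hl-smi
```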
## Stopping the Application
|
||||
|
||||
|
||||
@@ -5,6 +5,8 @@ services:
|
||||
codegen:
|
||||
build:
|
||||
args:
|
||||
IMAGE_REPO: ${REGISTRY}
|
||||
BASE_TAG: ${TAG}
|
||||
http_proxy: ${http_proxy}
|
||||
https_proxy: ${https_proxy}
|
||||
no_proxy: ${no_proxy}
|
||||
@@ -39,6 +41,7 @@ services:
|
||||
build:
|
||||
context: GenAIComps
|
||||
dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu
|
||||
extends: codegen
|
||||
image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest}
|
||||
vllm:
|
||||
build:
|
||||
|
||||
@@ -27,19 +27,13 @@ export no_proxy=${no_proxy},${ip_address}
|
||||
|
||||
function build_docker_images() {
|
||||
opea_branch=${opea_branch:-"main"}
|
||||
# If the opea_branch isn't main, replace the git clone branch in Dockerfile.
|
||||
if [[ "${opea_branch}" != "main" ]]; then
|
||||
cd $WORKPATH
|
||||
OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
|
||||
NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
|
||||
find . -type f -name "Dockerfile*" | while read -r file; do
|
||||
echo "Processing file: $file"
|
||||
sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
|
||||
done
|
||||
fi
|
||||
|
||||
cd $WORKPATH/docker_image_build
|
||||
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
|
||||
pushd GenAIComps
|
||||
echo "GenAIComps test commit is $(git rev-parse HEAD)"
|
||||
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
|
||||
popd && sleep 1s
|
||||
|
||||
# Download Gaudi vllm of latest tag
|
||||
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
|
||||
@@ -250,24 +244,36 @@ function main() {
|
||||
stop_docker "${docker_compose_profiles[${i}]}"
|
||||
done
|
||||
|
||||
# build docker images
|
||||
echo "::group::build_docker_images"
|
||||
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
|
||||
echo "::endgroup::"
|
||||
|
||||
# loop all profiles
|
||||
for ((i = 0; i < len_profiles; i++)); do
|
||||
echo "Process [${i}]: ${docker_compose_profiles[$i]}, ${docker_llm_container_names[${i}]}"
|
||||
|
||||
echo "::group::start_services"
|
||||
start_services "${docker_compose_profiles[${i}]}" "${docker_llm_container_names[${i}]}"
|
||||
echo "::endgroup::"
|
||||
docker ps -a
|
||||
|
||||
echo "::group::validate_microservices"
|
||||
validate_microservices "${docker_llm_container_names[${i}]}"
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::validate_megaservice"
|
||||
validate_megaservice
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::validate_gradio"
|
||||
validate_gradio
|
||||
echo "::endgroup::"
|
||||
|
||||
stop_docker "${docker_compose_profiles[${i}]}"
|
||||
sleep 5s
|
||||
done
|
||||
|
||||
echo y | docker system prune
|
||||
docker system prune -f
|
||||
}
|
||||
|
||||
main
|
||||
|
||||
@@ -17,19 +17,13 @@ ip_address=$(hostname -I | awk '{print $1}')
|
||||
|
||||
function build_docker_images() {
|
||||
opea_branch=${opea_branch:-"main"}
|
||||
# If the opea_branch isn't main, replace the git clone branch in Dockerfile.
|
||||
if [[ "${opea_branch}" != "main" ]]; then
|
||||
cd $WORKPATH
|
||||
OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
|
||||
NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
|
||||
find . -type f -name "Dockerfile*" | while read -r file; do
|
||||
echo "Processing file: $file"
|
||||
sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
|
||||
done
|
||||
fi
|
||||
|
||||
cd $WORKPATH/docker_image_build
|
||||
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
|
||||
pushd GenAIComps
|
||||
echo "GenAIComps test commit is $(git rev-parse HEAD)"
|
||||
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
|
||||
popd && sleep 1s
|
||||
|
||||
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
|
||||
service_list="codegen codegen-ui llm-textgen"
|
||||
@@ -164,18 +158,35 @@ function stop_docker() {
|
||||
|
||||
function main() {
|
||||
|
||||
echo "::group::stop_docker"
|
||||
stop_docker
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::build_docker_images"
|
||||
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::start_services"
|
||||
start_services
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::validate_microservices"
|
||||
validate_microservices
|
||||
validate_megaservice
|
||||
validate_frontend
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::validate_megaservice"
|
||||
validate_megaservice
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::validate_frontend"
|
||||
validate_frontend
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::stop_docker"
|
||||
stop_docker
|
||||
echo y | docker system prune
|
||||
cd $WORKPATH
|
||||
echo "::endgroup::"
|
||||
|
||||
docker system prune -f
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -27,19 +27,13 @@ export no_proxy=${no_proxy},${ip_address}
|
||||
|
||||
function build_docker_images() {
|
||||
opea_branch=${opea_branch:-"main"}
|
||||
# If the opea_branch isn't main, replace the git clone branch in Dockerfile.
|
||||
if [[ "${opea_branch}" != "main" ]]; then
|
||||
cd $WORKPATH
|
||||
OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
|
||||
NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
|
||||
find . -type f -name "Dockerfile*" | while read -r file; do
|
||||
echo "Processing file: $file"
|
||||
sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
|
||||
done
|
||||
fi
|
||||
|
||||
cd $WORKPATH/docker_image_build
|
||||
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
|
||||
pushd GenAIComps
|
||||
echo "GenAIComps test commit is $(git rev-parse HEAD)"
|
||||
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
|
||||
popd && sleep 1s
|
||||
|
||||
git clone https://github.com/vllm-project/vllm.git && cd vllm
|
||||
VLLM_VER="v0.8.3"
|
||||
@@ -256,17 +250,28 @@ function main() {
|
||||
for ((i = 0; i < len_profiles; i++)); do
|
||||
echo "Process [${i}]: ${docker_compose_profiles[$i]}, ${docker_llm_container_names[${i}]}"
|
||||
docker ps -a
|
||||
start_services "${docker_compose_profiles[${i}]}" "${docker_llm_container_names[${i}]}"
|
||||
|
||||
echo "::group::start_services"
|
||||
start_services "${docker_compose_profiles[${i}]}" "${docker_llm_container_names[${i}]}"
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::validate_microservices"
|
||||
validate_microservices "${docker_llm_container_names[${i}]}"
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::validate_megaservice"
|
||||
validate_megaservice
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::validate_gradio"
|
||||
validate_gradio
|
||||
echo "::endgroup::"
|
||||
|
||||
stop_docker "${docker_compose_profiles[${i}]}"
|
||||
sleep 5s
|
||||
done
|
||||
|
||||
echo y | docker system prune
|
||||
docker system prune -f
|
||||
}
|
||||
|
||||
main
|
||||
|
||||
@@ -17,19 +17,13 @@ ip_address=$(hostname -I | awk '{print $1}')
|
||||
|
||||
function build_docker_images() {
|
||||
opea_branch=${opea_branch:-"main"}
|
||||
# If the opea_branch isn't main, replace the git clone branch in Dockerfile.
|
||||
if [[ "${opea_branch}" != "main" ]]; then
|
||||
cd $WORKPATH
|
||||
OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
|
||||
NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
|
||||
find . -type f -name "Dockerfile*" | while read -r file; do
|
||||
echo "Processing file: $file"
|
||||
sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
|
||||
done
|
||||
fi
|
||||
|
||||
cd $WORKPATH/docker_image_build
|
||||
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
|
||||
pushd GenAIComps
|
||||
echo "GenAIComps test commit is $(git rev-parse HEAD)"
|
||||
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
|
||||
popd && sleep 1s
|
||||
|
||||
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
|
||||
service_list="vllm-rocm llm-textgen codegen codegen-ui"
|
||||
@@ -164,17 +158,35 @@ function stop_docker() {
|
||||
|
||||
function main() {
|
||||
|
||||
echo "::group::stop_docker"
|
||||
stop_docker
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::build_docker_images"
|
||||
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::start_services"
|
||||
start_services
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::validate_microservices"
|
||||
validate_microservices
|
||||
validate_megaservice
|
||||
validate_frontend
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::validate_megaservice"
|
||||
validate_megaservice
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::validate_frontend"
|
||||
validate_frontend
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::stop_docker"
|
||||
stop_docker
|
||||
echo y | docker system prune
|
||||
cd $WORKPATH
|
||||
echo "::endgroup::"
|
||||
|
||||
docker system prune -f
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -22,12 +22,11 @@ This Code Translation use case demonstrates Text Generation Inference across mul
|
||||
|
||||
The table below lists the currently available deployment options. Each entry describes in detail how this example is implemented on the selected hardware.
|
||||
|
||||
| Category | Deployment Option | Description |
|
||||
| ---------------------- | -------------------- | ----------------------------------------------------------------- |
|
||||
| On-premise Deployments | Docker compose | [CodeTrans deployment on Xeon](./docker_compose/intel/cpu/xeon) |
|
||||
| | | [CodeTrans deployment on Gaudi](./docker_compose/intel/hpu/gaudi) |
|
||||
| | | [CodeTrans deployment on AMD ROCm](./docker_compose/amd/gpu/rocm) |
|
||||
| | Kubernetes | [Helm Charts](./kubernetes/helm) |
|
||||
| | | [GMC](./kubernetes/gmc) |
|
||||
| | Azure | Work-in-progress |
|
||||
| | Intel Tiber AI Cloud | Work-in-progress |
|
||||
| Category | Deployment Option | Description |
|
||||
| ---------------------- | -------------------- | --------------------------------------------------------------------------- |
|
||||
| On-premise Deployments | Docker compose | [CodeTrans deployment on Xeon](./docker_compose/intel/cpu/xeon/README.md) |
|
||||
| | | [CodeTrans deployment on Gaudi](./docker_compose/intel/hpu/gaudi/README.md) |
|
||||
| | | [CodeTrans deployment on AMD ROCm](./docker_compose/amd/gpu/rocm/README.md) |
|
||||
| | Kubernetes | [Helm Charts](./kubernetes/helm/README.md) |
|
||||
| | Azure | Work-in-progress |
|
||||
| | Intel Tiber AI Cloud | Work-in-progress |
|
||||
|
||||
@@ -44,3 +44,38 @@ Some HuggingFace resources, such as some models, are only accessible if the deve
|
||||
|
||||
2. (Docker only) If all microservices work well but the service is unreachable, check port ${host_ip}:7777; the port may already be allocated by another user, in which case you can modify it in `compose.yaml` (see the check below).
|
||||
3. (Docker only) If you get errors like "The container name is in use", change the container name in `compose.yaml`.
|
||||
|
||||
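A minimal check for such a port conflict might look like this (a sketch; `ss` availability depends on the host):

```bash
# See whether anything is already listening on port 7777
ss -ltnp | grep ':7777' || echo "port 7777 is free"
```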
## Monitoring OPEA Services with Prometheus and Grafana Dashboard
|
||||
|
||||
OPEA microservice deployment can easily be monitored through Grafana dashboards using data collected via Prometheus. Follow the [README](https://github.com/opea-project/GenAIEval/blob/main/evals/benchmark/grafana/README.md) to set up Prometheus and Grafana servers and import dashboards to monitor the OPEA services.
|
||||
|
||||

|
||||

|
||||
|
||||
## Tracing with OpenTelemetry and Jaeger
|
||||
|
||||
> NOTE: This feature is disabled by default. Please use the compose.telemetry.yaml file to enable this feature.
|
||||
|
||||
OPEA microservices and [TGI](https://huggingface.co/docs/text-generation-inference/en/index)/[TEI](https://huggingface.co/docs/text-embeddings-inference/en/index) serving can easily be traced through [Jaeger](https://www.jaegertracing.io/) dashboards in conjunction with the [OpenTelemetry](https://opentelemetry.io/) tracing feature. Follow the [README](https://github.com/opea-project/GenAIComps/tree/main/comps/cores/telemetry#tracing) to trace additional functions if needed.
|
||||
|
||||
Tracing data is exported to http://{EXTERNAL_IP}:4318/v1/traces via Jaeger.
|
||||
The external IP can also be obtained via the command below.
|
||||
|
||||
```bash
|
||||
ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+'
|
||||
```
|
||||
|
||||
Access the Jaeger dashboard UI at http://{EXTERNAL_IP}:16686
|
||||
|
||||
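To sanity-check that both Jaeger endpoints are reachable, something along these lines can be used (a sketch reusing the IP-detection command shown above):

```bash
EXTERNAL_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
# OTLP/HTTP collector endpoint that receives the traces
curl -s -o /dev/null -w "traces endpoint: %{http_code}\n" http://${EXTERNAL_IP}:4318/v1/traces
# Jaeger dashboard UI
curl -s -o /dev/null -w "jaeger ui: %{http_code}\n" http://${EXTERNAL_IP}:16686
```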
For TGI serving on Gaudi, users can see the different services, such as opea, TEI, and TGI.
|
||||

|
||||
|
||||
Here is a screenshot of one trace of a TGI serving request.
|
||||

|
||||
|
||||
There are also OPEA-related traces. Users can understand the time breakdown of each service request by looking into each opea:schedule operation.
|
||||

|
||||
|
||||
There could be asynchronous functions such as `llm/MicroService_asyn_generate`; in that case, check the trace of the asynchronous function under a separate operation such as
|
||||
opea:llm_generate_stream.
|
||||

|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 120 KiB After Width: | Height: | Size: 90 KiB |
BIN
CodeTrans/assets/img/example_dashboards.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 100 KiB |
BIN
CodeTrans/assets/img/tgi_dashboard.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 414 KiB |
@@ -46,7 +46,7 @@ export http_proxy="Your_HTTP_Proxy" # http proxy if any
|
||||
export https_proxy="Your_HTTPs_Proxy" # https proxy if any
|
||||
export no_proxy=localhost,127.0.0.1,$host_ip # additional no proxies if needed
|
||||
export NGINX_PORT=${your_nginx_port} # your usable port for nginx, 80 for example
|
||||
source ./set_env.sh
|
||||
source docker_compose/intel/set_env.sh
|
||||
```
|
||||
|
||||
Consult the section on [CodeTrans Service configuration](#codetrans-configuration) for information on how service specific configuration parameters affect deployments.
|
||||
|
||||
@@ -46,7 +46,7 @@ export http_proxy="Your_HTTP_Proxy" # http proxy if any
|
||||
export https_proxy="Your_HTTPs_Proxy" # https proxy if any
|
||||
export no_proxy=localhost,127.0.0.1,$host_ip # additional no proxies if needed
|
||||
export NGINX_PORT=${your_nginx_port} # your usable port for nginx, 80 for example
|
||||
source ./set_env.sh
|
||||
source docker_compose/intel/set_env.sh
|
||||
```
|
||||
|
||||
Consult the section on [CodeTrans Service configuration](#codetrans-configuration) for information on how service specific configuration parameters affect deployments.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
# Copyright (C) 2024 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
pushd "../../" > /dev/null
|
||||
pushd "../../../" > /dev/null
|
||||
source .set_env.sh
|
||||
popd > /dev/null
|
||||
|
||||
@@ -37,12 +37,12 @@ function build_docker_images() {
|
||||
}
|
||||
|
||||
function start_services() {
|
||||
cd $WORKPATH/docker_compose
|
||||
cd $WORKPATH/docker_compose/intel
|
||||
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
|
||||
export NGINX_PORT=80
|
||||
export host_ip=${ip_address}
|
||||
source set_env.sh
|
||||
cd intel/hpu/gaudi
|
||||
cd hpu/gaudi
|
||||
|
||||
sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
|
||||
|
||||
|
||||
@@ -39,13 +39,13 @@ function build_docker_images() {
|
||||
}
|
||||
|
||||
function start_services() {
|
||||
cd $WORKPATH/docker_compose
|
||||
cd $WORKPATH/docker_compose/intel
|
||||
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
|
||||
|
||||
export NGINX_PORT=80
|
||||
export host_ip=${ip_address}
|
||||
source set_env.sh
|
||||
cd intel/cpu/xeon/
|
||||
cd cpu/xeon/
|
||||
|
||||
sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
|
||||
|
||||
|
||||
@@ -34,13 +34,13 @@ function build_docker_images() {
|
||||
}
|
||||
|
||||
function start_services() {
|
||||
cd $WORKPATH/docker_compose
|
||||
cd $WORKPATH/docker_compose/intel
|
||||
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
|
||||
|
||||
export NGINX_PORT=80
|
||||
export host_ip=${ip_address}
|
||||
source set_env.sh
|
||||
cd intel/hpu/gaudi/
|
||||
cd hpu/gaudi/
|
||||
|
||||
sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
|
||||
|
||||
|
||||
@@ -34,13 +34,13 @@ function build_docker_images() {
|
||||
}
|
||||
|
||||
function start_services() {
|
||||
cd $WORKPATH/docker_compose
|
||||
cd $WORKPATH/docker_compose/intel
|
||||
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
|
||||
|
||||
export NGINX_PORT=80
|
||||
export host_ip=${ip_address}
|
||||
source set_env.sh
|
||||
cd intel/cpu/xeon/
|
||||
cd cpu/xeon/
|
||||
|
||||
sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
|
||||
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
# Copyright (C) 2024 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
ARG IMAGE_REPO=opea
|
||||
ARG BASE_TAG=latest
|
||||
FROM opea/comps-base:$BASE_TAG
|
||||
FROM $IMAGE_REPO/comps-base:$BASE_TAG
|
||||
|
||||
COPY ./retrieval_tool.py $HOME/retrieval_tool.py
|
||||
|
||||
|
||||
@@ -2,7 +2,11 @@
|
||||
|
||||
DocRetriever is the most widely adopted use case for leveraging different methodologies to match a user query against a set of free-text records. DocRetriever is essential to a RAG system, which bridges the knowledge gap by dynamically fetching relevant information from external sources, ensuring that generated responses remain factual and current. At the core of this architecture are vector databases, which are instrumental in enabling efficient and semantic retrieval of information. These databases store data as vectors, allowing RAG to swiftly access the most pertinent documents or data points based on semantic similarity.
|
||||
|
||||
## 1. Build Images for necessary microservices. (Optional after docker image release)
|
||||
_Note:
|
||||
|
||||
As the related Docker images have been published to Docker Hub, you can skip steps 1 and 2 below and quick-start from step 3._
|
||||
|
||||
## 1. Build Images for necessary microservices. (Optional)
|
||||
|
||||
- Embedding TEI Image
|
||||
|
||||
@@ -30,7 +34,7 @@ DocRetriever are the most widely adopted use case for leveraging the different m
|
||||
docker build -t opea/dataprep:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/dataprep/src/Dockerfile .
|
||||
```
|
||||
|
||||
## 2. Build Images for MegaService
|
||||
## 2. Build Images for MegaService (Optional)
|
||||
|
||||
```bash
|
||||
cd ..
|
||||
@@ -44,6 +48,19 @@ docker build --no-cache -t opea/doc-index-retriever:latest --build-arg https_pro
|
||||
```bash
|
||||
export host_ip="YOUR IP ADDR"
|
||||
export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
|
||||
```
|
||||
|
||||
Set the environment variables by running:
|
||||
|
||||
```bash
|
||||
cd GenAIExamples/DocIndexRetriever/docker_compose/intel/cpu/xeon
|
||||
source set_env.sh
|
||||
```
|
||||
|
||||
Note: `set_env.sh` helps set all required variables. Please ensure all required variables, such as ports (`LLM_SERVICE_PORT`, `MEGA_SERVICE_PORT`, etc.), are set if not using the defaults from the compose file.
|
||||
Alternatively, set the environment variables manually:
|
||||
|
||||
```bash
|
||||
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
|
||||
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
|
||||
export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:6006"
|
||||
|
||||
@@ -5,6 +5,8 @@ services:
|
||||
doc-index-retriever:
|
||||
build:
|
||||
args:
|
||||
IMAGE_REPO: ${REGISTRY:-opea}
|
||||
BASE_TAG: ${TAG:-latest}
|
||||
http_proxy: ${http_proxy}
|
||||
https_proxy: ${https_proxy}
|
||||
no_proxy: ${no_proxy}
|
||||
|
||||
@@ -20,17 +20,15 @@ function build_docker_images() {
|
||||
if [ ! -d "GenAIComps" ] ; then
|
||||
git clone --single-branch --branch "${opea_branch:-"main"}" https://github.com/opea-project/GenAIComps.git
|
||||
fi
|
||||
pushd GenAIComps
|
||||
echo "GenAIComps test commit is $(git rev-parse HEAD)"
|
||||
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
|
||||
popd && sleep 1s
|
||||
|
||||
service_list="dataprep embedding retriever reranking doc-index-retriever"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
docker pull ghcr.io/huggingface/tei-gaudi:1.5.0
|
||||
docker pull quay.io/coreos/etcd:v3.5.5
|
||||
docker pull minio/minio:RELEASE.2023-03-20T20-16-18Z
|
||||
docker pull milvusdb/milvus:v2.4.6
|
||||
docker images && sleep 1s
|
||||
|
||||
echo "Docker images built!"
|
||||
}
|
||||
|
||||
function start_services() {
|
||||
@@ -112,19 +110,27 @@ function stop_docker() {
|
||||
|
||||
function main() {
|
||||
|
||||
echo "::group::stop_docker"
|
||||
stop_docker
|
||||
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
|
||||
echo "Dump current docker ps"
|
||||
docker ps
|
||||
start_time=$(date +%s)
|
||||
start_services
|
||||
end_time=$(date +%s)
|
||||
duration=$((end_time-start_time))
|
||||
echo "Mega service start duration is $duration s"
|
||||
validate_megaservice
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::build_docker_images"
|
||||
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::start_services"
|
||||
start_services
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::validate_megaservice"
|
||||
validate_megaservice
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::stop_docker"
|
||||
stop_docker
|
||||
echo y | docker system prune
|
||||
echo "::endgroup::"
|
||||
|
||||
docker system prune -f
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -20,16 +20,15 @@ function build_docker_images() {
|
||||
if [ ! -d "GenAIComps" ] ; then
|
||||
git clone --single-branch --branch "${opea_branch:-"main"}" https://github.com/opea-project/GenAIComps.git
|
||||
fi
|
||||
pushd GenAIComps
|
||||
echo "GenAIComps test commit is $(git rev-parse HEAD)"
|
||||
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
|
||||
popd && sleep 1s
|
||||
|
||||
service_list="dataprep embedding retriever reranking doc-index-retriever"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
docker pull quay.io/coreos/etcd:v3.5.5
|
||||
docker pull minio/minio:RELEASE.2023-03-20T20-16-18Z
|
||||
docker pull milvusdb/milvus:v2.4.6
|
||||
docker images && sleep 1s
|
||||
|
||||
echo "Docker images built!"
|
||||
}
|
||||
|
||||
function start_services() {
|
||||
@@ -111,19 +110,27 @@ function stop_docker() {
|
||||
|
||||
function main() {
|
||||
|
||||
echo "::group::stop_docker"
|
||||
stop_docker
|
||||
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
|
||||
echo "Dump current docker ps"
|
||||
docker ps
|
||||
start_time=$(date +%s)
|
||||
start_services
|
||||
end_time=$(date +%s)
|
||||
duration=$((end_time-start_time))
|
||||
echo "Mega service start duration is $duration s"
|
||||
validate_megaservice
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::build_docker_images"
|
||||
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::start_services"
|
||||
start_services
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::validate_megaservice"
|
||||
validate_megaservice
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::stop_docker"
|
||||
stop_docker
|
||||
echo y | docker system prune
|
||||
echo "::endgroup::"
|
||||
|
||||
docker system prune -f
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -21,14 +21,15 @@ function build_docker_images() {
|
||||
if [ ! -d "GenAIComps" ] ; then
|
||||
git clone --single-branch --branch "${opea_branch:-"main"}" https://github.com/opea-project/GenAIComps.git
|
||||
fi
|
||||
pushd GenAIComps
|
||||
echo "GenAIComps test commit is $(git rev-parse HEAD)"
|
||||
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
|
||||
popd && sleep 1s
|
||||
|
||||
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
|
||||
docker compose -f build.yaml build --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull redis/redis-stack:7.2.0-v9
|
||||
docker pull ghcr.io/huggingface/tei-gaudi:1.5.0
|
||||
docker images && sleep 1s
|
||||
echo "Docker images built!"
|
||||
}
|
||||
|
||||
function start_services() {
|
||||
@@ -103,19 +104,27 @@ function stop_docker() {
|
||||
|
||||
function main() {
|
||||
|
||||
echo "::group::stop_docker"
|
||||
stop_docker
|
||||
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
|
||||
echo "Dump current docker ps"
|
||||
docker ps
|
||||
start_time=$(date +%s)
|
||||
start_services
|
||||
end_time=$(date +%s)
|
||||
duration=$((end_time-start_time))
|
||||
echo "Mega service start duration is $duration s"
|
||||
validate_megaservice
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::build_docker_images"
|
||||
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::start_services"
|
||||
start_services
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::validate_megaservice"
|
||||
validate_megaservice
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::stop_docker"
|
||||
stop_docker
|
||||
echo y | docker system prune
|
||||
echo "::endgroup::"
|
||||
|
||||
docker system prune -f
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -21,14 +21,15 @@ function build_docker_images() {
|
||||
if [ ! -d "GenAIComps" ] ; then
|
||||
git clone --single-branch --branch "${opea_branch:-"main"}" https://github.com/opea-project/GenAIComps.git
|
||||
fi
|
||||
pushd GenAIComps
|
||||
echo "GenAIComps test commit is $(git rev-parse HEAD)"
|
||||
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
|
||||
popd && sleep 1s
|
||||
|
||||
service_list="dataprep embedding retriever reranking doc-index-retriever"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
docker pull redis/redis-stack:7.2.0-v9
|
||||
docker images && sleep 1s
|
||||
|
||||
echo "Docker images built!"
|
||||
}
|
||||
|
||||
function start_services() {
|
||||
@@ -110,20 +111,27 @@ function stop_docker() {

function main() {

echo "::group::stop_docker"
stop_docker
echo "::endgroup::"

echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "Dump current docker ps"
docker ps
echo "::endgroup::"

start_time=$(date +%s)
echo "::group::start_services"
start_services
end_time=$(date +%s)
duration=$((end_time-start_time))
echo "Mega service start duration is $duration s"
validate_megaservice
echo "::endgroup::"

echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"

echo "::group::stop_docker"
stop_docker
echo y | docker system prune
echo "::endgroup::"

docker system prune -f

}
@@ -21,6 +21,11 @@ function build_docker_images() {
if [ ! -d "GenAIComps" ] ; then
git clone --single-branch --branch "${opea_branch:-"main"}" https://github.com/opea-project/GenAIComps.git
fi
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s

service_list="dataprep embedding retriever doc-index-retriever"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
@@ -114,19 +119,27 @@ function stop_docker() {

function main() {

echo "::group::stop_docker"
stop_docker
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "Dump current docker ps"
docker ps
start_time=$(date +%s)
start_services
end_time=$(date +%s)
duration=$((end_time-start_time))
echo "Mega service start duration is $duration s"
validate_megaservice
echo "::endgroup::"

echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "::endgroup::"

echo "::group::start_services"
start_services
echo "::endgroup::"

echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"

echo "::group::stop_docker"
stop_docker
echo y | docker system prune
echo "::endgroup::"

docker system prune -f

}
@@ -21,40 +21,35 @@ This section describes how to quickly deploy and test the DocSum service manually
6. [Test the Pipeline](#test-the-pipeline)
7. [Cleanup the Deployment](#cleanup-the-deployment)

### Access the Code
### Access the Code and Set Up Environment

Clone the GenAIExamples repository and access the ChatQnA Intel Xeon platform Docker Compose files and supporting scripts:

```
```bash
git clone https://github.com/opea-project/GenAIExamples.git
cd GenAIExamples/DocSum/docker_compose/intel/cpu/xeon/
cd GenAIExamples/DocSum/docker_compose/intel
source set_env.sh
cd cpu/xeon/
```

Checkout a released version, such as v1.2:
NOTE: By default vLLM performs a "warmup" at startup to optimize its performance for the specified model and the underlying platform, which can take a long time. For development (and e.g. autoscaling) it can be skipped with `export VLLM_SKIP_WARMUP=true`.

```
git checkout v1.2
Checkout a released version, such as v1.3:

```bash
git checkout v1.3
```
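Following the NOTE above, the warmup pass can be disabled for faster iteration during development. A minimal sketch, assuming the compose file forwards `VLLM_SKIP_WARMUP` into the vLLM container (as the Gaudi compose change later in this diff does):

```bash
# Skip the vLLM warmup pass for a quicker (but less optimized) startup.
export VLLM_SKIP_WARMUP=true
docker compose up -d
```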
### Generate a HuggingFace Access Token

Some HuggingFace resources, such as certain models, are only accessible with an access token. If you do not already have a HuggingFace access token, you can create one by first creating an account by following the steps provided at [HuggingFace](https://huggingface.co/) and then generating a [user access token](https://huggingface.co/docs/transformers.js/en/guides/private#step-1-generating-a-user-access-token).

### Configure the Deployment Environment

To set up environment variables for deploying DocSum services, source the _set_env.sh_ script in this directory:

```
source ./set_env.sh
```

The _set_env.sh_ script will prompt for required and optional environment variables used to configure the DocSum services. If a value is not entered, the script will use a default value instead. It will also generate a _.env_ file defining the desired configuration. Consult the section on [DocSum Service configuration](#docsum-service-configuration) for information on how service-specific configuration parameters affect deployments.
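One way to avoid being prompted for the token is to export it before sourcing the script; a small sketch, assuming _set_env.sh_ reuses already-exported values (as the `export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}` line in the set_env.sh change further down suggests):

```bash
# Export the token first so set_env.sh picks it up instead of prompting for it.
export HUGGINGFACEHUB_API_TOKEN="<your HuggingFace token>"
source ./set_env.sh
```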
### Deploy the Services Using Docker Compose

To deploy the DocSum services, execute the `docker compose up` command with the appropriate arguments. For a default deployment, execute:

```bash
cd cpu/xeon/
docker compose up -d
```
@@ -78,13 +73,13 @@ Please refer to the table below to build different microservices from source:

After running docker compose, check if all the containers launched via docker compose have started:

```
```bash
docker ps -a
```

For the default deployment, the following 5 containers should have started:

```
```bash
CONTAINER ID   IMAGE                          COMMAND                  CREATED         STATUS              PORTS                                       NAMES
748f577b3c78   opea/whisper:latest            "python whisper_s…"      5 minutes ago   Up About a minute   0.0.0.0:7066->7066/tcp, :::7066->7066/tcp   docsum-xeon-whisper-server
4eq8b7034fd9   opea/docsum-gradio-ui:latest   "docker-entrypoint.s…"   5 minutes ago   Up About a minute   0.0.0.0:5173->5173/tcp, :::5173->5173/tcp   docsum-xeon-ui-server
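If fewer containers than expected are running, a quick way to spot the one that failed is to filter for exited containers; a small sketch:

```bash
# Show only containers that exited, with their status, to spot a failed service quickly.
docker ps -a --filter "status=exited" --format "table {{.Names}}\t{{.Status}}"
# Then inspect the failing container's logs, e.g. (name taken from the listing above):
# docker logs docsum-xeon-whisper-server
```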
@@ -109,7 +104,7 @@ curl -X POST http://${host_ip}:8888/v1/docsum \

To stop the containers associated with the deployment, execute the following command:

```
```bash
docker compose -f compose.yaml down
```
@@ -23,40 +23,35 @@ This section describes how to quickly deploy and test the DocSum service manually
6. [Test the Pipeline](#test-the-pipeline)
7. [Cleanup the Deployment](#cleanup-the-deployment)

### Access the Code
### Access the Code and Set Up Environment

Clone the GenAIExamples repository and access the ChatQnA Intel® Gaudi® platform Docker Compose files and supporting scripts:
Clone the GenAIExamples repository and access the DocSum Intel® Gaudi® platform Docker Compose files and supporting scripts:

```
```bash
git clone https://github.com/opea-project/GenAIExamples.git
cd GenAIExamples/DocSum/docker_compose/intel/hpu/gaudi/
cd GenAIExamples/DocSum/docker_compose/intel
source set_env.sh
cd hpu/gaudi/
```

Checkout a released version, such as v1.2:
NOTE: By default vLLM performs a "warmup" at startup to optimize its performance for the specified model and the underlying platform, which can take a long time. For development (and e.g. autoscaling) it can be skipped with `export VLLM_SKIP_WARMUP=true`.

```
git checkout v1.2
Checkout a released version, such as v1.3:

```bash
git checkout v1.3
```
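Before deploying on Gaudi it can be worth confirming the accelerator cards are visible to the host; a quick sketch, assuming the Habana driver tooling (`hl-smi`) is installed:

```bash
# List the Gaudi devices and their utilization (roughly analogous to nvidia-smi).
hl-smi
# NUM_CARDS (exported by set_env.sh) should not exceed the number of cards shown here.
```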
### Generate a HuggingFace Access Token

Some HuggingFace resources, such as certain models, are only accessible with an access token. If you do not already have a HuggingFace access token, you can create one by first creating an account by following the steps provided at [HuggingFace](https://huggingface.co/) and then generating a [user access token](https://huggingface.co/docs/transformers.js/en/guides/private#step-1-generating-a-user-access-token).

### Configure the Deployment Environment

To set up environment variables for deploying DocSum services, source the _set_env.sh_ script in this directory:

```
source ./set_env.sh
```

The _set_env.sh_ script will prompt for required and optional environment variables used to configure the DocSum services. If a value is not entered, the script will use a default value instead. It will also generate a _.env_ file defining the desired configuration. Consult the section on [DocSum Service configuration](#docsum-service-configuration) for information on how service-specific configuration parameters affect deployments.
### Deploy the Services Using Docker Compose

To deploy the DocSum services, execute the `docker compose up` command with the appropriate arguments. For a default deployment, execute:

```bash
cd hpu/gaudi/
docker compose up -d
```

@@ -80,13 +75,13 @@ Please refer to the table below to build different microservices from source:

After running docker compose, check if all the containers launched via docker compose have started:

```
```bash
docker ps -a
```

For the default deployment, the following 5 containers should have started:

```
```bash
CONTAINER ID   IMAGE                          COMMAND                  CREATED         STATUS              PORTS                                       NAMES
748f577b3c78   opea/whisper:latest            "python whisper_s…"      5 minutes ago   Up About a minute   0.0.0.0:7066->7066/tcp, :::7066->7066/tcp   docsum-gaudi-whisper-server
4eq8b7034fd9   opea/docsum-gradio-ui:latest   "docker-entrypoint.s…"   5 minutes ago   Up About a minute   0.0.0.0:5173->5173/tcp, :::5173->5173/tcp   docsum-gaudi-ui-server
@@ -111,7 +106,7 @@ curl -X POST http://${host_ip}:8888/v1/docsum \

To stop the containers associated with the deployment, execute the following command:

```
```bash
docker compose -f compose.yaml down
```
@@ -18,6 +18,7 @@ services:
OMPI_MCA_btl_vader_single_copy_mechanism: none
LLM_MODEL_ID: ${LLM_MODEL_ID}
NUM_CARDS: ${NUM_CARDS}
VLLM_SKIP_WARMUP: ${VLLM_SKIP_WARMUP:-false}
VLLM_TORCH_PROFILER_DIR: "/mnt"
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"]
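The `${VLLM_SKIP_WARMUP:-false}` form is Docker Compose variable substitution with a default, so warmup stays enabled unless the variable is exported in the calling shell (or set in `.env`). A minimal sketch of overriding it and then polling vLLM's health endpoint, assuming the container's port 80 is published on the host as `LLM_ENDPOINT_PORT` (8008 in the set_env.sh below):

```bash
# Skip warmup for this run only, then wait until the vLLM health check passes.
VLLM_SKIP_WARMUP=true docker compose up -d
until curl -sf "http://${host_ip}:${LLM_ENDPOINT_PORT:-8008}/health"; do
  sleep 5
done
echo "vLLM is ready"
```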
@@ -6,10 +6,10 @@ pushd "../../" > /dev/null
source .set_env.sh
popd > /dev/null

export host_ip=$(hostname -I | awk '{print $1}') # Example: host_ip="192.168.1.1"
export no_proxy="${no_proxy},${host_ip}" # Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
export http_proxy=$http_proxy
export https_proxy=$https_proxy
export host_ip=$(hostname -I | awk '{print $1}') # Example: host_ip="192.168.1.1"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}

export LLM_ENDPOINT_PORT=8008
@@ -29,3 +29,8 @@ export BACKEND_SERVICE_PORT=8888
export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum"

export LOGFLAG=True

export NUM_CARDS=1
export BLOCK_SIZE=128
export MAX_NUM_SEQS=256
export MAX_SEQ_LEN_TO_CAPTURE=2048
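These exports are defaults; on a multi-card Gaudi node or with different memory limits they can be overridden after sourcing the script, since the compose file reads them from the environment (`NUM_CARDS: ${NUM_CARDS}` above). The values below are illustrative, not recommendations:

```bash
# Source the defaults first, then override selected values for this deployment.
source ./set_env.sh
export NUM_CARDS=4        # e.g. spread the model across four Gaudi cards
export MAX_NUM_SEQS=128   # e.g. lower the concurrent-sequence limit
docker compose up -d
```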
@@ -16,7 +16,7 @@ echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
source $WORKPATH/docker_compose/set_env.sh
source $WORKPATH/docker_compose/intel/set_env.sh

export MODEL_CACHE=${model_cache:-"./data"}


@@ -17,7 +17,7 @@ echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

source $WORKPATH/docker_compose/set_env.sh
source $WORKPATH/docker_compose/intel/set_env.sh
export MODEL_CACHE=${model_cache:-"./data"}

export MAX_INPUT_TOKENS=2048

@@ -16,7 +16,7 @@ echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

source $WORKPATH/docker_compose/set_env.sh
source $WORKPATH/docker_compose/intel/set_env.sh
export MODEL_CACHE=${model_cache:-"./data"}

export MAX_INPUT_TOKENS=2048

@@ -16,7 +16,7 @@ echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

source $WORKPATH/docker_compose/set_env.sh
source $WORKPATH/docker_compose/intel/set_env.sh
export MODEL_CACHE=${model_cache:-"./data"}

export MAX_INPUT_TOKENS=2048
@@ -5,3 +5,13 @@
pushd "../../../../../" > /dev/null
source .set_env.sh
popd > /dev/null

export MODEL_PATH=${MODEL_PATH}
export DOC_PATH=${DOC_PATH}
export UI_TMPFILE_PATH=${UI_TMPFILE_PATH}
export HOST_IP=${HOST_IP}
export LLM_MODEL=${LLM_MODEL}
export HF_ENDPOINT=${HF_ENDPOINT}
export vLLM_ENDPOINT=${vLLM_ENDPOINT}
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export no_proxy="localhost, 127.0.0.1, 192.168.1.1"
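Because each line above only re-exports whatever is already in the environment, the caller is expected to define these values first. A minimal sketch of what a test run might export beforehand (all paths, the model id, and the endpoint are placeholders, not defaults from the repository):

```bash
# Hypothetical values -- adjust to your host before sourcing set_env.sh.
export HOST_IP=$(hostname -I | awk '{print $1}')
export MODEL_PATH="$HOME/models"                  # where model weights live
export DOC_PATH="$HOME/docs"                      # documents to index
export UI_TMPFILE_PATH="/tmp/edgecraftrag_ui"     # scratch space for the UI
export LLM_MODEL="Qwen/Qwen2-7B-Instruct"         # example model id
export vLLM_ENDPOINT="http://${HOST_IP}:8008"     # example inference endpoint
export HUGGINGFACEHUB_API_TOKEN="<your HF token>"
```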
21
EdgeCraftRAG/tests/README.md
Normal file
21
EdgeCraftRAG/tests/README.md
Normal file
@@ -0,0 +1,21 @@
# EdgeCraftRAG E2E test scripts

## Set the required environment variable

```bash
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
```

## Run tests

On Intel ARC with TGI:

```bash
bash test_compose_on_arc.sh
```

On Intel ARC with vLLM:

```bash
bash test_compose_vllm_on_arc.sh
```
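If the test host sits behind a corporate proxy, the usual proxy variables can be exported alongside the token before invoking a script; a small sketch (the script name comes from the list above, the proxy URL is a placeholder):

```bash
# Run the Arc + vLLM E2E test with the token and proxy settings in place.
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
export http_proxy="http://proxy.example.com:911"
export https_proxy="$http_proxy"
export no_proxy="localhost,127.0.0.1"
bash test_compose_vllm_on_arc.sh
```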
Some files were not shown because too many files have changed in this diff.