diff --git a/AvatarChatbot/docker_compose/amd/gpu/rocm/set_env.sh b/AvatarChatbot/docker_compose/amd/gpu/rocm/set_env.sh
index e6a2af098..38d54c38f 100644
--- a/AvatarChatbot/docker_compose/amd/gpu/rocm/set_env.sh
+++ b/AvatarChatbot/docker_compose/amd/gpu/rocm/set_env.sh
@@ -41,7 +41,7 @@ export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
 # export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
 export AUDIO='None'
 export FACESIZE=96
-export OUTFILE="/outputs/result.mp4"
+export OUTFILE="./outputs/result.mp4"
 export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
 export UPSCALE_FACTOR=1
-export FPS=10
+export FPS=5
diff --git a/AvatarChatbot/docker_compose/intel/cpu/xeon/set_env.sh b/AvatarChatbot/docker_compose/intel/cpu/xeon/set_env.sh
index e4f5c207b..49c7e4cdd 100644
--- a/AvatarChatbot/docker_compose/intel/cpu/xeon/set_env.sh
+++ b/AvatarChatbot/docker_compose/intel/cpu/xeon/set_env.sh
@@ -5,3 +5,32 @@
 pushd "../../../../../" > /dev/null
 source .set_env.sh
 popd > /dev/null
+
+export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
+export host_ip=$(hostname -I | awk '{print $1}')
+export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
+export WAV2LIP_ENDPOINT=http://$host_ip:7860
+export MEGA_SERVICE_HOST_IP=${host_ip}
+export WHISPER_SERVER_HOST_IP=${host_ip}
+export WHISPER_SERVER_PORT=7066
+export SPEECHT5_SERVER_HOST_IP=${host_ip}
+export SPEECHT5_SERVER_PORT=7055
+export LLM_SERVER_HOST_IP=${host_ip}
+export LLM_SERVER_PORT=3006
+export ANIMATION_SERVICE_HOST_IP=${host_ip}
+export ANIMATION_SERVICE_PORT=3008
+
+export MEGA_SERVICE_PORT=8888
+
+export DEVICE="cpu"
+export WAV2LIP_PORT=7860
+export INFERENCE_MODE='wav2lip+gfpgan'
+export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
+export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
+# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
+export AUDIO='None'
+export FACESIZE=96
+export OUTFILE="/outputs/result.mp4"
+export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
+export UPSCALE_FACTOR=1
+export FPS=10
diff --git a/AvatarChatbot/docker_compose/intel/hpu/gaudi/set_env.sh b/AvatarChatbot/docker_compose/intel/hpu/gaudi/set_env.sh
index e4f5c207b..a55f4b4f5 100644
--- a/AvatarChatbot/docker_compose/intel/hpu/gaudi/set_env.sh
+++ b/AvatarChatbot/docker_compose/intel/hpu/gaudi/set_env.sh
@@ -5,3 +5,35 @@
 pushd "../../../../../" > /dev/null
 source .set_env.sh
 popd > /dev/null
+
+export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
+export host_ip=$(hostname -I | awk '{print $1}')
+
+export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
+
+export WAV2LIP_ENDPOINT=http://$host_ip:7860
+
+export MEGA_SERVICE_HOST_IP=${host_ip}
+export WHISPER_SERVER_HOST_IP=${host_ip}
+export WHISPER_SERVER_PORT=7066
+export SPEECHT5_SERVER_HOST_IP=${host_ip}
+export SPEECHT5_SERVER_PORT=7055
+export LLM_SERVER_HOST_IP=${host_ip}
+export LLM_SERVER_PORT=3006
+export ANIMATION_SERVICE_HOST_IP=${host_ip}
+export ANIMATION_SERVICE_PORT=3008
+
+export MEGA_SERVICE_PORT=8888
+
+export DEVICE="hpu"
+export WAV2LIP_PORT=7860
+export INFERENCE_MODE='wav2lip+gfpgan'
+export CHECKPOINT_PATH='/usr/local/lib/python3.10/dist-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
+export FACE="/home/user/comps/animation/src/assets/img/avatar1.jpg"
+# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
+export AUDIO='None'
+export FACESIZE=96
+export OUTFILE="/outputs/result.mp4"
+export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
+export UPSCALE_FACTOR=1
+export FPS=10
diff --git a/AvatarChatbot/tests/README.md b/AvatarChatbot/tests/README.md
new file mode 100644
index 000000000..411afc28b
--- /dev/null
+++ b/AvatarChatbot/tests/README.md
@@ -0,0 +1,27 @@
+# AvatarChatbot E2E test scripts
+
+## Set the required environment variable
+
+```bash
+export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
+```
+
+## Run test
+
+On Intel Xeon with TGI:
+
+```bash
+bash test_compose_on_xeon.sh
+```
+
+On Intel Gaudi with TGI:
+
+```bash
+bash test_compose_on_gaudi.sh
+```
+
+On AMD ROCm with TGI:
+
+```bash
+bash test_compose_on_rocm.sh
+```
diff --git a/AvatarChatbot/tests/test_compose_on_gaudi.sh b/AvatarChatbot/tests/test_compose_on_gaudi.sh
index faf156907..c9d693c41 100755
--- a/AvatarChatbot/tests/test_compose_on_gaudi.sh
+++ b/AvatarChatbot/tests/test_compose_on_gaudi.sh
@@ -45,37 +45,7 @@ function build_docker_images() {
 
 function start_services() {
     cd $WORKPATH/docker_compose/intel/hpu/gaudi
-    export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
-    export host_ip=$(hostname -I | awk '{print $1}')
-
-    export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
-
-    export WAV2LIP_ENDPOINT=http://$host_ip:7860
-
-    export MEGA_SERVICE_HOST_IP=${host_ip}
-    export WHISPER_SERVER_HOST_IP=${host_ip}
-    export WHISPER_SERVER_PORT=7066
-    export SPEECHT5_SERVER_HOST_IP=${host_ip}
-    export SPEECHT5_SERVER_PORT=7055
-    export LLM_SERVER_HOST_IP=${host_ip}
-    export LLM_SERVER_PORT=3006
-    export ANIMATION_SERVICE_HOST_IP=${host_ip}
-    export ANIMATION_SERVICE_PORT=3008
-
-    export MEGA_SERVICE_PORT=8888
-
-    export DEVICE="hpu"
-    export WAV2LIP_PORT=7860
-    export INFERENCE_MODE='wav2lip+gfpgan'
-    export CHECKPOINT_PATH='/usr/local/lib/python3.10/dist-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
-    export FACE="/home/user/comps/animation/src/assets/img/avatar1.jpg"
-    # export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
-    export AUDIO='None'
-    export FACESIZE=96
-    export OUTFILE="/outputs/result.mp4"
-    export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
-    export UPSCALE_FACTOR=1
-    export FPS=10
+    source set_env.sh
 
     # Start Docker Containers
     docker compose up -d > ${LOG_PATH}/start_services_with_compose.log
diff --git a/AvatarChatbot/tests/test_compose_on_rocm.sh b/AvatarChatbot/tests/test_compose_on_rocm.sh
index 514921f6e..dab4564a2 100644
--- a/AvatarChatbot/tests/test_compose_on_rocm.sh
+++ b/AvatarChatbot/tests/test_compose_on_rocm.sh
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-set -e
+set -xe
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -42,48 +42,8 @@ function build_docker_images() {
 
 function start_services() {
     cd $WORKPATH/docker_compose/amd/gpu/rocm
-
-    export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
     export OPENAI_API_KEY=$OPENAI_API_KEY
-    export host_ip=${ip_address}
-
-    export TGI_SERVICE_PORT=3006
-    export TGI_LLM_ENDPOINT=http://${host_ip}:${TGI_SERVICE_PORT}
-    export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
-
-    export ASR_ENDPOINT=http://${host_ip}:7066
-    export TTS_ENDPOINT=http://${host_ip}:7055
-    export WAV2LIP_ENDPOINT=http://${host_ip}:7860
-
-    export MEGA_SERVICE_HOST_IP=${host_ip}
-    export ASR_SERVICE_HOST_IP=${host_ip}
-    export TTS_SERVICE_HOST_IP=${host_ip}
-    export LLM_SERVICE_HOST_IP=${host_ip}
-    export ANIMATION_SERVICE_HOST_IP=${host_ip}
-    export WHISPER_SERVER_HOST_IP=${host_ip}
-    export WHISPER_SERVER_PORT=7066
-
-    export SPEECHT5_SERVER_HOST_IP=${host_ip}
-    export SPEECHT5_SERVER_PORT=7055
-
-    export MEGA_SERVICE_PORT=8888
-    export ASR_SERVICE_PORT=3001
-    export TTS_SERVICE_PORT=3002
-    export LLM_SERVICE_PORT=3006
-    export ANIMATION_SERVICE_PORT=3008
-
-    export DEVICE="cpu"
-    export WAV2LIP_PORT=7860
-    export INFERENCE_MODE='wav2lip+gfpgan'
-    export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
-    export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
-    # export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
-    export AUDIO='None'
-    export FACESIZE=96
-    export OUTFILE="./outputs/result.mp4"
-    export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
-    export UPSCALE_FACTOR=1
-    export FPS=5
+    source set_env.sh
 
     # Start Docker Containers
     docker compose up -d --force-recreate
diff --git a/AvatarChatbot/tests/test_compose_on_xeon.sh b/AvatarChatbot/tests/test_compose_on_xeon.sh
index 8e9a04535..b0013aa2a 100755
--- a/AvatarChatbot/tests/test_compose_on_xeon.sh
+++ b/AvatarChatbot/tests/test_compose_on_xeon.sh
@@ -45,37 +45,7 @@ function build_docker_images() {
 
 function start_services() {
     cd $WORKPATH/docker_compose/intel/cpu/xeon
-    export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
-    export host_ip=$(hostname -I | awk '{print $1}')
-
-    export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
-
-    export WAV2LIP_ENDPOINT=http://$host_ip:7860
-
-    export MEGA_SERVICE_HOST_IP=${host_ip}
-    export WHISPER_SERVER_HOST_IP=${host_ip}
-    export WHISPER_SERVER_PORT=7066
-    export SPEECHT5_SERVER_HOST_IP=${host_ip}
-    export SPEECHT5_SERVER_PORT=7055
-    export LLM_SERVER_HOST_IP=${host_ip}
-    export LLM_SERVER_PORT=3006
-    export ANIMATION_SERVICE_HOST_IP=${host_ip}
-    export ANIMATION_SERVICE_PORT=3008
-
-    export MEGA_SERVICE_PORT=8888
-
-    export DEVICE="cpu"
-    export WAV2LIP_PORT=7860
-    export INFERENCE_MODE='wav2lip+gfpgan'
-    export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
-    export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
-    # export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
-    export AUDIO='None'
-    export FACESIZE=96
-    export OUTFILE="/outputs/result.mp4"
-    export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
-    export UPSCALE_FACTOR=1
-    export FPS=10
+    source set_env.sh
 
     # Start Docker Containers
     docker compose up -d