Integrate AvatarChatbot set_env to ut scripts. (#1961)
Signed-off-by: ZePan110 <ze.pan@intel.com>
@@ -41,7 +41,7 @@ export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
 # export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
 export AUDIO='None'
 export FACESIZE=96
-export OUTFILE="/outputs/result.mp4"
+export OUTFILE="./outputs/result.mp4"
 export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
 export UPSCALE_FACTOR=1
-export FPS=10
+export FPS=5
@@ -5,3 +5,32 @@
 pushd "../../../../../" > /dev/null
 source .set_env.sh
 popd > /dev/null
+
+export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
+export host_ip=$(hostname -I | awk '{print $1}')
+export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
+export WAV2LIP_ENDPOINT=http://$host_ip:7860
+export MEGA_SERVICE_HOST_IP=${host_ip}
+export WHISPER_SERVER_HOST_IP=${host_ip}
+export WHISPER_SERVER_PORT=7066
+export SPEECHT5_SERVER_HOST_IP=${host_ip}
+export SPEECHT5_SERVER_PORT=7055
+export LLM_SERVER_HOST_IP=${host_ip}
+export LLM_SERVER_PORT=3006
+export ANIMATION_SERVICE_HOST_IP=${host_ip}
+export ANIMATION_SERVICE_PORT=3008
+
+export MEGA_SERVICE_PORT=8888
+
+export DEVICE="cpu"
+export WAV2LIP_PORT=7860
+export INFERENCE_MODE='wav2lip+gfpgan'
+export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
+export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
+# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
+export AUDIO='None'
+export FACESIZE=96
+export OUTFILE="/outputs/result.mp4"
+export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
+export UPSCALE_FACTOR=1
+export FPS=10
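For orientation, a UT script consumes this file the same way the test-script hunks below switch to: it changes into the matching docker_compose directory and sources it. A minimal sketch, with the Xeon path used as an illustrative example (the WORKPATH assignment is an assumption, not taken from this diff):

```bash
# Illustrative only: how a test script picks up these exports.
WORKPATH=$(dirname "$PWD")                      # assumed to resolve to .../AvatarChatbot
cd "$WORKPATH/docker_compose/intel/cpu/xeon"    # directory that contains set_env.sh
source set_env.sh                               # defines host_ip, service ports, model IDs, ...
echo "Wav2Lip endpoint: $WAV2LIP_ENDPOINT"      # e.g. http://<host_ip>:7860
```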
@@ -5,3 +5,35 @@
 pushd "../../../../../" > /dev/null
 source .set_env.sh
 popd > /dev/null
+
+export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
+export host_ip=$(hostname -I | awk '{print $1}')
+
+export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
+
+export WAV2LIP_ENDPOINT=http://$host_ip:7860
+
+export MEGA_SERVICE_HOST_IP=${host_ip}
+export WHISPER_SERVER_HOST_IP=${host_ip}
+export WHISPER_SERVER_PORT=7066
+export SPEECHT5_SERVER_HOST_IP=${host_ip}
+export SPEECHT5_SERVER_PORT=7055
+export LLM_SERVER_HOST_IP=${host_ip}
+export LLM_SERVER_PORT=3006
+export ANIMATION_SERVICE_HOST_IP=${host_ip}
+export ANIMATION_SERVICE_PORT=3008
+
+export MEGA_SERVICE_PORT=8888
+
+export DEVICE="hpu"
+export WAV2LIP_PORT=7860
+export INFERENCE_MODE='wav2lip+gfpgan'
+export CHECKPOINT_PATH='/usr/local/lib/python3.10/dist-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
+export FACE="/home/user/comps/animation/src/assets/img/avatar1.jpg"
+# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
+export AUDIO='None'
+export FACESIZE=96
+export OUTFILE="/outputs/result.mp4"
+export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
+export UPSCALE_FACTOR=1
+export FPS=10
AvatarChatbot/tests/README.md (new file, 27 lines)
@@ -0,0 +1,27 @@
+# AvatarChatbot E2E test scripts
+
+## Set the required environment variable
+
+```bash
+export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
+```
+
+## Run test
+
+On Intel Xeon with TGI:
+
+```bash
+bash test_compose_on_xeon.sh
+```
+
+On Intel Gaudi with TGI:
+
+```bash
+bash test_compose_on_gaudi.sh
+```
+
+On AMD ROCm with TGI:
+
+```bash
+bash test_compose_on_rocm.sh
+```
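Putting the two README steps together, a complete run on Xeon therefore amounts to:

```bash
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
bash test_compose_on_xeon.sh   # or test_compose_on_gaudi.sh / test_compose_on_rocm.sh
```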
@@ -45,37 +45,7 @@ function build_docker_images() {
 function start_services() {
 cd $WORKPATH/docker_compose/intel/hpu/gaudi
 
-export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
-export host_ip=$(hostname -I | awk '{print $1}')
-
-export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
-
-export WAV2LIP_ENDPOINT=http://$host_ip:7860
-
-export MEGA_SERVICE_HOST_IP=${host_ip}
-export WHISPER_SERVER_HOST_IP=${host_ip}
-export WHISPER_SERVER_PORT=7066
-export SPEECHT5_SERVER_HOST_IP=${host_ip}
-export SPEECHT5_SERVER_PORT=7055
-export LLM_SERVER_HOST_IP=${host_ip}
-export LLM_SERVER_PORT=3006
-export ANIMATION_SERVICE_HOST_IP=${host_ip}
-export ANIMATION_SERVICE_PORT=3008
-
-export MEGA_SERVICE_PORT=8888
-
-export DEVICE="hpu"
-export WAV2LIP_PORT=7860
-export INFERENCE_MODE='wav2lip+gfpgan'
-export CHECKPOINT_PATH='/usr/local/lib/python3.10/dist-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
-export FACE="/home/user/comps/animation/src/assets/img/avatar1.jpg"
-# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
-export AUDIO='None'
-export FACESIZE=96
-export OUTFILE="/outputs/result.mp4"
-export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
-export UPSCALE_FACTOR=1
-export FPS=10
+source set_env.sh
 
 # Start Docker Containers
 docker compose up -d > ${LOG_PATH}/start_services_with_compose.log
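After this change, start_services() on Gaudi reduces to roughly the following. This is a sketch assembled from the context lines that survive the hunk, not a verbatim copy of the file; the Xeon and ROCm scripts follow the same pattern with their own compose directories:

```bash
function start_services() {
    cd $WORKPATH/docker_compose/intel/hpu/gaudi

    source set_env.sh   # replaces the long list of inline exports removed above

    # Start Docker Containers
    docker compose up -d > ${LOG_PATH}/start_services_with_compose.log
    # ... the rest of the function is outside this hunk ...
}
```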
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-set -e
+set -xe
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
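The switch from `set -e` to `set -xe` keeps the fail-fast behaviour and additionally traces every command, which makes CI logs easier to read. A tiny illustration of the combined flags (not from the PR):

```bash
#!/usr/bin/env bash
set -xe                  # -x: echo each command before running it; -e: abort on the first failure
echo "building images"   # the log shows: + echo 'building images'
false                    # returns a non-zero status, so the script stops here because of -e
echo "never reached"
```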
@@ -42,48 +42,8 @@ function build_docker_images() {
 function start_services() {
 cd $WORKPATH/docker_compose/amd/gpu/rocm
 
-export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
-export OPENAI_API_KEY=$OPENAI_API_KEY
-export host_ip=${ip_address}
-
-export TGI_SERVICE_PORT=3006
-export TGI_LLM_ENDPOINT=http://${host_ip}:${TGI_SERVICE_PORT}
-export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
-
-export ASR_ENDPOINT=http://${host_ip}:7066
-export TTS_ENDPOINT=http://${host_ip}:7055
-export WAV2LIP_ENDPOINT=http://${host_ip}:7860
-
-export MEGA_SERVICE_HOST_IP=${host_ip}
-export ASR_SERVICE_HOST_IP=${host_ip}
-export TTS_SERVICE_HOST_IP=${host_ip}
-export LLM_SERVICE_HOST_IP=${host_ip}
-export ANIMATION_SERVICE_HOST_IP=${host_ip}
-export WHISPER_SERVER_HOST_IP=${host_ip}
-export WHISPER_SERVER_PORT=7066
-
-export SPEECHT5_SERVER_HOST_IP=${host_ip}
-export SPEECHT5_SERVER_PORT=7055
-
-export MEGA_SERVICE_PORT=8888
-export ASR_SERVICE_PORT=3001
-export TTS_SERVICE_PORT=3002
-export LLM_SERVICE_PORT=3006
-export ANIMATION_SERVICE_PORT=3008
-
-export DEVICE="cpu"
-export WAV2LIP_PORT=7860
-export INFERENCE_MODE='wav2lip+gfpgan'
-export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
-export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
-# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
-export AUDIO='None'
-export FACESIZE=96
-export OUTFILE="./outputs/result.mp4"
-export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
-export UPSCALE_FACTOR=1
-export FPS=5
+source set_env.sh
 
 # Start Docker Containers
 docker compose up -d --force-recreate
 
@@ -45,37 +45,7 @@ function build_docker_images() {
 function start_services() {
 cd $WORKPATH/docker_compose/intel/cpu/xeon
 
-export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
-export host_ip=$(hostname -I | awk '{print $1}')
-
-export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
-
-export WAV2LIP_ENDPOINT=http://$host_ip:7860
-
-export MEGA_SERVICE_HOST_IP=${host_ip}
-export WHISPER_SERVER_HOST_IP=${host_ip}
-export WHISPER_SERVER_PORT=7066
-export SPEECHT5_SERVER_HOST_IP=${host_ip}
-export SPEECHT5_SERVER_PORT=7055
-export LLM_SERVER_HOST_IP=${host_ip}
-export LLM_SERVER_PORT=3006
-export ANIMATION_SERVICE_HOST_IP=${host_ip}
-export ANIMATION_SERVICE_PORT=3008
-
-export MEGA_SERVICE_PORT=8888
-
-export DEVICE="cpu"
-export WAV2LIP_PORT=7860
-export INFERENCE_MODE='wav2lip+gfpgan'
-export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
-export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
-# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
-export AUDIO='None'
-export FACESIZE=96
-export OUTFILE="/outputs/result.mp4"
-export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
-export UPSCALE_FACTOR=1
-export FPS=10
+source set_env.sh
 
 # Start Docker Containers
 docker compose up -d
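For completeness, a hypothetical smoke check one might run after the compose stack comes up; it is not part of this PR and only assumes the megaservice publishes ${MEGA_SERVICE_PORT} (8888) on the host:

```bash
# Hypothetical post-startup check (illustrative, not from this PR).
for i in $(seq 1 30); do
  # curl without -f: connection failures return non-zero, any HTTP reply returns zero
  if curl -s -o /dev/null "http://${host_ip}:${MEGA_SERVICE_PORT}"; then
    echo "mega service is reachable on port ${MEGA_SERVICE_PORT}"
    break
  fi
  sleep 5
done
docker compose ps   # show the status of the AvatarChatbot containers
```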