Add new test cases for VisualQnA (#712)

* Add new test cases for VisualQnA

Signed-off-by: lvliang-intel <liang1.lv@intel.com>
Author: lvliang-intel
Date: 2024-09-04 13:21:50 +08:00
Committed by: GitHub
Parent: 9cf1d88b6d
Commit: 995a62c9d9

7 changed files with 376 additions and 75 deletions


@@ -0,0 +1,24 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

services:
  visualqna:
    build:
      args:
        http_proxy: ${http_proxy}
        https_proxy: ${https_proxy}
        no_proxy: ${no_proxy}
      dockerfile: ./Dockerfile
    image: ${REGISTRY:-opea}/visualqna:${TAG:-latest}
  visualqna-ui:
    build:
      context: ui
      dockerfile: ./docker/Dockerfile
    extends: visualqna
    image: ${REGISTRY:-opea}/visualqna-ui:${TAG:-latest}
  llm-visualqna-tgi:
    build:
      context: GenAIComps
      dockerfile: comps/lvms/Dockerfile_tgi
    extends: visualqna
    image: ${REGISTRY:-opea}/lvm-tgi:${TAG:-latest}
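For context, the new end-to-end test scripts added later in this commit drive this file directly; the invocation below is lifted from them, with `REGISTRY`/`TAG` falling back to the defaults declared in the `image:` fields:

```bash
# Build the three VisualQnA images defined in docker_build_compose.yaml.
# REGISTRY/TAG default to opea/latest via the image: fields above.
export REGISTRY=opea TAG=latest
docker compose -f docker_build_compose.yaml build visualqna visualqna-ui llm-visualqna-tgi --no-cache
```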


@@ -19,15 +19,10 @@ cd GenAIComps
 docker build --no-cache -t opea/lvm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/Dockerfile_tgi .
 ```
-### 3. Build TGI Gaudi Image
-Since TGI Gaudi has not supported llava-next in main branch, we'll need to build it from a PR branch for now.
+### 3. Pull TGI Gaudi Image
 ```bash
-git clone https://github.com/huggingface/tgi-gaudi.git
-cd tgi-gaudi/
-docker build -t opea/llava-tgi:latest .
-cd ../
+docker pull ghcr.io/huggingface/tgi-gaudi:2.0.4
 ```
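For anyone running the pulled image outside compose, a minimal sketch follows; the port mapping comes from the Gaudi compose file in this commit, and the Habana runtime flags are borrowed from the launch command used elsewhere in this repo, so treat them as assumptions rather than the canonical invocation:

```bash
# Sketch only: Habana flags assumed from this repo's other launch commands.
docker run -d -p 8399:80 --runtime=habana \
  -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none \
  --cap-add=sys_nice --ipc=host \
  ghcr.io/huggingface/tgi-gaudi:2.0.4 --model-id llava-hf/llava-v1.6-mistral-7b-hf
```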
### 4. Build MegaService Docker Image


@@ -3,7 +3,7 @@
 services:
   llava-tgi-service:
-    image: ${REGISTRY:-opea}/llava-tgi:${TAG:-latest}
+    image: ghcr.io/huggingface/tgi-gaudi:2.0.4
     container_name: tgi-llava-gaudi-server
     ports:
       - "8399:80"


@@ -19,7 +19,7 @@ services:
     command: --model-id ${LVM_MODEL_ID} --max-input-length 4096 --max-total-tokens 8192 --cuda-graphs 0
   lvm-tgi:
     image: ${REGISTRY:-opea}/lvm-tgi:${TAG:-latest}
-    container_name: lvm-tgi-server
+    container_name: lvm-tgi-xeon-server
     depends_on:
       - llava-tgi-service
     ports:
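The rename matters downstream: the new Xeon test script polls this exact container name while waiting for startup. A condensed sketch of that readiness loop:

```bash
# Mirrors the readiness loop in the new Xeon test script: up to 100 tries, 5s apart.
for n in $(seq 1 100); do
    docker logs lvm-tgi-xeon-server 2>&1 | grep -q Connected && break
    sleep 5s
done
```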


@@ -1,66 +0,0 @@
#!/bin/bash
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set -xe

function test_env_setup() {
    WORKPATH=$(dirname "$PWD")
    LOG_PATH="$WORKPATH/tests/inference.log"
    CONTAINER_NAME="test-llava-gaudi-service"
    cd $WORKPATH
}

function launch_llava_service() {
    cd ${WORKPATH}
    cd serving/
    local port=8855
    docker build . --build-arg http_proxy=${http_proxy} --build-arg https_proxy=${http_proxy} -t intel/gen-ai-examples:${CONTAINER_NAME}
    docker run -d --name=${CONTAINER_NAME} -p ${port}:8000 -v ~/.cache/huggingface/hub/:/root/.cache/huggingface/hub/ -e http_proxy=$http_proxy -e https_proxy=$http_proxy \
        --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --ipc=host intel/gen-ai-examples:${CONTAINER_NAME}
    sleep 3m
}

function run_tests() {
    cd $WORKPATH
    local port=8855
    curl localhost:${port}/health -v 2>&1 | tee $LOG_PATH
}

function check_response() {
    cd $WORKPATH
    echo "Checking response"
    local status=false
    if [[ -f $LOG_PATH ]] && [[ $(grep -c "200 OK" $LOG_PATH) != 0 ]]; then
        status=true
    fi

    if [ $status == false ]; then
        echo "Response check failed, please check the logs in artifacts!"
        exit 1
    else
        echo "Response check succeed!"
    fi
}

function docker_stop() {
    local container_name=$1
    cid=$(docker ps -aq --filter "name=$container_name")
    if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
}

function main() {
    test_env_setup
    docker_stop $CONTAINER_NAME && sleep 5s
    launch_llava_service
    run_tests
    check_response
    docker_stop $CONTAINER_NAME && sleep 5s
    echo y | docker system prune
}

main


@@ -0,0 +1,174 @@
#!/bin/bash
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set -x

IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
    cd $WORKPATH/docker
    git clone https://github.com/opea-project/GenAIComps.git

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="visualqna visualqna-ui llm-visualqna-tgi"
    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.4
    docker images && sleep 1s
}

function start_services() {
    cd $WORKPATH/docker/gaudi

    export LVM_MODEL_ID="llava-hf/llava-v1.6-mistral-7b-hf"
    export LVM_ENDPOINT="http://${ip_address}:8399"
    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
    export LVM_SERVICE_PORT=9399
    export MEGA_SERVICE_HOST_IP=${ip_address}
    export LVM_SERVICE_HOST_IP=${ip_address}
    export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:8888/v1/visualqna"

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

    # Start Docker Containers
    docker compose up -d > ${LOG_PATH}/start_services_with_compose.log

    # Wait (up to 100 tries, 5s apart) for the LVM wrapper to log a connected TGI backend.
    n=0
    until [[ "$n" -ge 100 ]]; do
        docker logs lvm-tgi-gaudi-server > ${LOG_PATH}/lvm_tgi_service_start.log
        if grep -q Connected ${LOG_PATH}/lvm_tgi_service_start.log; then
            break
        fi
        sleep 5s
        n=$((n+1))
    done
}

function validate_services() {
    local URL="$1"
    local EXPECTED_RESULT="$2"
    local SERVICE_NAME="$3"
    local DOCKER_NAME="$4"
    local INPUT_DATA="$5"

    # First request checks the status code; a second request captures the body.
    local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL")
    if [ "$HTTP_STATUS" -eq 200 ]; then
        echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."

        local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log)

        if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then
            echo "[ $SERVICE_NAME ] Content is as expected."
        else
            echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT"
            docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
            exit 1
        fi
    else
        echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS"
        docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
        exit 1
    fi
    sleep 1s
}

function validate_microservices() {
    # Check if the microservices are running correctly.

    # lvm microservice
    validate_services \
        "${ip_address}:9399/v1/lvm" \
        "The image" \
        "lvm-tgi" \
        "lvm-tgi-gaudi-server" \
        '{"image": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC", "prompt":"What is this?"}'
}

function validate_megaservice() {
    # Curl the Mega Service
    validate_services \
        "${ip_address}:8888/v1/visualqna" \
        "The image" \
        "visualqna-gaudi-backend-server" \
        "visualqna-gaudi-backend-server" \
        '{
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": "What'\''s in this image?"
                        },
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://www.ilankelman.org/stopsigns/australia.jpg"
                            }
                        }
                    ]
                }
            ],
            "max_tokens": 300
        }'
}

function validate_frontend() {
    cd $WORKPATH/docker/ui/svelte
    local conda_env_name="OPEA_e2e"
    export PATH=${HOME}/miniforge3/bin/:$PATH
    if conda info --envs | grep -q "$conda_env_name"; then
        echo "$conda_env_name exists!"
    else
        conda create -n ${conda_env_name} python=3.12 -y
    fi
    source activate ${conda_env_name}

    sed -i "s/localhost/$ip_address/g" playwright.config.ts

    conda install -c conda-forge nodejs -y
    npm install && npm ci && npx playwright install --with-deps
    node -v && npm -v && pip list

    exit_status=0
    npx playwright test || exit_status=$?

    if [ $exit_status -ne 0 ]; then
        echo "[TEST INFO]: ---------frontend test failed---------"
        exit $exit_status
    else
        echo "[TEST INFO]: ---------frontend test passed---------"
    fi
}

function stop_docker() {
    cd $WORKPATH/docker/gaudi
    docker compose stop && docker compose rm -f
}

function main() {
    stop_docker
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_services

    validate_microservices
    validate_megaservice
    #validate_frontend

    stop_docker
    echo y | docker system prune
}

main
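Running the Gaudi suite by hand should look roughly like this; the script's path is not shown in this diff, so the filename below is a guess, and `HUGGINGFACEHUB_API_TOKEN` must already be set since `start_services` forwards it to the containers:

```bash
# Filename is hypothetical; the diff does not include the script's path.
export HUGGINGFACEHUB_API_TOKEN=<token>   # forwarded by start_services
IMAGE_REPO=opea IMAGE_TAG=latest bash test_visualqna_on_gaudi.sh
```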


@@ -0,0 +1,174 @@
#!/bin/bash
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set -x

IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
    cd $WORKPATH/docker
    git clone https://github.com/opea-project/GenAIComps.git

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="visualqna visualqna-ui llm-visualqna-tgi"
    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/text-generation-inference:2.2.0
    docker images && sleep 1s
}

function start_services() {
    cd $WORKPATH/docker/xeon

    export LVM_MODEL_ID="llava-hf/llava-v1.6-mistral-7b-hf"
    export LVM_ENDPOINT="http://${ip_address}:8399"
    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
    export LVM_SERVICE_PORT=9399
    export MEGA_SERVICE_HOST_IP=${ip_address}
    export LVM_SERVICE_HOST_IP=${ip_address}
    export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:8888/v1/visualqna"

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env

    # Start Docker Containers
    docker compose up -d > ${LOG_PATH}/start_services_with_compose.log

    # Wait (up to 100 tries, 5s apart) for the LVM wrapper to log a connected TGI backend.
    n=0
    until [[ "$n" -ge 100 ]]; do
        docker logs lvm-tgi-xeon-server > ${LOG_PATH}/lvm_tgi_service_start.log
        if grep -q Connected ${LOG_PATH}/lvm_tgi_service_start.log; then
            break
        fi
        sleep 5s
        n=$((n+1))
    done
}

function validate_services() {
    local URL="$1"
    local EXPECTED_RESULT="$2"
    local SERVICE_NAME="$3"
    local DOCKER_NAME="$4"
    local INPUT_DATA="$5"

    # First request checks the status code; a second request captures the body.
    local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL")
    if [ "$HTTP_STATUS" -eq 200 ]; then
        echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."

        local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log)

        if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then
            echo "[ $SERVICE_NAME ] Content is as expected."
        else
            echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT"
            docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
            exit 1
        fi
    else
        echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS"
        docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
        exit 1
    fi
    sleep 1s
}

function validate_microservices() {
    # Check if the microservices are running correctly.

    # lvm microservice
    validate_services \
        "${ip_address}:9399/v1/lvm" \
        "The image" \
        "lvm-tgi" \
        "lvm-tgi-xeon-server" \
        '{"image": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC", "prompt":"What is this?"}'
}

function validate_megaservice() {
    # Curl the Mega Service
    validate_services \
        "${ip_address}:8888/v1/visualqna" \
        "The image" \
        "visualqna-xeon-backend-server" \
        "visualqna-xeon-backend-server" \
        '{
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": "What'\''s in this image?"
                        },
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://www.ilankelman.org/stopsigns/australia.jpg"
                            }
                        }
                    ]
                }
            ],
            "max_tokens": 300
        }'
}

function validate_frontend() {
    cd $WORKPATH/docker/ui/svelte
    local conda_env_name="OPEA_e2e"
    export PATH=${HOME}/miniforge3/bin/:$PATH
    if conda info --envs | grep -q "$conda_env_name"; then
        echo "$conda_env_name exists!"
    else
        conda create -n ${conda_env_name} python=3.12 -y
    fi
    source activate ${conda_env_name}

    sed -i "s/localhost/$ip_address/g" playwright.config.ts

    conda install -c conda-forge nodejs -y
    npm install && npm ci && npx playwright install --with-deps
    node -v && npm -v && pip list

    exit_status=0
    npx playwright test || exit_status=$?

    if [ $exit_status -ne 0 ]; then
        echo "[TEST INFO]: ---------frontend test failed---------"
        exit $exit_status
    else
        echo "[TEST INFO]: ---------frontend test passed---------"
    fi
}

function stop_docker() {
    cd $WORKPATH/docker/xeon
    docker compose stop && docker compose rm -f
}

function main() {
    stop_docker
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_services

    validate_microservices
    validate_megaservice
    #validate_frontend

    stop_docker
    echo y | docker system prune
}

main
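If the `lvm-tgi` check fails on Xeon, the script's probe can be reproduced directly; the host, port, and payload below are lifted verbatim from `validate_microservices` (the base64 string is the script's own tiny test PNG):

```bash
# Reproduce the lvm microservice probe by hand, exactly as validate_services does.
ip_address=$(hostname -I | awk '{print $1}')
curl http://${ip_address}:9399/v1/lvm -X POST -H 'Content-Type: application/json' \
    -d '{"image": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC", "prompt": "What is this?"}'
```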