#!/bin/bash
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
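#
# test_compose_vllm_on_rocm.sh: end-to-end test for the SearchQnA example on
# AMD ROCm GPUs with vLLM. Builds the service images, starts the Docker
# Compose stack, validates the megaservice API and the Svelte frontend, then
# tears everything down.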
set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
export MODEL_PATH=${model_cache:-"./data"}
WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
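
# Build all SearchQnA service images from the current GenAIComps sources and
# pull the prebuilt Hugging Face text-embeddings-inference image the stack uses.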
function build_docker_images() {
    cd $WORKPATH/docker_image_build
    git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen vllm-rocm"
    docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
    docker images && sleep 3s
}
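
# Bring up the Compose stack, then poll the vLLM container log (up to 100
# times, 10 s apart) until it reports "Application startup complete".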
function start_services() {
    cd $WORKPATH/docker_compose/amd/gpu/rocm/
    export HOST_IP=${ip_address}
    # export SEARCH_BACKEND_SERVICE_PORT=3008
    source ./set_env_vllm.sh
    sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

    # Start Docker containers
    docker compose -f compose_vllm.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

    n=0
    until [[ "$n" -ge 100 ]]; do
        docker logs search-vllm-service >& $LOG_PATH/search-vllm-service_start.log
        if grep -q "Application startup complete" $LOG_PATH/search-vllm-service_start.log; then
            break
        fi
        sleep 10s
        n=$((n+1))
    done
}
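
# Smoke-test the megaservice: POST a query to the /v1/searchqna endpoint and
# treat the response as correct if it contains the common English word "the".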
function validate_megaservice() {
    result=$(http_proxy="" curl http://${HOST_IP}:${SEARCH_BACKEND_SERVICE_PORT}/v1/searchqna -XPOST -d '{"messages": "What is black myth wukong?", "stream": "False"}' -H 'Content-Type: application/json')
    echo "$result"

    if [[ $result == *"the"* ]]; then
        docker logs search-web-retriever-server
        docker logs search-backend-server
        echo "Result correct."
    else
        docker logs search-web-retriever-server
        docker logs search-backend-server
        echo "Result wrong."
        exit 1
    fi
}
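
# Run the Playwright end-to-end suite against the Svelte UI from a dedicated
# conda environment, creating the environment on first use.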
function validate_frontend() {
    cd $WORKPATH/ui/svelte
    local conda_env_name="OPEA_e2e"
    export PATH=${HOME}/miniconda3/bin/:$PATH

    if conda info --envs | grep -q "$conda_env_name"; then
        echo "$conda_env_name exists!"
    else
        conda create -n ${conda_env_name} python=3.12 -y
    fi
    source activate ${conda_env_name}

    sed -i "s/localhost/$ip_address/g" playwright.config.ts

    conda install -c conda-forge nodejs=22.6.0 -y
    npm install && npm ci && npx playwright install --with-deps
    node -v && npm -v && pip list

    exit_status=0
    npx playwright test || exit_status=$?

    if [ $exit_status -ne 0 ]; then
        echo "[TEST INFO]: ---------frontend test failed---------"
        exit $exit_status
    else
        echo "[TEST INFO]: ---------frontend test passed---------"
    fi
}
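
# Stop and remove the Compose containers.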
function stop_docker() {
    cd $WORKPATH/docker_compose/amd/gpu/rocm/
    docker compose -f compose_vllm.yaml stop && docker compose -f compose_vllm.yaml rm -f
}
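
# Full test flow: clean start, rebuild images when testing against the "opea"
# registry, start the services, validate backend and frontend, then clean up.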
function main() {
    stop_docker
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_services
    sleep 20

    validate_megaservice
    validate_frontend

    stop_docker
    echo y | docker system prune
}
main
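
# Example invocation (illustrative only; the registry, tag, and model cache
# values below are the script defaults, adjust them to your environment):
#   IMAGE_REPO=opea IMAGE_TAG=latest model_cache=./data bash test_compose_vllm_on_rocm.sh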