#!/bin/bash
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set -e
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
export MODEL_CACHE=${model_cache:-"./data"}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

# Build all service images from source, cloning GenAIComps and the Habana vLLM fork.
function build_docker_images() {
    opea_branch=${opea_branch:-"main"}
    # If the opea_branch isn't main, replace the git clone branch in the Dockerfiles.
    if [[ "${opea_branch}" != "main" ]]; then
        cd $WORKPATH
        OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
        NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
        find . -type f -name "Dockerfile*" | while read -r file; do
            echo "Processing file: $file"
            sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
        done
    fi

    cd $WORKPATH/docker_image_build
    git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
    git clone https://github.com/HabanaAI/vllm-fork.git
    cd vllm-fork/
    # Check out the latest tagged release of the vLLM fork.
    VLLM_VER=$(git describe --tags "$(git rev-list --tags --max-count=1)")
    echo "Check out vLLM tag ${VLLM_VER}"
    git checkout ${VLLM_VER} &> /dev/null && cd ../

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="audioqna audioqna-ui whisper-gaudi speecht5-gaudi vllm-gaudi"
    docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker images && sleep 1s
}

# Export the environment expected by the Gaudi compose file, start all containers,
# and wait for the vLLM and Whisper services to report readiness in their logs.
function start_services() {
    cd $WORKPATH/docker_compose/intel/hpu/gaudi
    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
    export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct
    export NUM_CARDS=1
    export BLOCK_SIZE=128
    export MAX_NUM_SEQS=256
    export MAX_SEQ_LEN_TO_CAPTURE=2048

    export MEGA_SERVICE_HOST_IP=${ip_address}
    export WHISPER_SERVER_HOST_IP=${ip_address}
    export SPEECHT5_SERVER_HOST_IP=${ip_address}
    export LLM_SERVER_HOST_IP=${ip_address}

    export WHISPER_SERVER_PORT=7066
    export SPEECHT5_SERVER_PORT=7055
    export LLM_SERVER_PORT=3006

    export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna
    export host_ip=${ip_address}

    # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

    # Start Docker Containers
    docker compose up -d > ${LOG_PATH}/start_services_with_compose.log

    # Wait (up to 200 x 5s) for the vLLM service to finish warming up.
    n=0
    until [[ "$n" -ge 200 ]]; do
        docker logs vllm-gaudi-service > $LOG_PATH/vllm_service_start.log 2>&1
        if grep -q complete $LOG_PATH/vllm_service_start.log; then
            break
        fi
        sleep 5s
        n=$((n+1))
    done

    # Wait (up to 100 x 5s) for the Whisper service to finish starting.
    n=0
    until [[ "$n" -ge 100 ]]; do
        docker logs whisper-service > $LOG_PATH/whisper_service_start.log 2>&1
        if grep -q "Uvicorn server setup on port" $LOG_PATH/whisper_service_start.log; then
            break
        fi
        sleep 5s
        n=$((n+1))
    done
}

# Send a small base64-encoded WAV to the megaservice endpoint and verify that the
# base64-decoded response is valid RIFF audio.
function validate_megaservice() {
    response=$(http_proxy="" curl http://${ip_address}:3008/v1/audioqna -XPOST -d '{"audio": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA", "max_tokens":64}' -H 'Content-Type: application/json')

    # always print the logs
    docker logs whisper-service > $LOG_PATH/whisper-service.log
    docker logs speecht5-service > $LOG_PATH/tts-service.log
    docker logs vllm-gaudi-service > $LOG_PATH/vllm-gaudi-service.log
    docker logs audioqna-gaudi-backend-server > $LOG_PATH/audioqna-gaudi-backend-server.log

    echo "$response" | sed 's/^"//;s/"$//' | base64 -d > speech.mp3

    if [[ $(file speech.mp3) == *"RIFF"* ]]; then
        echo "Result correct."
    else
        echo "Result wrong."
        exit 1
    fi
}

#function validate_frontend() {
#    cd $WORKPATH/ui/svelte
#    local conda_env_name="OPEA_e2e"
#    export PATH=${HOME}/miniforge3/bin/:$PATH
##    conda remove -n ${conda_env_name} --all -y
##    conda create -n ${conda_env_name} python=3.12 -y
#    source activate ${conda_env_name}
#
#    sed -i "s/localhost/$ip_address/g" playwright.config.ts
#
##    conda install -c conda-forge nodejs=22.6.0 -y
#    npm install && npm ci && npx playwright install --with-deps
#    node -v && npm -v && pip list
#
#    exit_status=0
#    npx playwright test || exit_status=$?
#
#    if [ $exit_status -ne 0 ]; then
#        echo "[TEST INFO]: ---------frontend test failed---------"
#        exit $exit_status
#    else
#        echo "[TEST INFO]: ---------frontend test passed---------"
#    fi
#}

function stop_docker() {
    cd $WORKPATH/docker_compose/intel/hpu/gaudi
    docker compose -f compose.yaml stop && docker compose rm -f
}

function main() {

    stop_docker
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_services
    validate_megaservice
    # validate_frontend

    stop_docker
    echo y | docker system prune

}

main
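
# Invocation sketch (illustrative only): the script filename below and the token value
# are assumptions, not defined anywhere in this script; the variables themselves
# (HUGGINGFACEHUB_API_TOKEN, IMAGE_REPO, IMAGE_TAG, opea_branch) are the ones read above.
#
#   export HUGGINGFACEHUB_API_TOKEN=<your_huggingface_token>
#   IMAGE_REPO=opea IMAGE_TAG=latest opea_branch=main bash test_compose_on_gaudi.sh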