#!/bin/bash
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
export MODEL_CACHE=${model_cache:-"./data"}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
    opea_branch=${opea_branch:-"main"}
    # If opea_branch isn't main, replace the git clone branch in the Dockerfiles.
    if [[ "${opea_branch}" != "main" ]]; then
        cd $WORKPATH
        OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
        NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
        find . -type f -name "Dockerfile*" | while read -r file; do
            echo "Processing file: $file"
            sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
        done
    fi

    cd $WORKPATH/docker_image_build
    git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="codegen codegen-ui llm-textgen"
    docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker pull ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu
    docker images && sleep 1s
}

function start_services() {
    cd $WORKPATH/docker_compose/intel/cpu/xeon/
    export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
    export TGI_LLM_ENDPOINT="http://${ip_address}:8028"
    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
    export MEGA_SERVICE_HOST_IP=${ip_address}
    export LLM_SERVICE_HOST_IP=${ip_address}
    export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:7778/v1/codegen"
    export host_ip=${ip_address}

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

    # Start Docker Containers
    docker compose up -d > ${LOG_PATH}/start_services_with_compose.log

    # Poll the TGI logs (up to ~500s) until the model server reports it is connected.
    n=0
    until [[ "$n" -ge 100 ]]; do
        docker logs tgi-service > ${LOG_PATH}/tgi_service_start.log
        if grep -q Connected ${LOG_PATH}/tgi_service_start.log; then
            break
        fi
        sleep 5s
        n=$((n+1))
    done
}

function validate_services() {
    local URL="$1"
    local EXPECTED_RESULT="$2"
    local SERVICE_NAME="$3"
    local DOCKER_NAME="$4"
    local INPUT_DATA="$5"

    local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL")
    if [ "$HTTP_STATUS" -eq 200 ]; then
        echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."

        local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log)

        if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then
            echo "[ $SERVICE_NAME ] Content is as expected."
        else
            echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT"
            docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
            exit 1
        fi
    else
        echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS"
        docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
        exit 1
    fi
    sleep 5s
}
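# For reference, the helper above takes its arguments in this order:
#   validate_services <URL> <EXPECTED_RESULT> <SERVICE_NAME> <DOCKER_NAME> <INPUT_DATA>
# A minimal illustrative call (hypothetical values, kept commented out so it
# never runs as part of the test flow):
#   validate_services "localhost:9000/v1/health" "ok" "health-check" "some-container" '{}'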
function validate_microservices() {
    # tgi for llm service
    validate_services \
        "${ip_address}:8028/generate" \
        "generated_text" \
        "tgi-llm" \
        "tgi-service" \
        '{"inputs":"def print_hello_world():","parameters":{"max_new_tokens":256, "do_sample": true}}'

    # llm microservice
    validate_services \
        "${ip_address}:9000/v1/chat/completions" \
        "data: " \
        "llm" \
        "llm-textgen-server" \
        '{"query":"def print_hello_world():"}'
}

function validate_megaservice() {
    # Curl the Mega Service
    validate_services \
        "${ip_address}:7778/v1/codegen" \
        "print" \
        "mega-codegen" \
        "codegen-xeon-backend-server" \
        '{"messages": "def print_hello_world():"}'
}

function validate_frontend() {
    cd $WORKPATH/ui/svelte
    local conda_env_name="OPEA_e2e"
    export PATH=${HOME}/miniforge3/bin/:$PATH
    if conda info --envs | grep -q "$conda_env_name"; then
        echo "$conda_env_name exists!"
    else
        conda create -n ${conda_env_name} python=3.12 -y
    fi
    source activate ${conda_env_name}

    sed -i "s/localhost/$ip_address/g" playwright.config.ts

    conda install -c conda-forge nodejs=22.6.0 -y

    npm install && npm ci && npx playwright install --with-deps
    node -v && npm -v && pip list

    exit_status=0
    npx playwright test || exit_status=$?

    if [ $exit_status -ne 0 ]; then
        echo "[TEST INFO]: ---------frontend test failed---------"
        exit $exit_status
    else
        echo "[TEST INFO]: ---------frontend test passed---------"
    fi
}

function stop_docker() {
    cd $WORKPATH/docker_compose/intel/cpu/xeon/
    docker compose stop && docker compose rm -f
}

function main() {
    stop_docker
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi

    start_services

    validate_microservices
    validate_megaservice
    validate_frontend

    stop_docker
    echo y | docker system prune
}

main
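# Example invocation (illustrative only; the script name and token value are
# placeholders, and every variable except HUGGINGFACEHUB_API_TOKEN falls back
# to the defaults set at the top of this file):
#   export HUGGINGFACEHUB_API_TOKEN=<your_hf_token>
#   IMAGE_REPO=opea IMAGE_TAG=latest opea_branch=main bash <this_script>.sh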