diff --git a/DocSum/docker_compose/amd/gpu/rocm/compose.yaml b/DocSum/docker_compose/amd/gpu/rocm/compose.yaml
index d2d41bab4..616f53121 100644
--- a/DocSum/docker_compose/amd/gpu/rocm/compose.yaml
+++ b/DocSum/docker_compose/amd/gpu/rocm/compose.yaml
@@ -28,6 +28,7 @@ services:
       - seccomp:unconfined
     ipc: host
     command: --model-id ${DOCSUM_LLM_MODEL_ID}
+
   docsum-llm-server:
     image: ${REGISTRY:-opea}/llm-docsum-tgi:${TAG:-latest}
     container_name: docsum-llm-server
@@ -53,6 +54,47 @@ services:
       TGI_LLM_ENDPOINT: "http://${HOST_IP}:${DOCSUM_TGI_SERVICE_PORT}"
       HUGGINGFACEHUB_API_TOKEN: ${DOCSUM_HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
+
+  whisper:
+    image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
+    container_name: whisper-service
+    ports:
+      - "7066:7066"
+    ipc: host
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+    restart: unless-stopped
+
+  dataprep-audio2text:
+    image: ${REGISTRY:-opea}/dataprep-audio2text:${TAG:-latest}
+    container_name: dataprep-audio2text-service
+    ports:
+      - "9099:9099"
+    ipc: host
+    environment:
+      A2T_ENDPOINT: ${A2T_ENDPOINT}
+
+  dataprep-video2audio:
+    image: ${REGISTRY:-opea}/dataprep-video2audio:${TAG:-latest}
+    container_name: dataprep-video2audio-service
+    ports:
+      - "7078:7078"
+    ipc: host
+    environment:
+      V2A_ENDPOINT: ${V2A_ENDPOINT}
+
+  dataprep-multimedia2text:
+    image: ${REGISTRY:-opea}/dataprep-multimedia2text:${TAG:-latest}
+    container_name: dataprep-multimedia2text
+    ports:
+      - "7079:7079"
+    ipc: host
+    environment:
+      V2A_ENDPOINT: ${V2A_ENDPOINT}
+      A2T_ENDPOINT: ${A2T_ENDPOINT}
+
   docsum-backend-server:
     image: ${REGISTRY:-opea}/docsum:${TAG:-latest}
     container_name: docsum-backend-server
@@ -66,21 +108,24 @@ services:
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
       - MEGA_SERVICE_HOST_IP=${HOST_IP}
+      - DATA_SERVICE_HOST_IP=${DATA_SERVICE_HOST_IP}
       - LLM_SERVICE_HOST_IP=${HOST_IP}
     ipc: host
     restart: always
-  docsum-ui-server:
-    image: ${REGISTRY:-opea}/docsum-ui:${TAG:-latest}
+
+  docsum-gradio-ui:
+    image: ${REGISTRY:-opea}/docsum-gradio-ui:${TAG:-latest}
     container_name: docsum-ui-server
     depends_on:
       - docsum-backend-server
     ports:
-      - "${DOCSUM_FRONTEND_PORT}:5173"
+      - "5173:5173"
     environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
-      - DOC_BASE_URL="http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum"
+      - BACKEND_SERVICE_ENDPOINT=${BACKEND_SERVICE_ENDPOINT}
+      - DOC_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
     ipc: host
     restart: always
diff --git a/DocSum/docker_compose/amd/gpu/rocm/set_env.sh b/DocSum/docker_compose/amd/gpu/rocm/set_env.sh
index 16e9e4750..37428797d 100644
--- a/DocSum/docker_compose/amd/gpu/rocm/set_env.sh
+++ b/DocSum/docker_compose/amd/gpu/rocm/set_env.sh
@@ -13,3 +13,11 @@ export DOCSUM_LLM_SERVER_PORT="9000"
 export DOCSUM_BACKEND_SERVER_PORT="8888"
 export DOCSUM_FRONTEND_PORT="5173"
 export BACKEND_SERVICE_ENDPOINT="http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum"
+export V2A_SERVICE_HOST_IP=${host_ip}
+export V2A_ENDPOINT=http://$host_ip:7078
+export A2T_ENDPOINT=http://$host_ip:7066
+export A2T_SERVICE_HOST_IP=${host_ip}
+export A2T_SERVICE_PORT=9099
+export DATA_ENDPOINT=http://$host_ip:7079
+export DATA_SERVICE_HOST_IP=${host_ip}
+export DATA_SERVICE_PORT=7079
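Note on the new exports: the added endpoints are all derived from host_ip (lower-case), which does not appear to be set anywhere in the portion of set_env.sh shown here, so the caller must export it before sourcing the script. A minimal sanity sketch, assuming a Linux host where hostname -I yields the primary address:

    export host_ip=$(hostname -I | awk '{print $1}')
    source DocSum/docker_compose/amd/gpu/rocm/set_env.sh
    for var in V2A_ENDPOINT A2T_ENDPOINT DATA_ENDPOINT; do echo "$var=${!var}"; done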
diff --git a/DocSum/tests/test_compose_on_rocm.sh b/DocSum/tests/test_compose_on_rocm.sh
index 92ed9cfe1..581fde013 100644
--- a/DocSum/tests/test_compose_on_rocm.sh
+++ b/DocSum/tests/test_compose_on_rocm.sh
@@ -19,35 +19,41 @@ export TAG=${IMAGE_TAG}
 export DOCSUM_TGI_IMAGE="ghcr.io/huggingface/text-generation-inference:2.3.1-rocm"
 export DOCSUM_LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
 export HOST_IP=${ip_address}
+export host_ip=${ip_address}
 export DOCSUM_TGI_SERVICE_PORT="8008"
-export DOCSUM_TGI_LLM_ENDPOINT="http://${HOST_IP}:8008"
+export DOCSUM_TGI_LLM_ENDPOINT="http://${host_ip}:8008"
 export DOCSUM_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
 export DOCSUM_LLM_SERVER_PORT="9000"
 export DOCSUM_BACKEND_SERVER_PORT="8888"
 export DOCSUM_FRONTEND_PORT="5552"
-export MEGA_SERVICE_HOST_IP=${ip_address}
-export LLM_SERVICE_HOST_IP=${ip_address}
+export MEGA_SERVICE_HOST_IP=${host_ip}
+export LLM_SERVICE_HOST_IP=${host_ip}
 export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:8888/v1/docsum"
 export DOCSUM_CARD_ID="card1"
 export DOCSUM_RENDER_ID="renderD136"
+export V2A_SERVICE_HOST_IP=${host_ip}
+export V2A_ENDPOINT=http://${host_ip}:7078
+export A2T_ENDPOINT=http://${host_ip}:7066
+export A2T_SERVICE_HOST_IP=${host_ip}
+export A2T_SERVICE_PORT=9099
+export DATA_ENDPOINT=http://${host_ip}:7079
+export DATA_SERVICE_HOST_IP=${host_ip}
+export DATA_SERVICE_PORT=7079

 function build_docker_images() {
-    cd "$WORKPATH"/docker_image_build
+    cd $WORKPATH/docker_image_build
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../

     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="docsum docsum-ui llm-docsum-tgi"
-    docker compose -f build.yaml build ${service_list} --no-cache > "${LOG_PATH}"/docker_image_build.log
+    service_list="docsum docsum-gradio-ui whisper dataprep-multimedia2text dataprep-audio2text dataprep-video2audio llm-docsum-tgi"
+    docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

-    docker pull ghcr.io/huggingface/text-generation-inference:2.3.1-rocm
+    docker pull ghcr.io/huggingface/text-generation-inference:1.4

     docker images && sleep 1s
 }

 function start_services() {
     cd "$WORKPATH"/docker_compose/amd/gpu/rocm
-
-
-    sed -i "s/backend_address/$ip_address/g" "$WORKPATH"/ui/svelte/.env

     # Start Docker Containers
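Before the validations below run against the new whisper and dataprep containers, it can be useful to confirm they are listening at all. A hypothetical reachability probe, with the ports taken from the compose file above (any HTTP status, even 404, means the container is up and answering):

    for port in 7066 9099 7078 7079; do
        echo "port ${port}: $(curl -s -o /dev/null -w '%{http_code}' http://${host_ip}:${port}/)"
    done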
@@ -71,121 +77,225 @@ function validate_services() {
     local INPUT_DATA="$5"

     local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL")
+
+    echo "==========================================="
+
     if [ "$HTTP_STATUS" -eq 200 ]; then
         echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."

-        local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee "${LOG_PATH}"/"${SERVICE_NAME}".log)
+        local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log)

         if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then
             echo "[ $SERVICE_NAME ] Content is as expected."
         else
+            echo "EXPECTED_RESULT==> $EXPECTED_RESULT"
+            echo "CONTENT==> $CONTENT"
             echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT"
-            docker logs "${DOCKER_NAME}" >> "${LOG_PATH}"/"${SERVICE_NAME}".log
+            docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
             exit 1
+
         fi
     else
         echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS"
-        docker logs "${DOCKER_NAME}" >> "${LOG_PATH}"/"${SERVICE_NAME}".log
+        docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
         exit 1
     fi
     sleep 1s
 }

+get_base64_str() {
+    local file_name=$1
+    base64 -w 0 "$file_name"
+}
+
+# Function to generate input data for testing based on the document type
+input_data_for_test() {
+    local document_type=$1
+    case $document_type in
+        ("text")
+            echo "THIS IS A TEST >>>> and a number of states are starting to adopt them voluntarily special correspondent john delenco of education week reports it takes just 10 minutes to cross through gillette wyoming this small city sits in the northeast corner of the state surrounded by 100s of miles of prairie but schools here in campbell county are on the edge of something big the next generation science standards you are going to build a strand of dna and you are going to decode it and figure out what that dna actually says for christy mathis at sage valley junior high school the new standards are about learning to think like a scientist there is a lot of really good stuff in them every standard is a performance task it is not you know the child needs to memorize these things it is the student needs to be able to do some pretty intense stuff we are analyzing we are critiquing we are."
+            ;;
+        ("audio")
+            get_base64_str "$WORKPATH/tests/data/test.wav"
+            ;;
+        ("video")
+            get_base64_str "$WORKPATH/tests/data/test.mp4"
+            ;;
+        (*)
+            echo "Invalid document type" >&2
+            exit 1
+            ;;
+    esac
+}
+
 function validate_microservices() {
     # Check if the microservices are running correctly.

+    # whisper microservice
+    ulimit -s 65536
+    validate_services \
+        "${host_ip}:7066/v1/asr" \
+        '{"asr_result":"well"}' \
+        "whisper-service" \
+        "whisper-service" \
+        "{\"audio\": \"$(input_data_for_test "audio")\"}"
+
+    # Audio2Text service
+    validate_services \
+        "${host_ip}:9099/v1/audio/transcriptions" \
+        '"query":"well"' \
+        "dataprep-audio2text" \
+        "dataprep-audio2text-service" \
+        "{\"byte_str\": \"$(input_data_for_test "audio")\"}"
+
+    # Video2Audio service
+    validate_services \
+        "${host_ip}:7078/v1/video2audio" \
+        "SUQzBAAAAAAAI1RTU0UAAAAPAAADTGF2ZjU4LjI5LjEwMAAAAAAAAAAAAAAA//tQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAASW5mbwAAAA8AAAAIAAAN3wAtLS0tLS0tLS0tLS1LS0tLS0tLS0tLS0tpaWlpaWlpaWlpaWlph4eHh4eHh4eHh4eHpaWlpaWlpaWlpaWlpcPDw8PDw8PDw8PDw+Hh4eHh4eHh4eHh4eH///////////////8AAAAATGF2YzU4LjU0AAAAAAAAAAAAAAAAJAYwAAAAAAAADd95t4qPAAAAAAAAAAAAAAAAAAAAAP/7kGQAAAMhClSVMEACMOAabaCMAREA" \
+        "dataprep-video2audio" \
+        "dataprep-video2audio-service" \
+        "{\"byte_str\": \"$(input_data_for_test "video")\"}"
+
+    # Docsum Data service - video
+    validate_services \
+        "${host_ip}:7079/v1/multimedia2text" \
+        "well" \
+        "dataprep-multimedia2text-service" \
+        "dataprep-multimedia2text" \
+        "{\"video\": \"$(input_data_for_test "video")\"}"
+
+    # Docsum Data service - audio
+    validate_services \
+        "${host_ip}:7079/v1/multimedia2text" \
+        "well" \
+        "dataprep-multimedia2text-service" \
+        "dataprep-multimedia2text" \
+        "{\"audio\": \"$(input_data_for_test "audio")\"}"
+
+    # Docsum Data service - text
+    validate_services \
+        "${host_ip}:7079/v1/multimedia2text" \
+        "THIS IS A TEST >>>> and a number of states are starting to adopt them voluntarily special correspondent john delenco" \
+        "dataprep-multimedia2text-service" \
+        "dataprep-multimedia2text" \
+        "{\"text\": \"$(input_data_for_test "text")\"}"
+
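The three multimedia2text checks above send video, audio, and plain text to the same endpoint. Outside the harness, the audio case reduces to a single request; an illustrative standalone equivalent, assuming the stack is running and WORKPATH points at the repository root:

    curl -s -X POST http://${host_ip}:7079/v1/multimedia2text \
        -H 'Content-Type: application/json' \
        -d "{\"audio\": \"$(base64 -w 0 "$WORKPATH/tests/data/test.wav")\"}"

The response should carry the transcript of test.wav, which is why the checks grep for "well".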
"generated_text" \ - "tgi-llm" \ - "tgi-service" \ + "docsum-tgi-service" \ + "docsum-tgi-service" \ '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' # llm microservice validate_services \ - "${ip_address}:9000/v1/chat/docsum" \ + "${host_ip}:9000/v1/chat/docsum" \ "data: " \ - "llm" \ + "docsum-llm-server" \ "docsum-llm-server" \ '{"query":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' + } function validate_megaservice() { - local SERVICE_NAME="mega-docsum" + local SERVICE_NAME="docsum-backend-server" local DOCKER_NAME="docsum-backend-server" - local EXPECTED_RESULT="embedding" + local EXPECTED_RESULT="[DONE]" local INPUT_DATA="messages=Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5." - local URL="${ip_address}:8888/v1/docsum" + local URL="${host_ip}:8888/v1/docsum" + local DATA_TYPE="type=text" + + local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -F "$DATA_TYPE" -F "$INPUT_DATA" -H 'Content-Type: multipart/form-data' "$URL") - local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -F "$INPUT_DATA" -H 'Content-Type: multipart/form-data' "$URL") if [ "$HTTP_STATUS" -eq 200 ]; then echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." - local CONTENT=$(curl -s -X POST -F "$INPUT_DATA" -H 'Content-Type: multipart/form-data' "$URL" | tee "${LOG_PATH}"/"${SERVICE_NAME}".log) + local CONTENT=$(curl -s -X POST -F "$DATA_TYPE" -F "$INPUT_DATA" -H 'Content-Type: multipart/form-data' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log) if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then echo "[ $SERVICE_NAME ] Content is as expected." else echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT" - docker logs "${DOCKER_NAME}" >> "${LOG_PATH}"/"${SERVICE_NAME}".log + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log exit 1 fi else echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" - docker logs "${DOCKER_NAME}" >> "${LOG_PATH}"/"${SERVICE_NAME}".log + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log exit 1 fi sleep 1s } -#function validate_frontend() { -# cd "$WORKPATH"/ui/svelte -# local conda_env_name="OPEA_e2e" -# export PATH=${HOME}/miniforge3/bin/:$PATH -# if conda info --envs | grep -q "$conda_env_name"; then -# echo "$conda_env_name exist!" -# else -# conda create -n ${conda_env_name} python=3.12 -y -# fi -# source activate ${conda_env_name} -# -# sed -i "s/localhost/$ip_address/g" playwright.config.ts -# -# conda install -c conda-forge nodejs=22.6.0 -y -# npm install && npm ci && npx playwright install --with-deps -# node -v && npm -v && pip list -# -# exit_status=0 -# npx playwright test || exit_status=$? 
-#function validate_frontend() {
-#    cd "$WORKPATH"/ui/svelte
-#    local conda_env_name="OPEA_e2e"
-#    export PATH=${HOME}/miniforge3/bin/:$PATH
-#    if conda info --envs | grep -q "$conda_env_name"; then
-#        echo "$conda_env_name exist!"
-#    else
-#        conda create -n ${conda_env_name} python=3.12 -y
-#    fi
-#    source activate ${conda_env_name}
-#
-#    sed -i "s/localhost/$ip_address/g" playwright.config.ts
-#
-#    conda install -c conda-forge nodejs=22.6.0 -y
-#    npm install && npm ci && npx playwright install --with-deps
-#    node -v && npm -v && pip list
-#
-#    exit_status=0
-#    npx playwright test || exit_status=$?
-#
-#    if [ $exit_status -ne 0 ]; then
-#        echo "[TEST INFO]: ---------frontend test failed---------"
-#        exit $exit_status
-#    else
-#        echo "[TEST INFO]: ---------frontend test passed---------"
-#    fi
-#}
+function validate_megaservice_json() {
+    # Curl the Mega Service
+    echo ""
+    echo ">>> Checking text data with Content-Type: application/json"
+    validate_services \
+        "${host_ip}:8888/v1/docsum" \
+        "[DONE]" \
+        "docsum-backend-server" \
+        "docsum-backend-server" \
+        '{"type": "text", "messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}'
+
+    echo ">>> Checking audio data"
+    validate_services \
+        "${host_ip}:8888/v1/docsum" \
+        "[DONE]" \
+        "docsum-backend-server" \
+        "docsum-backend-server" \
+        "{\"type\": \"audio\", \"messages\": \"$(input_data_for_test "audio")\"}"
+
+    echo ">>> Checking video data"
+    validate_services \
+        "${host_ip}:8888/v1/docsum" \
+        "[DONE]" \
+        "docsum-backend-server" \
+        "docsum-backend-server" \
+        "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\"}"
+
+}

 function stop_docker() {
-    cd "$WORKPATH"/docker_compose/amd/gpu/rocm
+    cd $WORKPATH/docker_compose/amd/gpu/rocm/
     docker compose stop && docker compose rm -f
 }

 function main() {
-
+    echo "==========================================="
+    echo ">>>> Stopping any running Docker containers..."
     stop_docker
-    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
+    echo "==========================================="
+    if [[ "$IMAGE_REPO" == "opea" ]]; then
+        echo ">>>> Building Docker images..."
+        build_docker_images
+    fi
+
+    echo "==========================================="
+    echo ">>>> Starting Docker services..."
     start_services
+    echo "==========================================="
+    echo ">>>> Validating microservices..."
     validate_microservices
+
+    echo "==========================================="
+    echo ">>>> Validating megaservice..."
     validate_megaservice
-    #validate_frontend
+    echo ">>>> Validating validate_megaservice_json..."
+    validate_megaservice_json
+    echo "==========================================="
+    echo ">>>> Stopping Docker containers..."
     stop_docker
-    echo y | docker system prune
+    echo "==========================================="
+    echo ">>>> Pruning Docker system..."
+    echo y | docker system prune
+    echo ">>>> Docker system pruned successfully."
+    echo "==========================================="
 }

 main
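For reference, a typical local run of the updated test script, assuming its preamble (above the hunks shown here) derives WORKPATH, LOG_PATH, and ip_address the way the other DocSum CI scripts do:

    export IMAGE_REPO=opea
    export IMAGE_TAG=latest
    export HUGGINGFACEHUB_API_TOKEN=<your HF token>
    bash DocSum/tests/test_compose_on_rocm.sh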