Adding files to deploy CodeGen application on ROCm vLLM (#1544)
Signed-off-by: Chingis Yundunov <YundunovCN@sibedge.com>
parent fe2a6674e0
commit 31b1d69e40
@@ -34,7 +34,7 @@ function build_docker_images() {
     service_list="codegen codegen-ui llm-textgen"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

-    docker pull ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu
+    docker pull ghcr.io/huggingface/text-generation-inference:2.3.1-rocm
     docker images && sleep 1s
 }
@@ -51,7 +51,7 @@ function start_services() {
     export CODEGEN_BACKEND_SERVICE_PORT=7778
     export CODEGEN_BACKEND_SERVICE_URL="http://${ip_address}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen"
     export CODEGEN_UI_SERVICE_PORT=5173
-    export host_ip=${ip_address}
+    export HOST_IP=${ip_address}

     sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
@@ -100,15 +100,15 @@ function validate_services() {
 function validate_microservices() {
     # tgi for llm service
     validate_services \
-        "${ip_address}:8028/generate" \
+        "${ip_address}:${CODEGEN_TGI_SERVICE_PORT}/generate" \
         "generated_text" \
         "codegen-tgi-service" \
         "codegen-tgi-service" \
         '{"inputs":"def print_hello_world():","parameters":{"max_new_tokens":256, "do_sample": true}}'

     sleep 10
     # llm microservice
     validate_services \
-        "${ip_address}:9000/v1/chat/completions" \
+        "${ip_address}:${CODEGEN_LLM_SERVICE_PORT}/v1/chat/completions" \
         "data: " \
         "codegen-llm-server" \
         "codegen-llm-server" \
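These hunks replace hardcoded endpoints with port variables. A minimal sketch of the exports the updated TGI test would rely on before these calls (values assumed from the old hardcoded ports; the actual exports sit in parts of start_services not shown in this diff):

    export CODEGEN_TGI_SERVICE_PORT=8028
    export CODEGEN_LLM_SERVICE_PORT=9000
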
CodeGen/tests/test_compose_vllm_on_rocm.sh (new file, 181 lines)
@@ -0,0 +1,181 @@
#!/bin/bash
# Copyright (C) 2024 Intel Corporation
# Copyright (c) 2024 Advanced Micro Devices, Inc.
# SPDX-License-Identifier: Apache-2.0

set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
    opea_branch=${opea_branch:-"main"}
    # If the opea_branch isn't main, replace the git clone branch in Dockerfile.
    if [[ "${opea_branch}" != "main" ]]; then
        cd $WORKPATH
        OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
        NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
        find . -type f -name "Dockerfile*" | while read -r file; do
            echo "Processing file: $file"
            sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
        done
    fi

    cd $WORKPATH/docker_image_build
    git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="vllm-rocm llm-textgen codegen codegen-ui"
    docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker images && sleep 1s
}

function start_services() {
    cd $WORKPATH/docker_compose/amd/gpu/rocm/

    export CODEGEN_LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
    export CODEGEN_VLLM_SERVICE_PORT=8028
    export CODEGEN_VLLM_ENDPOINT="http://${ip_address}:${CODEGEN_VLLM_SERVICE_PORT}"
    export CODEGEN_LLM_SERVICE_PORT=9000
    export CODEGEN_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
    export CODEGEN_MEGA_SERVICE_HOST_IP=${ip_address}
    export CODEGEN_LLM_SERVICE_HOST_IP=${ip_address}
    export CODEGEN_BACKEND_SERVICE_PORT=7778
    export CODEGEN_BACKEND_SERVICE_URL="http://${ip_address}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen"
    export CODEGEN_UI_SERVICE_PORT=5173
    export HOST_IP=${ip_address}

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

    # Start Docker Containers
    docker compose -f compose_vllm.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

    # Wait for the vLLM server to finish loading the model: poll the container
    # log every 20 s, up to 500 attempts, for the startup-complete message.
    n=0
    until [[ "$n" -ge 500 ]]; do
        docker logs codegen-vllm-service >& "${LOG_PATH}"/codegen-vllm-service_start.log
        if grep -q "Application startup complete" "${LOG_PATH}"/codegen-vllm-service_start.log; then
            break
        fi
        sleep 20s
        n=$((n+1))
    done
}

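# NOTE: a lighter-weight readiness probe is possible. A sketch, assuming the
# vLLM OpenAI-compatible server exposes its standard /health endpoint, which
# polls HTTP directly instead of scraping container logs:
#
#   until curl -sf "http://${ip_address}:${CODEGEN_VLLM_SERVICE_PORT}/health"; do
#       sleep 20s
#   done
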
function validate_services() {
    local URL="$1"
    local EXPECTED_RESULT="$2"
    local SERVICE_NAME="$3"
    local DOCKER_NAME="$4"
    local INPUT_DATA="$5"

    local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL")
    if [ "$HTTP_STATUS" -eq 200 ]; then
        echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."

        local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log)

        if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then
            echo "[ $SERVICE_NAME ] Content is as expected."
        else
            echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT"
            docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
            exit 1
        fi
    else
        echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS"
        docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
        exit 1
    fi
    sleep 5s
}

function validate_microservices() {
    # vLLM for llm service
    validate_services \
        "${ip_address}:${CODEGEN_VLLM_SERVICE_PORT}/v1/chat/completions" \
        "content" \
        "codegen-vllm-service" \
        "codegen-vllm-service" \
        '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "What is Deep Learning?"}], "max_tokens": 17}'
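    # The vLLM endpoint returns OpenAI-style chat-completion JSON, roughly
    # {"choices":[{"message":{"content":"..."}}], ...} (illustrative shape,
    # not captured output), so grepping for "content" confirms a well-formed reply.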
    sleep 10
    # llm microservice
    validate_services \
        "${ip_address}:${CODEGEN_LLM_SERVICE_PORT}/v1/chat/completions" \
        "data: " \
        "codegen-llm-server" \
        "codegen-llm-server" \
        '{"query":"def print_hello_world():"}'
}

function validate_megaservice() {
    # Curl the Mega Service
    validate_services \
        "${ip_address}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen" \
        "print" \
        "codegen-backend-server" \
        "codegen-backend-server" \
        '{"messages": "def print_hello_world():"}'
}

function validate_frontend() {
    cd $WORKPATH/ui/svelte
    local conda_env_name="OPEA_e2e"
    export PATH=${HOME}/miniconda3/bin/:$PATH
    if conda info --envs | grep -q "$conda_env_name"; then
        echo "$conda_env_name exists!"
    else
        conda create -n ${conda_env_name} python=3.12 -y
    fi
    source activate ${conda_env_name}

    sed -i "s/localhost/$ip_address/g" playwright.config.ts

    conda install -c conda-forge nodejs=22.6.0 -y
    npm install && npm ci && npx playwright install --with-deps
    node -v && npm -v && pip list

    exit_status=0
    npx playwright test || exit_status=$?

    if [ $exit_status -ne 0 ]; then
        echo "[TEST INFO]: ---------frontend test failed---------"
        exit $exit_status
    else
        echo "[TEST INFO]: ---------frontend test passed---------"
    fi
}

function stop_docker() {
    echo "OPENAI_API_KEY - ${OPENAI_API_KEY}"
    cd $WORKPATH/docker_compose/amd/gpu/rocm/
    docker compose -f compose_vllm.yaml stop && docker compose -f compose_vllm.yaml rm -f
}

function main() {

    stop_docker
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    start_services

    validate_microservices
    validate_megaservice
    validate_frontend

    stop_docker
    echo y | docker system prune
    cd $WORKPATH

}

main
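
To exercise the new test locally, a hypothetical invocation (the script reads HUGGINGFACEHUB_API_TOKEN from the environment, and IMAGE_REPO/IMAGE_TAG default to opea/latest; run from CodeGen/tests so WORKPATH resolves to the CodeGen directory):

    export HUGGINGFACEHUB_API_TOKEN=<your_hf_token>
    cd CodeGen/tests
    bash test_compose_vllm_on_rocm.sh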