From b467a13ec3b9e05d172ba886266a5cbb4dbdec29 Mon Sep 17 00:00:00 2001
From: "Sun, Xuehao"
Date: Thu, 8 May 2025 10:34:36 +0800
Subject: [PATCH] daily update vLLM&vLLM-fork version (#1914)

Signed-off-by: Sun, Xuehao
---
 .github/workflows/_build_image.yml            |  6 +-
 .../workflows/daily-update-vllm-version.yml   | 93 +++++++++++++++++++
 AgentQnA/tests/step1_build_images.sh          |  4 +-
 AudioQnA/tests/test_compose_on_gaudi.sh       |  6 +-
 ChatQnA/tests/test_compose_faqgen_on_gaudi.sh |  4 +-
 .../tests/test_compose_guardrails_on_gaudi.sh |  4 +-
 ChatQnA/tests/test_compose_on_gaudi.sh        |  4 +-
 .../test_compose_without_rerank_on_gaudi.sh   |  4 +-
 CodeGen/tests/test_compose_on_gaudi.sh        |  6 +-
 CodeTrans/tests/test_compose_on_gaudi.sh      |  4 +-
 DocSum/tests/test_compose_on_gaudi.sh         |  4 +-
 FinanceAgent/tests/test_compose_on_gaudi.sh   |  6 +-
 12 files changed, 119 insertions(+), 26 deletions(-)
 create mode 100644 .github/workflows/daily-update-vllm-version.yml

diff --git a/.github/workflows/_build_image.yml b/.github/workflows/_build_image.yml
index 4ce00db5e..5d55d474a 100644
--- a/.github/workflows/_build_image.yml
+++ b/.github/workflows/_build_image.yml
@@ -83,9 +83,9 @@ jobs:
           fi
           if [[ $(grep -c "vllm-gaudi:" ${docker_compose_path}) != 0 ]]; then
             git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-            VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-            echo "Check out vLLM tag ${VLLM_VER}"
-            git checkout ${VLLM_VER} &> /dev/null && cd ../
+            VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+            echo "Check out vLLM tag ${VLLM_FORK_VER}"
+            git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
           fi
           git clone --depth 1 --branch ${{ inputs.opea_branch }} https://github.com/opea-project/GenAIComps.git
           cd GenAIComps && git rev-parse HEAD && cd ../
diff --git a/.github/workflows/daily-update-vllm-version.yml b/.github/workflows/daily-update-vllm-version.yml
new file mode 100644
index 000000000..eb09bf5a6
--- /dev/null
+++ b/.github/workflows/daily-update-vllm-version.yml
@@ -0,0 +1,93 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+name: Daily update vLLM & vLLM-fork version
+
+on:
+  schedule:
+    - cron: "30 22 * * *"
+  workflow_dispatch:
+
+env:
+  BRANCH_NAME: "update"
+  USER_NAME: "CICD-at-OPEA"
+  USER_EMAIL: "CICD@opea.dev"
+
+jobs:
+  freeze-tag:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        include:
+          - repo: vLLM
+            repo_name: vllm-project/vllm
+            ver_name: VLLM_VER
+          - repo: vLLM-fork
+            repo_name: HabanaAI/vllm-fork
+            ver_name: VLLM_FORK_VER
+    permissions:
+      contents: write
+      pull-requests: write
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          ref: ${{ github.ref }}
+
+      - name: Set up Git
+        run: |
+          git config --global user.name ${{ env.USER_NAME }}
+          git config --global user.email ${{ env.USER_EMAIL }}
+          git remote set-url origin https://${{ env.USER_NAME }}:"${{ secrets.ACTION_TOKEN }}"@github.com/${{ github.repository }}.git
+          git fetch
+
+          if git ls-remote https://github.com/${{ github.repository }}.git "refs/heads/${{ env.BRANCH_NAME }}_${{ matrix.repo }}" | grep -q "refs/heads/${{ env.BRANCH_NAME }}_${{ matrix.repo }}"; then
+            echo "branch ${{ env.BRANCH_NAME }}_${{ matrix.repo }} exists"
+            git checkout ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
+          else
+            echo "branch ${{ env.BRANCH_NAME }}_${{ matrix.repo }} does not exist"
+            git checkout -b ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
+            git push origin ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
+            echo "branch ${{ env.BRANCH_NAME }}_${{ matrix.repo }} created successfully"
+          fi
+
+      - name: Run script
+        run: |
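+          # Look up the newest tag of the matrix repo via the GitHub tags API,
+          # export it for the commit/PR steps below, and rewrite every pinned
+          # VLLM_VER / VLLM_FORK_VER assignment in *.sh and _build_image.yml.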
+          latest_vllm_ver=$(curl -s "https://api.github.com/repos/${{ matrix.repo_name }}/tags" | jq -r '.[0].name')
+          echo "latest_vllm_ver=${latest_vllm_ver}" >> "$GITHUB_ENV"
+          find . -type f \( -name "*.sh" -o -name "_build_image.yml" \) -exec sed -i "s/${{ matrix.ver_name }}=.*/${{ matrix.ver_name }}=${latest_vllm_ver}/" {} \;
+
+      - name: Commit changes
+        run: |
+          git add .
+          if git diff-index --quiet HEAD --; then
+            # Fail the job on purpose so the PR step below is skipped when
+            # the version pins are already up to date.
+            echo "No changes detected, skipping commit."
+            exit 1
+          else
+            git commit -s -m "Update ${{ matrix.repo }} version to ${latest_vllm_ver}"
+            git push
+          fi
+
+      - name: Create Pull Request
+        run: |
+          # Reuse an existing PR for this branch if one is found, otherwise open a new one.
+          pr_count=$(curl -H "Authorization: token ${{ secrets.ACTION_TOKEN }}" -s "https://api.github.com/repos/${{ github.repository }}/pulls?state=all&head=${{ env.USER_NAME }}:${{ env.BRANCH_NAME }}_${{ matrix.repo }}" | jq '. | length')
+          if [ "$pr_count" -gt 0 ]; then
+            echo "Pull Request exists"
+            pr_number=$(curl -H "Authorization: token ${{ secrets.ACTION_TOKEN }}" -s "https://api.github.com/repos/${{ github.repository }}/pulls?state=all&head=${{ env.USER_NAME }}:${{ env.BRANCH_NAME }}_${{ matrix.repo }}" | jq '.[0].number')
+            curl -X PATCH -H "Authorization: token ${{ secrets.ACTION_TOKEN }}" -d "{
+              \"title\":\"Update ${{ matrix.repo }} version to ${latest_vllm_ver}\",
+              \"body\":\"Update ${{ matrix.repo }} version to ${latest_vllm_ver}\",
+              \"state\":\"open\"
+            }" "https://api.github.com/repos/${{ github.repository }}/pulls/${pr_number}"
+            echo "Pull Request updated successfully"
+          else
+            echo "Pull Request does not exist..."
+            curl -H "Authorization: token ${{ secrets.ACTION_TOKEN }}" -d "{
+              \"title\":\"Update ${{ matrix.repo }} version to ${latest_vllm_ver}\",
+              \"body\":\"Update ${{ matrix.repo }} version to ${latest_vllm_ver}\",
+              \"head\":\"${{ env.USER_NAME }}:${{ env.BRANCH_NAME }}_${{ matrix.repo }}\",
+              \"base\":\"main\"
+            }" "https://api.github.com/repos/${{ github.repository }}/pulls"
+            echo "Pull Request created successfully"
+          fi
diff --git a/AgentQnA/tests/step1_build_images.sh b/AgentQnA/tests/step1_build_images.sh
index dfb20df22..8edd7b623 100644
--- a/AgentQnA/tests/step1_build_images.sh
+++ b/AgentQnA/tests/step1_build_images.sh
@@ -37,8 +37,8 @@ function build_agent_docker_image_gaudi_vllm() {
     get_genai_comps
 
     git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-    git checkout ${VLLM_VER} &> /dev/null && cd ../
+    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
     echo "Build agent image with --no-cache..."
     service_list="agent agent-ui vllm-gaudi"
diff --git a/AudioQnA/tests/test_compose_on_gaudi.sh b/AudioQnA/tests/test_compose_on_gaudi.sh
index e2d58b72e..c24f5ff82 100644
--- a/AudioQnA/tests/test_compose_on_gaudi.sh
+++ b/AudioQnA/tests/test_compose_on_gaudi.sh
@@ -27,9 +27,9 @@ function build_docker_images() {
 
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
-    VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-    echo "Check out vLLM tag ${VLLM_VER}"
-    git checkout ${VLLM_VER} &> /dev/null && cd ../
+    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    echo "Check out vLLM tag ${VLLM_FORK_VER}"
+    git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="audioqna audioqna-ui whisper-gaudi speecht5-gaudi vllm-gaudi" diff --git a/ChatQnA/tests/test_compose_faqgen_on_gaudi.sh b/ChatQnA/tests/test_compose_faqgen_on_gaudi.sh index be040bdbe..2a30dbb77 100644 --- a/ChatQnA/tests/test_compose_faqgen_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_faqgen_on_gaudi.sh @@ -24,8 +24,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_VER=v0.6.6.post1+Gaudi-1.20.0 - git checkout ${VLLM_VER} &> /dev/null && cd ../ + VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 + git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." service_list="chatqna chatqna-ui dataprep retriever llm-faqgen vllm-gaudi nginx" diff --git a/ChatQnA/tests/test_compose_guardrails_on_gaudi.sh b/ChatQnA/tests/test_compose_guardrails_on_gaudi.sh index 06b58bedc..f9057f6ec 100644 --- a/ChatQnA/tests/test_compose_guardrails_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_guardrails_on_gaudi.sh @@ -24,8 +24,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_VER=v0.6.6.post1+Gaudi-1.20.0 - git checkout ${VLLM_VER} &> /dev/null && cd ../ + VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 + git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." service_list="chatqna chatqna-ui dataprep retriever vllm-gaudi guardrails nginx" diff --git a/ChatQnA/tests/test_compose_on_gaudi.sh b/ChatQnA/tests/test_compose_on_gaudi.sh index 0fe3cf11a..144f54190 100644 --- a/ChatQnA/tests/test_compose_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_on_gaudi.sh @@ -24,8 +24,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_VER=v0.6.6.post1+Gaudi-1.20.0 - git checkout ${VLLM_VER} &> /dev/null && cd ../ + VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 + git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." service_list="chatqna chatqna-ui dataprep retriever vllm-gaudi nginx" diff --git a/ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh b/ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh index 6e4782ba7..7d6837402 100644 --- a/ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh @@ -24,8 +24,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_VER=v0.6.6.post1+Gaudi-1.20.0 - git checkout ${VLLM_VER} &> /dev/null && cd ../ + VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 + git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
service_list="chatqna chatqna-ui dataprep retriever vllm-gaudi nginx" diff --git a/CodeGen/tests/test_compose_on_gaudi.sh b/CodeGen/tests/test_compose_on_gaudi.sh index fda73cd16..58eb0888d 100644 --- a/CodeGen/tests/test_compose_on_gaudi.sh +++ b/CodeGen/tests/test_compose_on_gaudi.sh @@ -43,9 +43,9 @@ function build_docker_images() { # Download Gaudi vllm of latest tag git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_VER=v0.6.6.post1+Gaudi-1.20.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &> /dev/null && cd ../ + VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 + echo "Check out vLLM tag ${VLLM_FORK_VER}" + git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." service_list="codegen codegen-gradio-ui llm-textgen vllm-gaudi dataprep retriever embedding" diff --git a/CodeTrans/tests/test_compose_on_gaudi.sh b/CodeTrans/tests/test_compose_on_gaudi.sh index 5f287eb02..41472244a 100644 --- a/CodeTrans/tests/test_compose_on_gaudi.sh +++ b/CodeTrans/tests/test_compose_on_gaudi.sh @@ -31,8 +31,8 @@ function build_docker_images() { cd $WORKPATH/docker_image_build git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_VER=v0.6.6.post1+Gaudi-1.20.0 - git checkout ${VLLM_VER} &> /dev/null && cd ../ + VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 + git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." service_list="codetrans codetrans-ui llm-textgen vllm-gaudi nginx" diff --git a/DocSum/tests/test_compose_on_gaudi.sh b/DocSum/tests/test_compose_on_gaudi.sh index 3c0f3d695..64d306387 100644 --- a/DocSum/tests/test_compose_on_gaudi.sh +++ b/DocSum/tests/test_compose_on_gaudi.sh @@ -50,8 +50,8 @@ function build_docker_images() { popd && sleep 1s git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_VER=v0.6.6.post1+Gaudi-1.20.0 - git checkout ${VLLM_VER} &> /dev/null && cd ../ + VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 + git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." service_list="docsum docsum-gradio-ui whisper llm-docsum vllm-gaudi" diff --git a/FinanceAgent/tests/test_compose_on_gaudi.sh b/FinanceAgent/tests/test_compose_on_gaudi.sh index 207dcc62f..0f4281397 100644 --- a/FinanceAgent/tests/test_compose_on_gaudi.sh +++ b/FinanceAgent/tests/test_compose_on_gaudi.sh @@ -59,9 +59,9 @@ function build_vllm_docker_image() { git clone https://github.com/HabanaAI/vllm-fork.git fi cd ./vllm-fork - # VLLM_VER=$(git describe --tags "$(git rev-list --tags --max-count=1)") - VLLM_VER=v0.6.6.post1+Gaudi-1.20.0 - git checkout ${VLLM_VER} &> /dev/null + + VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 + git checkout ${VLLM_FORK_VER} &> /dev/null docker build --no-cache -f Dockerfile.hpu -t $vllm_image --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy if [ $? -ne 0 ]; then echo "$vllm_image failed"