Daily update vLLM & vLLM-fork version (#1914)

Signed-off-by: Sun, Xuehao <xuehao.sun@intel.com>

Author: Sun, Xuehao
Date: 2025-05-08 10:34:36 +08:00
Committed by: GitHub
Parent: 05011ebaac
Commit: b467a13ec3
12 changed files with 119 additions and 26 deletions
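
This commit renames the Gaudi checkout variable from VLLM_VER to VLLM_FORK_VER across the CI build scripts, so the upstream vLLM version and the HabanaAI vllm-fork version can be pinned independently, and adds a scheduled workflow that bumps each to its latest upstream tag and opens a pull request per repository.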

@@ -83,9 +83,9 @@ jobs:
fi
if [[ $(grep -c "vllm-gaudi:" ${docker_compose_path}) != 0 ]]; then
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null && cd ../
+VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+echo "Check out vLLM tag ${VLLM_FORK_VER}"
+git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
fi
git clone --depth 1 --branch ${{ inputs.opea_branch }} https://github.com/opea-project/GenAIComps.git
cd GenAIComps && git rev-parse HEAD && cd ../

@@ -0,0 +1,93 @@
# Copyright (C) 2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

name: Daily update vLLM & vLLM-fork version

on:
  schedule:
    - cron: "30 22 * * *"
  workflow_dispatch:

env:
  BRANCH_NAME: "update"
  USER_NAME: "CICD-at-OPEA"
  USER_EMAIL: "CICD@opea.dev"

jobs:
  freeze-tag:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
          - repo: vLLM
            repo_name: vllm-project/vllm
            ver_name: VLLM_VER
          - repo: vLLM-fork
            repo_name: HabanaAI/vllm-fork
            ver_name: VLLM_FORK_VER
    permissions:
      contents: write
      pull-requests: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          ref: ${{ github.ref }}

      - name: Set up Git
        run: |
          git config --global user.name ${{ env.USER_NAME }}
          git config --global user.email ${{ env.USER_EMAIL }}
          git remote set-url origin https://${{ env.USER_NAME }}:"${{ secrets.ACTION_TOKEN }}"@github.com/${{ github.repository }}.git
          git fetch
          if git ls-remote https://github.com/${{ github.repository }}.git "refs/heads/${{ env.BRANCH_NAME }}_${{ matrix.repo }}" | grep -q "refs/heads/${{ env.BRANCH_NAME }}_${{ matrix.repo }}"; then
            echo "branch ${{ env.BRANCH_NAME }}_${{ matrix.repo }} exists"
            git checkout ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
          else
            echo "branch ${{ env.BRANCH_NAME }}_${{ matrix.repo }} does not exist"
            git checkout -b ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
            git push origin ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
            echo "branch ${{ env.BRANCH_NAME }}_${{ matrix.repo }} created successfully"
          fi

      - name: Run script
        run: |
          latest_vllm_ver=$(curl -s "https://api.github.com/repos/${{ matrix.repo_name }}/tags" | jq -r '.[0].name')
          echo "latest_vllm_ver=${latest_vllm_ver}" >> "$GITHUB_ENV"
          find . -type f \( -name "*.sh" -o -name "_build_image.yml" \) -exec sed -i "s/${{ matrix.ver_name }}=.*/${{ matrix.ver_name }}=${latest_vllm_ver}/" {} \;

      - name: Commit changes
        run: |
          git add .
          if git diff-index --quiet HEAD --; then
            echo "No changes detected, skipping commit."
            exit 1
          else
            git commit -s -m "Update ${{ matrix.repo }} version to ${latest_vllm_ver}"
            git push
          fi

      - name: Create Pull Request
        run: |
          pr_count=$(curl -H "Authorization: token ${{ secrets.ACTION_TOKEN }}" -s "https://api.github.com/repos/${{ github.repository }}/pulls?state=all&head=${{ env.USER_NAME }}:${{ env.BRANCH_NAME }}_${{ matrix.repo }}" | jq '. | length')
          if [ "$pr_count" -gt 0 ]; then
            echo "Pull Request exists"
            pr_number=$(curl -H "Authorization: token ${{ secrets.ACTION_TOKEN }}" -s "https://api.github.com/repos/${{ github.repository }}/pulls?state=all&head=${{ env.USER_NAME }}:${{ env.BRANCH_NAME }}_${{ matrix.repo }}" | jq '.[0].number')
            curl -X PATCH -H "Authorization: token ${{ secrets.ACTION_TOKEN }}" -d "{
              \"title\":\"Update ${{ matrix.repo }} version to ${latest_vllm_ver}\",
              \"body\":\"Update ${{ matrix.repo }} version to ${latest_vllm_ver}\",
              \"state\":\"open\"
            }" "https://api.github.com/repos/${{ github.repository }}/pulls/${pr_number}"
            echo "Pull Request updated successfully"
          else
            echo "Pull Request does not exist, creating it..."
            curl -H "Authorization: token ${{ secrets.ACTION_TOKEN }}" -d "{
              \"title\":\"Update ${{ matrix.repo }} version to ${latest_vllm_ver}\",
              \"body\":\"Update ${{ matrix.repo }} version to ${latest_vllm_ver}\",
              \"head\":\"${{ env.USER_NAME }}:${{ env.BRANCH_NAME }}_${{ matrix.repo }}\",
              \"base\":\"main\"
            }" "https://api.github.com/repos/${{ github.repository }}/pulls"
            echo "Pull Request created successfully"
          fi
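
For reference, the tag-lookup-and-bump logic in the "Run script" step can be exercised locally before relying on the nightly schedule. The sketch below is illustrative and not part of the commit; it assumes curl and jq are installed, and REPO and VER_NAME are hypothetical stand-ins for one matrix entry (matrix.repo_name / matrix.ver_name).

# Hypothetical local dry run mirroring the workflow's "Run script" step.
REPO="HabanaAI/vllm-fork"        # stands in for matrix.repo_name
VER_NAME="VLLM_FORK_VER"         # stands in for matrix.ver_name

# Take the first entry from the tags API, as the workflow's script does.
latest_ver=$(curl -s "https://api.github.com/repos/${REPO}/tags" | jq -r '.[0].name')
echo "Latest ${REPO} tag: ${latest_ver}"

# Apply the same rewrite the workflow performs, limited to the files it targets.
find . -type f \( -name "*.sh" -o -name "_build_image.yml" \) \
    -exec sed -i "s/${VER_NAME}=.*/${VER_NAME}=${latest_ver}/" {} \;
git diff --stat   # inspect which scripts were bumped before committing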

@@ -37,8 +37,8 @@ function build_agent_docker_image_gaudi_vllm() {
get_genai_comps
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-git checkout ${VLLM_VER} &> /dev/null && cd ../
+VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
echo "Build agent image with --no-cache..."
service_list="agent agent-ui vllm-gaudi"

@@ -27,9 +27,9 @@ function build_docker_images() {
git clone https://github.com/HabanaAI/vllm-fork.git
cd vllm-fork/
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null && cd ../
+VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+echo "Check out vLLM tag ${VLLM_FORK_VER}"
+git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="audioqna audioqna-ui whisper-gaudi speecht5-gaudi vllm-gaudi"

@@ -24,8 +24,8 @@ function build_docker_images() {
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-git checkout ${VLLM_VER} &> /dev/null && cd ../
+VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="chatqna chatqna-ui dataprep retriever llm-faqgen vllm-gaudi nginx"

@@ -24,8 +24,8 @@ function build_docker_images() {
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-git checkout ${VLLM_VER} &> /dev/null && cd ../
+VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="chatqna chatqna-ui dataprep retriever vllm-gaudi guardrails nginx"

@@ -24,8 +24,8 @@ function build_docker_images() {
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-git checkout ${VLLM_VER} &> /dev/null && cd ../
+VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="chatqna chatqna-ui dataprep retriever vllm-gaudi nginx"

@@ -24,8 +24,8 @@ function build_docker_images() {
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-git checkout ${VLLM_VER} &> /dev/null && cd ../
+VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="chatqna chatqna-ui dataprep retriever vllm-gaudi nginx"

@@ -43,9 +43,9 @@ function build_docker_images() {
# Download Gaudi vllm of latest tag
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null && cd ../
+VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+echo "Check out vLLM tag ${VLLM_FORK_VER}"
+git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="codegen codegen-gradio-ui llm-textgen vllm-gaudi dataprep retriever embedding"

@@ -31,8 +31,8 @@ function build_docker_images() {
cd $WORKPATH/docker_image_build
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-git checkout ${VLLM_VER} &> /dev/null && cd ../
+VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="codetrans codetrans-ui llm-textgen vllm-gaudi nginx"

@@ -50,8 +50,8 @@ function build_docker_images() {
popd && sleep 1s
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-git checkout ${VLLM_VER} &> /dev/null && cd ../
+VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="docsum docsum-gradio-ui whisper llm-docsum vllm-gaudi"

@@ -59,9 +59,9 @@ function build_vllm_docker_image() {
git clone https://github.com/HabanaAI/vllm-fork.git
fi
cd ./vllm-fork
# VLLM_VER=$(git describe --tags "$(git rev-list --tags --max-count=1)")
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-git checkout ${VLLM_VER} &> /dev/null
+VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+git checkout ${VLLM_FORK_VER} &> /dev/null
docker build --no-cache -f Dockerfile.hpu -t $vllm_image --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
if [ $? -ne 0 ]; then
echo "$vllm_image failed"