Fix vllm hpu to a stable release (#1398)
Signed-off-by: Xinyao Wang <xinyao.wang@intel.com>
.github/workflows/_example-workflow.yml (vendored, 1 line added)
@@ -79,6 +79,7 @@ jobs:
           fi
           if [[ $(grep -c "vllm-gaudi:" ${docker_compose_path}) != 0 ]]; then
             git clone https://github.com/HabanaAI/vllm-fork.git
+            cd vllm-fork && git checkout v0.6.4.post2+Gaudi-1.19.0 && cd ../
           fi
           git clone https://github.com/opea-project/GenAIComps.git
           cd GenAIComps && git checkout ${{ inputs.opea_branch }} && git rev-parse HEAD && cd ../
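Taken on its own, the line this hunk adds pins the HabanaAI vllm-fork checkout to a fixed release tag instead of building whatever the fork's HEAD happens to be. A minimal standalone sketch of that step follows; the tag and URL come from the diff, while set -e, the re-clone guard, and the VLLM_FORK_TAG variable are illustrative assumptions, not part of the workflow:

#!/usr/bin/env bash
set -e

# Pin vllm-fork to the stable release this commit targets, so CI builds
# are reproducible rather than tracking the fork's moving default branch.
VLLM_FORK_TAG="v0.6.4.post2+Gaudi-1.19.0"

# Skip the clone if a previous step already fetched the repo (assumed guard).
if [[ ! -d vllm-fork ]]; then
    git clone https://github.com/HabanaAI/vllm-fork.git
fi
cd vllm-fork && git checkout "${VLLM_FORK_TAG}" && cd ../

The second hunk, from a shell build script whose filename was lost in extraction, applies the same pin on the image-build path: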
@@ -17,7 +17,7 @@ ip_address=$(hostname -I | awk '{print $1}')
 function build_docker_images() {
     cd $WORKPATH/docker_image_build
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
-    git clone https://github.com/HabanaAI/vllm-fork.git
+    git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork && git checkout v0.6.4.post2+Gaudi-1.19.0 && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
     service_list="chatqna chatqna-ui dataprep-redis retriever vllm-gaudi nginx"
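Since the same clone-then-checkout chain now appears in both the workflow and the build script, a small helper could factor it out. The clone_at_ref function below is a hypothetical refactoring sketch, not part of the commit:

#!/usr/bin/env bash
# Hypothetical helper: clone a repo (if not already present) and check out
# a specific tag, branch, or commit, mirroring the inline pattern in the diff.
clone_at_ref() {
    local url="$1" ref="$2"
    local dir
    dir=$(basename "$url" .git)
    [[ -d "$dir" ]] || git clone "$url"
    git -C "$dir" checkout "$ref"
}

# Usage matching the two call sites touched by this commit:
clone_at_ref https://github.com/opea-project/GenAIComps.git "${opea_branch:-main}"
clone_at_ref https://github.com/HabanaAI/vllm-fork.git "v0.6.4.post2+Gaudi-1.19.0"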