remove vllm hpu commit id limit (#1139)
Signed-off-by: Xinyao Wang <xinyao.wang@intel.com>
This commit removes the hard-coded vllm-fork commit pin (3c39626) from the CI workflow and the Gaudi test scripts, so opea/vllm-gaudi images are built from the current default branch of HabanaAI/vllm-fork instead of a fixed snapshot.

.github/workflows/_comps-workflow.yml
@@ -65,7 +65,6 @@ jobs:
           fi
           if [[ $(grep -c "vllm-gaudi:" ${docker_compose_yml}) != 0 ]]; then
             git clone https://github.com/HabanaAI/vllm-fork.git vllm-fork
-            cd vllm-fork && git checkout 3c39626 && cd ../
           fi
       - name: Get build list
         id: get-build-list
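With the pin gone, each CI run builds whatever commit the fork's default branch points at when the workflow fires. A minimal sketch of how the step could additionally record that commit for traceability; the echo/rev-parse line is an illustrative addition, not part of this commit:

    if [[ $(grep -c "vllm-gaudi:" ${docker_compose_yml}) != 0 ]]; then
      git clone https://github.com/HabanaAI/vllm-fork.git vllm-fork
      # Illustrative addition: log which commit is actually built now that it is unpinned
      echo "vllm-fork HEAD: $(git -C vllm-fork rev-parse --short HEAD)"
    fi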
@@ -37,7 +37,6 @@ fi
 if [ "$hw_mode" = "hpu" ]; then
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd ./vllm-fork/
-    git checkout 3c39626
     docker build -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     cd ..
     rm -rf vllm-fork
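For builds that later need to be reproducible, a pin could be restored behind an opt-in variable rather than hard-coded. A sketch of that pattern; the VLLM_FORK_COMMIT override is hypothetical and not part of this commit:

    if [ "$hw_mode" = "hpu" ]; then
        git clone https://github.com/HabanaAI/vllm-fork.git
        cd ./vllm-fork/
        # Hypothetical override, NOT part of this commit: restore a pin only when set
        if [ -n "$VLLM_FORK_COMMIT" ]; then
            git checkout "$VLLM_FORK_COMMIT"
        fi
        docker build -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g . \
            --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
        cd ..
        rm -rf vllm-fork
    fi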
@@ -12,7 +12,6 @@ function build_docker_images() {
     cd $WORKPATH
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
-    git checkout 3c39626
     docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
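The test-script pattern after this change, sketched as a self-contained function. It assumes WORKPATH is exported by the test harness, and the exit 1 after the failure message is assumed from the usual pattern, since the hunk is truncated at the echo:

    function build_docker_images() {
        cd $WORKPATH
        git clone https://github.com/HabanaAI/vllm-fork.git
        cd vllm-fork/
        # No pin: the image now tracks the fork's default-branch HEAD
        docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g .
        if [ $? -ne 0 ]; then
            echo "opea/vllm-gaudi built fail"
            exit 1   # assumed continuation; the original hunk ends at the echo
        fi
    }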
@@ -12,7 +12,6 @@ function build_docker_images() {
     cd $WORKPATH
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
-    git checkout 3c39626
     docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
@@ -12,7 +12,6 @@ function build_docker_images() {
     cd $WORKPATH
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
-    git checkout 3c39626
     docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
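The last three hunks apply the same one-line removal to three separate test scripts; they differ only in the image tag (latest vs. comps). If a vllm-fork regression ever breaks these builds, the old pinned build can still be reproduced by hand with the exact commands this change removes:

    git clone https://github.com/HabanaAI/vllm-fork.git
    cd vllm-fork/
    git checkout 3c39626   # re-pin to the commit this change un-pins
    docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g .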