Remove vllm hpu triton version fix (#1515)
vllm-fork has fixed the triton version issue, so remove the duplicated code: https://github.com/HabanaAI/vllm-fork/blob/habana_main/requirements-hpu.txt

Signed-off-by: Xinyao Wang <xinyao.wang@intel.com>
Co-authored-by: chen, suyue <suyue.chen@intel.com>
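For context, the upstream requirements file linked above is assumed to carry its own triton pin, roughly of the form shown here; the exact line is an assumption inferred from the sed command removed in the diff below, not a quote of the upstream file.

# assumed content of vllm-fork/requirements-hpu.txt (illustrative only)
triton==3.1.0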
@@ -43,7 +43,6 @@ function build_vllm_docker_image() {
     fi
     cd ./vllm-fork
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:ci --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi:ci failed"
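Since the pin now lives only upstream, the CI script could fail fast if it ever disappears from requirements-hpu.txt. The guard below is a hypothetical sketch, not part of this commit; it assumes the ./vllm-fork checkout path and requirements file name shown in the diff above.

#!/usr/bin/env bash
# Hypothetical guard (not part of this commit): abort the image build early
# if the upstream requirements-hpu.txt no longer pins triton, since the
# local sed workaround has been removed.
cd ./vllm-fork
if ! grep -Eq '^triton==' requirements-hpu.txt; then
    echo "triton is not pinned in upstream requirements-hpu.txt; review before building opea/vllm-gaudi:ci"
    exit 1
fi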