Compare commits
4 Commits
main...update_vLL
| Author | SHA1 | Date |
|---|---|---|
|  | c712c01829 |  |
|  | 66ecaf2e6f |  |
|  | 46fab4a736 |  |
|  | 8ba40a5bff |  |
.github/env/_build_image.sh (vendored, 2 changed lines)
```diff
@@ -2,4 +2,4 @@
 # SPDX-License-Identifier: Apache-2.0
 
 export VLLM_VER=v0.8.3
-export VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+export VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
```
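Every hunk in this compare applies the same one-line bump of the pinned Gaudi vLLM fork tag. For orientation, the clone-and-checkout pattern shared by the affected scripts, distilled from the hunks below rather than taken from any single file, looks roughly like this:

```bash
# Minimal sketch of the shared pattern (distilled from the hunks in this compare,
# not a file in the repo): clone the Gaudi vLLM fork and pin it to the new tag.
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
```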
```diff
@@ -37,7 +37,7 @@ function build_agent_docker_image_gaudi_vllm() {
     get_genai_comps
 
     git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
     echo "Build agent image with --no-cache..."
```
```diff
@@ -27,7 +27,7 @@ function build_docker_images() {
 
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     echo "Check out vLLM tag ${VLLM_FORK_VER}"
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
```
```diff
@@ -24,7 +24,7 @@ function build_docker_images() {
     docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
     popd && sleep 1s
     git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
```
```diff
@@ -24,7 +24,7 @@ function build_docker_images() {
     docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
     popd && sleep 1s
     git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
```
```diff
@@ -24,7 +24,7 @@ function build_docker_images() {
     docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
     popd && sleep 1s
     git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
```
```diff
@@ -24,7 +24,7 @@ function build_docker_images() {
     docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
     popd && sleep 1s
     git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
```
```diff
@@ -37,7 +37,7 @@ function build_docker_images() {
 
     # Download Gaudi vllm of latest tag
     git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     echo "Check out vLLM tag ${VLLM_FORK_VER}"
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
```
```diff
@@ -26,7 +26,7 @@ function build_docker_images() {
     popd && sleep 1s
 
     git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
```
```diff
@@ -50,7 +50,7 @@ function build_docker_images() {
     popd && sleep 1s
 
     git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
```
```diff
@@ -60,7 +60,7 @@ function build_vllm_docker_image() {
     fi
     cd ./vllm-fork
 
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     git checkout ${VLLM_FORK_VER} &> /dev/null
     docker build --no-cache -f Dockerfile.hpu -t $vllm_image --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     if [ $? -ne 0 ]; then
```
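Before the `Dockerfile.hpu` build above runs, one way to sanity-check that the new tag resolves is a quick local checkout; this is a hypothetical verification snippet, not part of the change:

```bash
# Hypothetical local check (not in this diff): confirm the bumped tag exists
# in the fork and that HEAD lands on it before building the image.
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
git checkout v0.7.2+Gaudi-1.21.0
git describe --tags   # expected to print v0.7.2+Gaudi-1.21.0
```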
```diff
@@ -27,7 +27,7 @@ function build_docker_images() {
     popd && sleep 1s
 
     git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
+    VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
 
     service_list="visualqna visualqna-ui lvm nginx vllm-gaudi"
```
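The `service_list` line and the "check docker_image_build.log for details" messages in the hunks above suggest the scripts then build the listed services and capture the output in a log. A hedged sketch of that step, with the compose file name assumed, might be:

```bash
# Hedged sketch: build only the services named in service_list with --no-cache
# and capture the output in docker_image_build.log. The build.yaml name is an
# assumption; the actual compose file is not shown in this compare.
service_list="visualqna visualqna-ui lvm nginx vllm-gaudi"
docker compose -f build.yaml build --no-cache ${service_list} > docker_image_build.log 2>&1
```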