Compare commits

...

9 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| xiguiw | a03feb700b | Merge branch 'main' into update_vLLM | 2025-05-16 11:18:10 +08:00 |
| Zhu Yongbo | bb9ec6e5d2 | fix EdgeCraftRAG UI image build bug (#1964)<br>Signed-off-by: Yongbozzz <yongbo.zhu@intel.com> | 2025-05-16 10:06:46 +08:00 |
| xiguiw | 94222d5783 | Merge branch 'main' into update_vLLM | 2025-05-16 09:04:30 +08:00 |
| CICD-at-OPEA | 274af9eabc | Update vLLM version to v0.9.0<br>Signed-off-by: CICD-at-OPEA <CICD@opea.dev> | 2025-05-15 22:41:49 +00:00 |
| Daniel De León | 3fb59a9769 | Update DocSum README and environment configuration (#1917)<br>Signed-off-by: Daniel Deleon <daniel.de.leon@intel.com><br>Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com><br>Co-authored-by: Abolfazl Shahbazi <12436063+ashahba@users.noreply.github.com><br>Co-authored-by: chen, suyue <suyue.chen@intel.com><br>Co-authored-by: Eero Tamminen <eero.t.tamminen@intel.com><br>Co-authored-by: Zhenzhong Xu <zhenzhong.xu@intel.com> | 2025-05-15 11:58:58 -07:00 |
| chen, suyue | 410df80925 | [CICD enhance] AvatarChatbot run CI with latest base image, group logs in GHA outputs. (#1930)<br>Signed-off-by: chensuyue <suyue.chen@intel.com> | 2025-05-15 11:22:49 +08:00 |
| CICD-at-OPEA | 238fb52a92 | Update vLLM version to v0.8.5<br>Signed-off-by: CICD-at-OPEA <CICD@opea.dev> | 2025-05-13 22:42:16 +00:00 |
| Ying Hu | 4a17638b5c | Merge branch 'main' into update_vLLM | 2025-05-13 16:00:56 +08:00 |
| CICD-at-OPEA | 2160d43a32 | Update vLLM version to v0.8.5<br>Signed-off-by: CICD-at-OPEA <CICD@opea.dev> | 2025-05-08 08:37:52 +00:00 |
26 changed files with 126 additions and 112 deletions

View File

@@ -1,5 +1,5 @@
# Copyright (C) 2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
export VLLM_VER=v0.8.3
export VLLM_VER=v0.9.0
export VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
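This env file pins two versions: the upstream vLLM tag and, separately, the HabanaAI Gaudi fork tag. A minimal sketch of the clone-and-checkout pattern that pins like these drive (assuming `VLLM_VER` has already been sourced into the shell; the URL is the upstream vLLM project used throughout this diff):

```bash
# Sketch only: mirrors the checkout pattern the test scripts below use.
git clone https://github.com/vllm-project/vllm.git
cd vllm
git checkout "${VLLM_VER}" &> /dev/null   # e.g. v0.9.0 after this change
cd ..
```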

View File

@@ -27,7 +27,7 @@ function build_docker_images() {
git clone https://github.com/vllm-project/vllm.git
cd ./vllm/
VLLM_VER="v0.8.3"
VLLM_VER=v0.9.0
echo "Check out vLLM tag ${VLLM_VER}"
git checkout ${VLLM_VER} &> /dev/null && cd ../

View File

@@ -27,7 +27,7 @@ function build_docker_images() {
git clone https://github.com/vllm-project/vllm.git
cd ./vllm/
VLLM_VER="v0.8.3"
VLLM_VER=v0.9.0
echo "Check out vLLM tag ${VLLM_VER}"
git checkout ${VLLM_VER} &> /dev/null && cd ../

View File

@@ -1,8 +1,9 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
ARG IMAGE_REPO=opea
ARG BASE_TAG=latest
FROM opea/comps-base:$BASE_TAG
FROM $IMAGE_REPO/comps-base:$BASE_TAG
COPY ./avatarchatbot.py $HOME/avatarchatbot.py
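The new `IMAGE_REPO` build argument lets CI substitute a locally built `comps-base` image for the published `opea` one. This pattern relies on a Docker scoping rule: an `ARG` declared before `FROM` is visible only to `FROM` lines, and must be re-declared inside a stage if later instructions need it. A hedged sketch (the `RUN` line is illustrative, not from this diff):

```dockerfile
ARG IMAGE_REPO=opea
ARG BASE_TAG=latest
FROM ${IMAGE_REPO}/comps-base:${BASE_TAG}
# Pre-FROM ARGs go out of scope here; re-declare to use them in the build stage.
ARG IMAGE_REPO
RUN echo "Base image pulled from ${IMAGE_REPO}"
```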

View File

@@ -5,6 +5,8 @@ services:
avatarchatbot:
build:
args:
IMAGE_REPO: ${REGISTRY:-opea}
BASE_TAG: ${TAG:-latest}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
no_proxy: ${no_proxy}
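These compose build args forward the CI registry and tag into the Dockerfile `ARG`s above, so the same compose file can build against either the published base image or one just built in CI. For example (a hypothetical invocation, assuming the compose file above is in the working directory):

```bash
# Build against a freshly built local base image instead of opea/comps-base:latest.
REGISTRY=local TAG=ci docker compose build avatarchatbot
```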

View File

@@ -24,19 +24,13 @@ ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() {
opea_branch=${opea_branch:-"main"}
# If the opea_branch isn't main, replace the git clone branch in Dockerfile.
if [[ "${opea_branch}" != "main" ]]; then
cd $WORKPATH
OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
find . -type f -name "Dockerfile*" | while read -r file; do
echo "Processing file: $file"
sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
done
fi
cd $WORKPATH/docker_image_build
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="avatarchatbot whisper-gaudi speecht5-gaudi wav2lip-gaudi animation"
@@ -128,19 +122,29 @@ function stop_docker() {
function main() {
echo "::group::stop_docker"
stop_docker
echo y | docker builder prune --all
echo y | docker image prune
echo "::endgroup::"
docker builder prune --all -f
docker image prune -f
echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
start_services
# validate_microservices
validate_megaservice
# validate_frontend
echo "::endgroup::"
echo "::group::start_services"
start_services
echo "::endgroup::"
echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"
echo "::group::stop_docker"
stop_docker
echo y | docker builder prune --all
echo y | docker image prune
echo "::endgroup::"
docker builder prune --all -f
docker image prune -f
}
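Two patterns appear in this rewritten `main()`: each phase is wrapped in the GitHub Actions `::group::`/`::endgroup::` workflow commands, which render as collapsible sections in the job log, and the interactive `echo y |` piping is replaced by the prune commands' `-f`/`--force` flag, which suppresses the confirmation prompt directly. A minimal standalone sketch:

```bash
# Collapsible log section in a GitHub Actions job.
echo "::group::cleanup"
docker builder prune --all -f   # -f answers the prompt; no 'echo y |' pipe needed
docker image prune -f
echo "::endgroup::"
```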

View File

@@ -25,6 +25,10 @@ ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() {
cd $WORKPATH/docker_image_build
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="avatarchatbot whisper asr speecht5 tts wav2lip animation"
@@ -138,11 +142,6 @@ function validate_megaservice() {
}
#function validate_frontend() {
#}
function stop_docker() {
cd $WORKPATH/docker_compose/amd/gpu/rocm
docker compose down && docker compose rm -f
@@ -151,19 +150,27 @@ function stop_docker() {
function main() {
echo $OPENAI_API_KEY
echo $OPENAI_KEY
echo "::group::stop_docker"
stop_docker
echo "::endgroup::"
echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
start_services
# validate_microservices
sleep 30
validate_megaservice
# validate_frontend
stop_docker
echo "::endgroup::"
echo y | docker system prune
echo "::group::start_services"
start_services
echo "::endgroup::"
echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"
echo "::group::stop_docker"
stop_docker
echo "::endgroup::"
docker system prune -f
}

View File

@@ -24,19 +24,13 @@ ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() {
opea_branch=${opea_branch:-"main"}
# If the opea_branch isn't main, replace the git clone branch in Dockerfile.
if [[ "${opea_branch}" != "main" ]]; then
cd $WORKPATH
OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
find . -type f -name "Dockerfile*" | while read -r file; do
echo "Processing file: $file"
sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
done
fi
cd $WORKPATH/docker_image_build
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="avatarchatbot whisper speecht5 wav2lip animation"
@@ -127,16 +121,28 @@ function stop_docker() {
function main() {
stop_docker
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
start_services
# validate_microservices
validate_megaservice
# validate_frontend
stop_docker
echo y | docker builder prune --all
echo y | docker image prune
echo "::group::stop_docker"
stop_docker
echo "::endgroup::"
echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "::endgroup::"
echo "::group::start_services"
start_services
echo "::endgroup::"
echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"
echo "::group::stop_docker"
stop_docker
echo "::endgroup::"
docker system prune -f
}

View File

@@ -24,7 +24,7 @@ function build_docker_images() {
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
git clone https://github.com/vllm-project/vllm.git && cd vllm
VLLM_VER="v0.8.3"
VLLM_VER=v0.9.0
echo "Check out vLLM tag ${VLLM_VER}"
git checkout ${VLLM_VER} &> /dev/null && cd ../

View File

@@ -24,7 +24,7 @@ function build_docker_images() {
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
git clone https://github.com/vllm-project/vllm.git && cd vllm
VLLM_VER="v0.8.3"
VLLM_VER=v0.9.0
echo "Check out vLLM tag ${VLLM_VER}"
git checkout ${VLLM_VER} &> /dev/null && cd ../

View File

@@ -24,7 +24,7 @@ function build_docker_images() {
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
git clone https://github.com/vllm-project/vllm.git && cd vllm
VLLM_VER="v0.8.3"
VLLM_VER=v0.9.0
echo "Check out vLLM tag ${VLLM_VER}"
git checkout ${VLLM_VER} &> /dev/null
# make sure NOT to change the pwd

View File

@@ -25,7 +25,7 @@ function build_docker_images() {
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
git clone https://github.com/vllm-project/vllm.git && cd vllm
VLLM_VER="v0.8.3"
VLLM_VER=v0.9.0
echo "Check out vLLM tag ${VLLM_VER}"
git checkout ${VLLM_VER} &> /dev/null
# make sure NOT to change the pwd

View File

@@ -24,7 +24,7 @@ function build_docker_images() {
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
git clone https://github.com/vllm-project/vllm.git && cd vllm
VLLM_VER="v0.8.3"
VLLM_VER=v0.9.0
echo "Check out vLLM tag ${VLLM_VER}"
git checkout ${VLLM_VER} &> /dev/null
# make sure NOT to change the pwd

View File

@@ -25,7 +25,7 @@ function build_docker_images() {
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
git clone https://github.com/vllm-project/vllm.git && cd vllm
VLLM_VER="v0.8.3"
VLLM_VER=v0.9.0
echo "Check out vLLM tag ${VLLM_VER}"
git checkout ${VLLM_VER} &> /dev/null
# Do not change the pwd

View File

@@ -24,7 +24,7 @@ function build_docker_images() {
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s
git clone https://github.com/vllm-project/vllm.git && cd vllm
VLLM_VER="v0.8.3"
VLLM_VER=v0.9.0
echo "Check out vLLM tag ${VLLM_VER}"
git checkout ${VLLM_VER} &> /dev/null
# Do not change the pwd

View File

@@ -25,7 +25,7 @@ function build_docker_images() {
popd && sleep 1s
git clone https://github.com/vllm-project/vllm.git && cd vllm
VLLM_VER="v0.8.3"
VLLM_VER=v0.9.0
echo "Check out vLLM tag ${VLLM_VER}"
git checkout ${VLLM_VER} &> /dev/null
# Do not change the pwd

View File

@@ -42,7 +42,7 @@ function build_docker_images() {
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
git clone https://github.com/vllm-project/vllm.git && cd vllm
VLLM_VER="v0.8.3"
VLLM_VER=v0.9.0
echo "Check out vLLM tag ${VLLM_VER}"
git checkout ${VLLM_VER} &> /dev/null
cd ../

View File

@@ -26,7 +26,7 @@ function build_docker_images() {
popd && sleep 1s
git clone https://github.com/vllm-project/vllm.git && cd vllm
VLLM_VER="v0.8.3"
VLLM_VER=v0.9.0
echo "Check out vLLM tag ${VLLM_VER}"
git checkout ${VLLM_VER} &> /dev/null
cd ../

View File

@@ -21,35 +21,29 @@ This section describes how to quickly deploy and test the DocSum service manuall
6. [Test the Pipeline](#test-the-pipeline)
7. [Cleanup the Deployment](#cleanup-the-deployment)
### Access the Code
### Access the Code and Set Up Environment
Clone the GenAIExamples repository and access the DocSum Intel Xeon platform Docker Compose files and supporting scripts:
```
```bash
git clone https://github.com/opea-project/GenAIExamples.git
cd GenAIExamples/DocSum/docker_compose/intel/cpu/xeon/
cd GenAIExamples/DocSum/docker_compose
source set_env.sh
cd intel/cpu/xeon/
```
Checkout a released version, such as v1.2:
NOTE: by default vLLM performs a "warmup" at startup to optimize its performance for the specified model and the underlying platform, which can take a long time. For development (and e.g. autoscaling) it can be skipped with `export VLLM_SKIP_WARMUP=true`.
```
git checkout v1.2
Check out a released version, such as v1.3:
```bash
git checkout v1.3
```
### Generate a HuggingFace Access Token
Some HuggingFace resources, such as certain models, are only accessible with an access token. If you do not already have one, create a HuggingFace account by following the steps at [HuggingFace](https://huggingface.co/), then generate a [user access token](https://huggingface.co/docs/transformers.js/en/guides/private#step-1-generating-a-user-access-token).
### Configure the Deployment Environment
To set up environment variables for deploying DocSum services, source the _set_env.sh_ script in this directory:
```
source ./set_env.sh
```
The _set_env.sh_ script prompts for the required and optional environment variables that configure the DocSum services, falling back to a default for any value not entered, and generates a _.env_ file capturing the resulting configuration. Consult the section on [DocSum Service configuration](#docsum-service-configuration) for information on how service-specific configuration parameters affect deployments.
### Deploy the Services Using Docker Compose
To deploy the DocSum services, execute the `docker compose up` command with the appropriate arguments. For a default deployment, execute:
@@ -78,13 +72,13 @@ Please refer to the table below to build different microservices from source:
After running docker compose, check if all the containers launched via docker compose have started:
```
```bash
docker ps -a
```
For the default deployment, the following 5 containers should have started:
```
```bash
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
748f577b3c78 opea/whisper:latest "python whisper_s…" 5 minutes ago Up About a minute 0.0.0.0:7066->7066/tcp, :::7066->7066/tcp docsum-xeon-whisper-server
4eq8b7034fd9 opea/docsum-gradio-ui:latest "docker-entrypoint.s…" 5 minutes ago Up About a minute 0.0.0.0:5173->5173/tcp, :::5173->5173/tcp docsum-xeon-ui-server
@@ -109,7 +103,7 @@ curl -X POST http://${host_ip}:8888/v1/docsum \
To stop the containers associated with the deployment, execute the following command:
```
```bash
docker compose -f compose.yaml down
```

View File

@@ -23,35 +23,29 @@ This section describes how to quickly deploy and test the DocSum service manuall
6. [Test the Pipeline](#test-the-pipeline)
7. [Cleanup the Deployment](#cleanup-the-deployment)
### Access the Code
### Access the Code and Set Up Environment
Clone the GenAIExample repository and access the ChatQnA Intel® Gaudi® platform Docker Compose files and supporting scripts:
Clone the GenAIExamples repository and access the DocSum Intel® Gaudi® platform Docker Compose files and supporting scripts:
```
```bash
git clone https://github.com/opea-project/GenAIExamples.git
cd GenAIExamples/DocSum/docker_compose/intel/hpu/gaudi/
cd GenAIExamples/DocSum/docker_compose
source set_env.sh
cd intel/hpu/gaudi/
```
Checkout a released version, such as v1.2:
NOTE: by default vLLM performs a "warmup" at startup to optimize its performance for the specified model and the underlying platform, which can take a long time. For development (and e.g. autoscaling) it can be skipped with `export VLLM_SKIP_WARMUP=true`.
```
git checkout v1.2
Check out a released version, such as v1.3:
```bash
git checkout v1.3
```
### Generate a HuggingFace Access Token
Some HuggingFace resources, such as certain models, are only accessible with an access token. If you do not already have one, create a HuggingFace account by following the steps at [HuggingFace](https://huggingface.co/), then generate a [user access token](https://huggingface.co/docs/transformers.js/en/guides/private#step-1-generating-a-user-access-token).
### Configure the Deployment Environment
To set up environment variables for deploying DocSum services, source the _set_env.sh_ script in this directory:
```
source ./set_env.sh
```
The _set_env.sh_ script prompts for the required and optional environment variables that configure the DocSum services, falling back to a default for any value not entered, and generates a _.env_ file capturing the resulting configuration. Consult the section on [DocSum Service configuration](#docsum-service-configuration) for information on how service-specific configuration parameters affect deployments.
### Deploy the Services Using Docker Compose
To deploy the DocSum services, execute the `docker compose up` command with the appropriate arguments. For a default deployment, execute:
@@ -80,13 +74,13 @@ Please refer to the table below to build different microservices from source:
After running docker compose, check if all the containers launched via docker compose have started:
```
```bash
docker ps -a
```
For the default deployment, the following 5 containers should have started:
```
```bash
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
748f577b3c78 opea/whisper:latest "python whisper_s…" 5 minutes ago Up About a minute 0.0.0.0:7066->7066/tcp, :::7066->7066/tcp docsum-gaudi-whisper-server
4eq8b7034fd9 opea/docsum-gradio-ui:latest "docker-entrypoint.s…" 5 minutes ago Up About a minute 0.0.0.0:5173->5173/tcp, :::5173->5173/tcp docsum-gaudi-ui-server
@@ -111,7 +105,7 @@ curl -X POST http://${host_ip}:8888/v1/docsum \
To stop the containers associated with the deployment, execute the following command:
```
```bash
docker compose -f compose.yaml down
```

View File

@@ -18,6 +18,7 @@ services:
OMPI_MCA_btl_vader_single_copy_mechanism: none
LLM_MODEL_ID: ${LLM_MODEL_ID}
NUM_CARDS: ${NUM_CARDS}
VLLM_SKIP_WARMUP: ${VLLM_SKIP_WARMUP:-false}
VLLM_TORCH_PROFILER_DIR: "/mnt"
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"]
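The `${VLLM_SKIP_WARMUP:-false}` syntax is standard Compose variable substitution with a default: warmup stays enabled unless the caller exports the variable, matching the README note added earlier in this diff. A usage sketch:

```bash
# Development shortcut: skip vLLM warmup for a faster container start.
export VLLM_SKIP_WARMUP=true
docker compose up -d
```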

View File

@@ -6,10 +6,10 @@ pushd "../../" > /dev/null
source .set_env.sh
popd > /dev/null
export host_ip=$(hostname -I | awk '{print $1}') # Example: host_ip="192.168.1.1"
export no_proxy="${no_proxy},${host_ip}" # Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
export http_proxy=$http_proxy
export https_proxy=$https_proxy
export host_ip=$(hostname -I | awk '{print $1}') # Example: host_ip="192.168.1.1"
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export LLM_ENDPOINT_PORT=8008
@@ -29,3 +29,8 @@ export BACKEND_SERVICE_PORT=8888
export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum"
export LOGFLAG=True
export NUM_CARDS=1
export BLOCK_SIZE=128
export MAX_NUM_SEQS=256
export MAX_SEQ_LEN_TO_CAPTURE=2048
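The three new exports correspond to vLLM engine tuning options: KV-cache block size, maximum concurrent sequences, and the longest sequence captured by graph compilation. A hedged illustration of how such variables typically map onto vLLM server flags (an assumed invocation, not taken from this diff; the compose file is expected to do the equivalent forwarding):

```bash
python -m vllm.entrypoints.openai.api_server \
  --model "${LLM_MODEL_ID}" \
  --block-size "${BLOCK_SIZE}" \
  --max-num-seqs "${MAX_NUM_SEQS}" \
  --max-seq-len-to-capture "${MAX_SEQ_LEN_TO_CAPTURE}"
```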

View File

@@ -46,7 +46,7 @@ function build_docker_images() {
popd && sleep 1s
git clone https://github.com/vllm-project/vllm.git && cd vllm
VLLM_VER="v0.8.3"
VLLM_VER=v0.9.0
echo "Check out vLLM tag ${VLLM_VER}"
git checkout ${VLLM_VER} &> /dev/null
cd ../

View File

@@ -18,12 +18,10 @@ declare module 'vue' {
AConfigProvider: typeof import('ant-design-vue/es')['ConfigProvider']
ADescriptions: typeof import('ant-design-vue/es')['Descriptions']
ADescriptionsItem: typeof import('ant-design-vue/es')['DescriptionsItem']
ADivider: typeof import('ant-design-vue/es')['Divider']
ADrawer: typeof import('ant-design-vue/es')['Drawer']
AEmpty: typeof import('ant-design-vue/es')['Empty']
AForm: typeof import('ant-design-vue/es')['Form']
AFormItem: typeof import('ant-design-vue/es')['FormItem']
AImage: typeof import('ant-design-vue/es')['Image']
AInput: typeof import('ant-design-vue/es')['Input']
AInputNumber: typeof import('ant-design-vue/es')['InputNumber']
ALayout: typeof import('ant-design-vue/es')['Layout']
@@ -31,7 +29,6 @@ declare module 'vue' {
ALayoutHeader: typeof import('ant-design-vue/es')['LayoutHeader']
AModal: typeof import('ant-design-vue/es')['Modal']
APagination: typeof import('ant-design-vue/es')['Pagination']
APopover: typeof import('ant-design-vue/es')['Popover']
ARadio: typeof import('ant-design-vue/es')['Radio']
ARadioGroup: typeof import('ant-design-vue/es')['RadioGroup']
ARow: typeof import('ant-design-vue/es')['Row']

View File

@@ -12,13 +12,16 @@
"@vueuse/i18n": "^4.0.0-beta.12",
"ant-design-vue": "^4.0.0-rc.6",
"axios": "^1.7.9",
"clipboard": "^2.0.11",
"dayjs": "^1.11.13",
"echarts": "^5.5.1",
"event-source-polyfill": "^1.0.31",
"highlight.js": "^11.11.1",
"http": "^0.0.1-security",
"js-cookie": "^3.0.5",
"lodash": "^4.17.21",
"marked": "^15.0.6",
"pinia": "^2.3.0",
"pinia": "^3.0.2",
"pinia-plugin-persistedstate": "^4.2.0",
"qs": "^6.13.1",
"socket.io-client": "^4.8.1",

View File

@@ -19,7 +19,7 @@ function build_vllm_docker_image() {
if [ ! -d "./vllm" ]; then
git clone https://github.com/vllm-project/vllm.git
cd vllm
VLLM_VER="v0.8.3"
VLLM_VER=v0.9.0
echo "Check out vLLM tag ${VLLM_VER}"
git checkout ${VLLM_VER} &> /dev/null
git rev-parse HEAD