Files
GenAIExamples/comps/llms/text-generation/vllm/llama_index/dependency/Dockerfile.intel_hpu
lvliang-intel 618f45bab1 Upgrade habana docker version to 1.18.0 (#854)
* Upgrade habana docker version to 1.18.0

Signed-off-by: lvliang-intel <liang1.lv@intel.com>

* fix issues

Signed-off-by: lvliang-intel <liang1.lv@intel.com>

* fix ci issue

Signed-off-by: lvliang-intel <liang1.lv@intel.com>

---------

Signed-off-by: lvliang-intel <liang1.lv@intel.com>
2024-11-07 11:28:48 +08:00

25 lines
733 B
Docker

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# vLLM serving image for Intel Gaudi (HPU), built on the Habana 1.18.0
# PyTorch installer base (Ubuntu 22.04, PyTorch 2.4.0).
FROM vault.habana.ai/gaudi-docker/1.18.0/ubuntu22.04/habanalabs/pytorch-installer-2.4.0 AS hpu

# Create a non-root service user. `useradd -m` already creates /home/user;
# the mkdir/chown are kept as a defensive no-op in case the base image
# pre-populates the path.
RUN useradd -m -s /bin/bash user && \
mkdir -p /home/user && \
chown -R user /home/user/

ENV LANG=en_US.UTF-8

# Enable root SSH login (used for multi-node Gaudi communication).
# NOTE(review): `service ssh restart` only restarts sshd inside the transient
# build container — it has no effect on the final image. Only the edited
# sshd_config persists; sshd must be started by whatever launches the container.
RUN sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config && \
service ssh restart

USER user
# Fix: the original used `WORKDIR /root`, which the non-root `user` cannot
# read or write, so the container started in an inaccessible directory.
# Use the user's own home directory instead.
WORKDIR /home/user

# Habana-optimized transformers runtime (eager upgrade pulls current
# optimum-habana plus its pinned dependency set from the base image).
RUN pip install --no-cache-dir --upgrade-strategy eager optimum[habana]
# Habana fork of vLLM, pinned to commit cf6952d for reproducibility.
RUN pip install --no-cache-dir -v git+https://github.com/HabanaAI/vllm-fork.git@cf6952d
RUN pip install --no-cache-dir setuptools

# HPU graph/laziness tuning for vLLM serving.
ENV PT_HPU_LAZY_ACC_PAR_MODE=0
ENV PT_HPU_ENABLE_LAZY_COLLECTIVES=true

CMD ["/bin/bash"]