Change /root/ to /home/user/. (#475)

* Change /root/ to /home/user/.

Signed-off-by: zepan <ze.pan@intel.com>

* Fix issue.

Signed-off-by: zepan <ze.pan@intel.com>

---------

Signed-off-by: zepan <ze.pan@intel.com>
Author: ZePan110
Date: 2024-08-14 15:46:28 +08:00
Committed by: GitHub
Parent: 0bd821532f
Commit: 4a67d427bd
8 changed files with 20 additions and 20 deletions

@@ -12,7 +12,7 @@ MILVUS_HOST = os.getenv("MILVUS", "localhost")
 MILVUS_PORT = int(os.getenv("MILVUS_PORT", 19530))
 COLLECTION_NAME = os.getenv("COLLECTION_NAME", "rag_milvus")
-MOSEC_EMBEDDING_MODEL = os.environ.get("MOSEC_EMBEDDING_MODEL", "/root/bce-embedding-base_v1")
+MOSEC_EMBEDDING_MODEL = os.environ.get("MOSEC_EMBEDDING_MODEL", "/home/user/bce-embedding-base_v1")
 MOSEC_EMBEDDING_ENDPOINT = os.environ.get("MOSEC_EMBEDDING_ENDPOINT", "")
 os.environ["OPENAI_API_BASE"] = MOSEC_EMBEDDING_ENDPOINT
 os.environ["OPENAI_API_KEY"] = "Dummy key"

@@ -67,7 +67,7 @@ if __name__ == "__main__":
 MOSEC_EMBEDDING_ENDPOINT = os.environ.get("MOSEC_EMBEDDING_ENDPOINT", "http://127.0.0.1:8080")
 os.environ["OPENAI_API_BASE"] = MOSEC_EMBEDDING_ENDPOINT
 os.environ["OPENAI_API_KEY"] = "Dummy key"
-MODEL_ID = "/root/bge-large-zh-v1.5"
+MODEL_ID = "/home/user/bge-large-zh-v1.5"
 embeddings = MosecEmbeddings(model=MODEL_ID)
 print("Mosec Embedding initialized.")
 opea_microservices["opea_service@embedding_mosec"].start()
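Note: `MODEL_ID` is hardcoded here, so the model directory must already sit at the new non-root path when the service starts. A pre-flight check sketch (the check itself is my addition, not part of the service code):

```python
import os

MODEL_ID = "/home/user/bge-large-zh-v1.5"

# Fail fast if the model was not moved along with the /root -> /home/user change.
if not os.path.isdir(MODEL_ID):
    raise FileNotFoundError(f"model directory not found: {MODEL_ID}")
```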

@@ -25,13 +25,13 @@ docker run -itd -p 8000:8000 embedding:latest
 - Restful API by curl
 ```shell
-curl -X POST http://127.0.0.1:8000/v1/embeddings -H "Content-Type: application/json" -d '{ "model": "/root/bge-large-zh-v1.5/", "input": "hello world"}'
+curl -X POST http://127.0.0.1:8000/v1/embeddings -H "Content-Type: application/json" -d '{ "model": "/home/user/bge-large-zh-v1.5/", "input": "hello world"}'
 ```
 - generate embedding from python
 ```python
-DEFAULT_MODEL = "/root/bge-large-zh-v1.5/"
+DEFAULT_MODEL = "/home/user/bge-large-zh-v1.5/"
 SERVICE_URL = "http://127.0.0.1:8000"
 INPUT_STR = "Hello world!"

@@ -4,7 +4,7 @@
 from openai import Client
-DEFAULT_MODEL = "/root/bge-large-zh-v1.5/"
+DEFAULT_MODEL = "/home/user/bge-large-zh-v1.5/"
 SERVICE_URL = "http://127.0.0.1:8000"
 INPUT_STR = "Hello world!"
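Note: the hunk cuts off before the request itself; a hedged completion sketch using the `openai` v1 SDK (`Client` is the alias the file above imports; the `/v1` join on `base_url` is an assumption based on the curl example):

```python
from openai import Client

DEFAULT_MODEL = "/home/user/bge-large-zh-v1.5/"
SERVICE_URL = "http://127.0.0.1:8000"
INPUT_STR = "Hello world!"

# The curl example above posts to /v1/embeddings, so the SDK base_url
# needs the /v1 prefix (assumption); the key is a dummy, as in the configs.
client = Client(api_key="Dummy key", base_url=f"{SERVICE_URL}/v1")
resp = client.embeddings.create(model=DEFAULT_MODEL, input=INPUT_STR)
print(len(resp.data[0].embedding))  # embedding dimensionality
```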

@@ -51,7 +51,7 @@ docker run -d --rm --name="vllm-openvino-server" \
 -e HTTPS_PROXY=$https_proxy \
 -e HTTP_PROXY=$https_proxy \
 -e HF_TOKEN=${HUGGINGFACEHUB_API_TOKEN} \
--v $HOME/.cache/huggingface:/root/.cache/huggingface \
+-v $HOME/.cache/huggingface:/home/user/.cache/huggingface \
 vllm:openvino /bin/bash -c "\
 cd / && \
 export VLLM_CPU_KVCACHE_SPACE=50 && \
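Note: with the mount target moved under the non-root home, the host-side cache at `$HOME/.cache/huggingface` still feeds the container; a hedged sketch of pre-populating it with `huggingface_hub` (the repo id is a placeholder):

```python
from huggingface_hub import snapshot_download

# Downloads into the default host cache (~/.cache/huggingface/hub), which
# the docker run above bind-mounts to /home/user/.cache/huggingface.
snapshot_download(repo_id="org/model-name")  # placeholder repo id
```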

@@ -6,10 +6,10 @@ FROM vault.habana.ai/gaudi-docker/1.16.0/ubuntu22.04/habanalabs/pytorch-installe
 ENV LANG=en_US.UTF-8
-WORKDIR /root/vllm-ray
+WORKDIR /home/user/vllm-ray
 # copy the source code to the package directory
-COPY comps/llms/text-generation/vllm-ray /root/vllm-ray
+COPY comps/llms/text-generation/vllm-ray /home/user/vllm-ray
 RUN pip install --upgrade-strategy eager optimum[habana] && \
 pip install git+https://github.com/HabanaAI/DeepSpeed.git@1.15.1
@@ -21,7 +21,7 @@ RUN sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/
 service ssh restart
 ENV no_proxy=localhost,127.0.0.1
-ENV PYTHONPATH=$PYTHONPATH:/root:/root/vllm-ray
+ENV PYTHONPATH=$PYTHONPATH:/root:/home/user/vllm-ray
 # Required by DeepSpeed
 ENV RAY_EXPERIMENTAL_NOSET_HABANA_VISIBLE_MODULES=1

@@ -58,13 +58,13 @@ RUN cmake .. -DCMAKE_INSTALL_PREFIX=/usr/local/oneCCL \
RUN echo "source /usr/local/oneCCL/env/setvars.sh" >> ~/.bashrc
WORKDIR /root/
WORKDIR /home/user/
RUN rm -rf /tmp/oneCCL
RUN git clone https://github.com/intel/xFasterTransformer.git
SHELL ["/bin/bash", "-c"]
WORKDIR /root/xFasterTransformer
WORKDIR /home/user/xFasterTransformer
RUN git checkout ${TAG} \
&& export "LD_LIBRARY_PATH=/usr/local/mklml_lnx_2019.0.5.20190502/lib:$LD_LIBRARY_PATH" \
&& export "PATH=/usr/bin/python3.8:$PATH" \
@@ -75,23 +75,23 @@ RUN git checkout ${TAG} \
&& pip install --no-cache-dir dist/*
RUN mkdir -p /usr/local/xft/lib \
&& cp /root/xFasterTransformer/build/libxfastertransformer.so /usr/local/xft/lib \
&& cp /root/xFasterTransformer/build/libxft_comm_helper.so /usr/local/xft/lib \
&& cp -r /root/xFasterTransformer/include /usr/local/xft/ \
&& cp /home/user/xFasterTransformer/build/libxfastertransformer.so /usr/local/xft/lib \
&& cp /home/user/xFasterTransformer/build/libxft_comm_helper.so /usr/local/xft/lib \
&& cp -r /home/user/xFasterTransformer/include /usr/local/xft/ \
&& mkdir -p /usr/local/include/xft/ \
&& ln -s /usr/local/xft/include /usr/local/include/xft/include
RUN echo "export \$(python -c 'import xfastertransformer as xft; print(xft.get_env())')" >> ~/.bashrc
COPY comps /root/comps
COPY comps /home/user/comps
RUN pip install --no-cache-dir --upgrade pip && \
pip install --no-cache-dir -r /root/comps/llms/text-generation/vllm-xft/requirements.txt
pip install --no-cache-dir -r /home/user/comps/llms/text-generation/vllm-xft/requirements.txt
ENV PYTHONPATH=$PYTHONPATH:/root
RUN chmod +x /root/comps/llms/text-generation/vllm-xft/run.sh
RUN chmod +x /home/user/comps/llms/text-generation/vllm-xft/run.sh
WORKDIR /root/comps/llms/text-generation/vllm-xft/
WORKDIR /home/user/comps/llms/text-generation/vllm-xft/
ENTRYPOINT ["/root/comps/llms/text-generation/vllm-xft/run.sh"]
ENTRYPOINT ["/home/user/comps/llms/text-generation/vllm-xft/run.sh"]

@@ -16,4 +16,4 @@ COLLECTION_NAME = os.getenv("COLLECTION_NAME", "rag_milvus")
 MOSEC_EMBEDDING_ENDPOINT = os.environ.get("MOSEC_EMBEDDING_ENDPOINT", "")
 os.environ["OPENAI_API_BASE"] = MOSEC_EMBEDDING_ENDPOINT
 os.environ["OPENAI_API_KEY"] = "Dummy key"
-MODEL_ID = "/root/bce-embedding-base_v1"
+MODEL_ID = "/home/user/bce-embedding-base_v1"