Update aipc ollama docker compose and readme (#984)
Signed-off-by: lvliang-intel <liang1.lv@intel.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: chen, suyue <suyue.chen@intel.com>
@@ -17,8 +17,6 @@ To set up environment variables for deploying ChatQnA services, follow these ste
 ```bash
 # Example: host_ip="192.168.1.1"
 export host_ip="External_Public_IP"
-# Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
-export no_proxy="Your_No_Proxy"
 export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
 ```

@@ -27,6 +25,9 @@ To set up environment variables for deploying ChatQnA services, follow these ste
 ```bash
 export http_proxy="Your_HTTP_Proxy"
 export https_proxy="Your_HTTPs_Proxy"
+# Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
+export no_proxy="Your_No_Proxy",chatqna-xeon-ui-server,chatqna-xeon-backend-server,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm_service
 ```

 3. Set up other environment variables:
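For reference, a filled-in version of steps 1 and 2 above might look like the following. This is only a sketch: the IP address, proxy URL, and token are illustrative placeholders, not values from this commit.

```bash
# Step 1: host and Huggingface settings (illustrative values)
export host_ip="192.168.1.1"
export HUGGINGFACEHUB_API_TOKEN="hf_xxxxxxxxxxxxxxxx"

# Step 2: proxy settings; the compose service names are appended to
# no_proxy so that inter-container requests bypass the corporate proxy
export http_proxy="http://proxy.example.com:912"
export https_proxy="http://proxy.example.com:912"
export no_proxy="localhost,127.0.0.1,${host_ip},chatqna-xeon-ui-server,chatqna-xeon-backend-server,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm_service"
```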
@@ -218,8 +219,6 @@ For users in China who are unable to download models directly from Huggingface,
 ```bash
 # Example: host_ip="192.168.1.1"
 export host_ip="External_Public_IP"
-# Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
-export no_proxy="Your_No_Proxy"
 export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
 # Example: NGINX_PORT=80
 export NGINX_PORT=${your_nginx_port}
@@ -230,6 +229,8 @@ For users in China who are unable to download models directly from Huggingface,
 ```bash
 export http_proxy="Your_HTTP_Proxy"
 export https_proxy="Your_HTTPs_Proxy"
+# Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
+export no_proxy="Your_No_Proxy",chatqna-xeon-ui-server,chatqna-xeon-backend-server,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm_service
 ```

 3. Set up other environment variables:
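Since the China-specific section repeats the same variables plus `NGINX_PORT`, a quick pre-flight check can catch a missing export before `docker compose up` fails later. A minimal sketch, assuming bash and the variable names from the hunks above:

```bash
# Fail fast if any required variable is unset or empty
for v in host_ip HUGGINGFACEHUB_API_TOKEN NGINX_PORT; do
  if [ -z "${!v}" ]; then          # ${!v} is bash indirect expansion
    echo "ERROR: $v is not set" >&2
    exit 1
  fi
done
echo "All required variables are set."
```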
@@ -167,10 +167,10 @@ export host_ip="External_Public_IP"
 export your_hf_api_token="Your_Huggingface_API_Token"
 ```

-**Append the value of the public IP address to the no_proxy list**
+**Append the value of the public IP address to the no_proxy list if you are in a proxy environment**

 ```
-export your_no_proxy=${your_no_proxy},"External_Public_IP"
+export your_no_proxy=${your_no_proxy},"External_Public_IP",chatqna-xeon-ui-server,chatqna-xeon-backend-server,dataprep-qdrant-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service
 ```

 ```bash
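The service names are appended because, in a proxied environment, a request addressed to a compose-internal hostname such as `tei-embedding-service` would otherwise be routed to the proxy, which cannot resolve it; listing the names in `no_proxy` makes clients connect directly. A hedged way to verify the embedding service afterwards (port 6006 is the TEI embedding port used by some ChatQnA compose files and may differ in yours; `/embed` is the standard TEI route):

```bash
# curl honors no_proxy, so hosts on the list are contacted directly
export no_proxy=${your_no_proxy}
curl "http://${host_ip}:6006/embed" \
  -H 'Content-Type: application/json' \
  -d '{"inputs": "What is Deep Learning?"}'
```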
@@ -112,6 +112,7 @@ services:
       - RERANK_SERVER_PORT=${RERANK_SERVER_PORT:-80}
       - LLM_SERVER_HOST_IP=tgi-service
       - LLM_SERVER_PORT=${LLM_SERVER_PORT:-80}
       - LLM_MODEL=${LLM_MODEL_ID}
+      - LOGFLAG=${LOGFLAG}
     ipc: host
     restart: always
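The `LOGFLAG` line added in these compose hunks just forwards a shell variable into the container environment; if `LOGFLAG` is unset on the host, the service sees an empty value. A sketch of turning verbose logging on, assuming the service treats `LOGFLAG=True` as a truthy switch and using the backend container name from the ChatQnA compose file:

```bash
# Enable verbose logging, recreate the services, and watch the backend
export LOGFLAG=True
docker compose up -d
docker logs -f chatqna-xeon-backend-server
```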
@@ -111,6 +111,7 @@ services:
       - RERANK_SERVER_PORT=${RERANK_SERVER_PORT:-80}
       - LLM_SERVER_HOST_IP=tgi-service
       - LLM_SERVER_PORT=${LLM_SERVER_PORT:-80}
       - LLM_MODEL=${LLM_MODEL_ID}
+      - LOGFLAG=${LOGFLAG}
     ipc: host
     restart: always
@@ -110,6 +110,7 @@ services:
       - RERANK_SERVER_PORT=${RERANK_SERVER_PORT:-80}
       - LLM_SERVER_HOST_IP=vllm_service
       - LLM_SERVER_PORT=${LLM_SERVER_PORT:-80}
       - LLM_MODEL=${LLM_MODEL_ID}
+      - LOGFLAG=${LOGFLAG}
     ipc: host
     restart: always
@@ -93,6 +93,7 @@ services:
       - RETRIEVER_SERVICE_HOST_IP=retriever
       - LLM_SERVER_HOST_IP=tgi-service
       - LLM_SERVER_PORT=${LLM_SERVER_PORT:-80}
       - LLM_MODEL=${LLM_MODEL_ID}
+      - LOGFLAG=${LOGFLAG}
     ipc: host
     restart: always
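After any of these compose files is up, a single request through the mega-service gateway confirms the LLM, retriever, and rerank wiring end to end. A sketch assuming the default ChatQnA backend port 8888 and the `/v1/chatqna` route; adjust both if your compose file maps them differently:

```bash
# Minimal end-to-end smoke test of the ChatQnA pipeline
curl "http://${host_ip}:8888/v1/chatqna" \
  -H 'Content-Type: application/json' \
  -d '{"messages": "What is the revenue of Nike in 2023?"}'
```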