Replace environment variables with service name for ChatQnA (#977)
Signed-off-by: lvliang-intel <liang1.lv@intel.com>
@@ -61,7 +61,7 @@ Run the command to download LLM models. The <host_ip> is the one set in [Ollama
 ```
 export host_ip=<host_ip>
 export OLLAMA_HOST=http://${host_ip}:11434
-ollama pull llama3
+ollama pull llama3.2
 ```

 After downloading the models, you can list them with `ollama list`.
@@ -69,8 +69,8 @@ After downloading the models, you can list them with `ollama list`.
 The output should be similar to the following:

 ```
-NAME               ID              SIZE      MODIFIED
-llama3:latest      365c0bd3c000    4.7 GB    5 days ago
+NAME               ID              SIZE      MODIFIED
+llama3.2:latest    a80c4f17acd5    2.0 GB    2 minutes ago
 ```

 ### Consume Ollama LLM Service
@@ -78,25 +78,25 @@ llama3:latest 365c0bd3c000 4.7 GB 5 days ago
 Access the Ollama service to verify that it is functioning correctly.

 ```bash
-curl http://${host_ip}:11434/api/generate -d '{"model": "llama3", "prompt":"What is Deep Learning?"}'
+curl http://${host_ip}:11434/api/generate -d '{"model": "llama3.2", "prompt":"What is Deep Learning?"}'
 ```

 The output should be similar to the following:

 ```
-{"model":"llama3","created_at":"2024-10-11T07:58:38.949268562Z","response":"Deep","done":false}
-{"model":"llama3","created_at":"2024-10-11T07:58:39.017625351Z","response":" learning","done":false}
-{"model":"llama3","created_at":"2024-10-11T07:58:39.102848076Z","response":" is","done":false}
-{"model":"llama3","created_at":"2024-10-11T07:58:39.171037991Z","response":" a","done":false}
-{"model":"llama3","created_at":"2024-10-11T07:58:39.243757952Z","response":" subset","done":false}
-{"model":"llama3","created_at":"2024-10-11T07:58:39.328708084Z","response":" of","done":false}
-{"model":"llama3","created_at":"2024-10-11T07:58:39.413844974Z","response":" machine","done":false}
-{"model":"llama3","created_at":"2024-10-11T07:58:39.486239329Z","response":" learning","done":false}
-{"model":"llama3","created_at":"2024-10-11T07:58:39.555960842Z","response":" that","done":false}
-{"model":"llama3","created_at":"2024-10-11T07:58:39.642418238Z","response":" involves","done":false}
-{"model":"llama3","created_at":"2024-10-11T07:58:39.714137478Z","response":" the","done":false}
-{"model":"llama3","created_at":"2024-10-11T07:58:39.798776679Z","response":" use","done":false}
-{"model":"llama3","created_at":"2024-10-11T07:58:39.883747938Z","response":" of","done":false}
+{"model":"llama3.2","created_at":"2024-10-12T12:55:28.098813868Z","response":"Deep","done":false}
+{"model":"llama3.2","created_at":"2024-10-12T12:55:28.124514468Z","response":" learning","done":false}
+{"model":"llama3.2","created_at":"2024-10-12T12:55:28.149754216Z","response":" is","done":false}
+{"model":"llama3.2","created_at":"2024-10-12T12:55:28.180420784Z","response":" a","done":false}
+{"model":"llama3.2","created_at":"2024-10-12T12:55:28.229185873Z","response":" subset","done":false}
+{"model":"llama3.2","created_at":"2024-10-12T12:55:28.263956118Z","response":" of","done":false}
+{"model":"llama3.2","created_at":"2024-10-12T12:55:28.289097354Z","response":" machine","done":false}
+{"model":"llama3.2","created_at":"2024-10-12T12:55:28.316838918Z","response":" learning","done":false}
+{"model":"llama3.2","created_at":"2024-10-12T12:55:28.342309506Z","response":" that","done":false}
+{"model":"llama3.2","created_at":"2024-10-12T12:55:28.367221264Z","response":" involves","done":false}
+{"model":"llama3.2","created_at":"2024-10-12T12:55:28.39205893Z","response":" the","done":false}
+{"model":"llama3.2","created_at":"2024-10-12T12:55:28.417933974Z","response":" use","done":false}
+{"model":"llama3.2","created_at":"2024-10-12T12:55:28.443110388Z","response":" of","done":false}
 ...
 ```

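To sanity-check the service without parsing a token stream, the same endpoint can return a single consolidated JSON object. This is a sketch based on the standard Ollama `stream` request option, not a command from the original guide:

```bash
# Ask Ollama for one complete response instead of a token-by-token stream
curl http://${host_ip}:11434/api/generate -d '{"model": "llama3.2", "prompt": "What is Deep Learning?", "stream": false}'
```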
@@ -155,13 +155,21 @@ cd ~/OPEA/GenAIExamples/ChatQnA/ui
 docker build --no-cache -t opea/chatqna-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
 ```

-Then run the command `docker images`, you will have the following 5 Docker Images:
+### 6. Build Nginx Docker Image
+
+```bash
+cd GenAIComps
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+```
+
+Then run the command `docker images`; you should see the following 6 Docker images:

 1. `opea/dataprep-redis:latest`
 2. `opea/retriever-redis:latest`
 3. `opea/llm-ollama:latest`
 4. `opea/chatqna:latest`
 5. `opea/chatqna-ui:latest`
+6. `opea/nginx:latest`

 ## 🚀 Start Microservices

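A quick way to confirm that all six images are present is to filter `docker images` by the `opea` repository prefix; the wildcard filter is a generic Docker feature, not part of the guide:

```bash
# List only the locally built OPEA images
docker images "opea/*"
```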
@@ -201,27 +209,10 @@ export http_proxy=${your_http_proxy}
|
||||
export https_proxy=${your_http_proxy}
|
||||
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
|
||||
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
|
||||
export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:6006"
|
||||
export REDIS_URL="redis://${host_ip}:6379"
|
||||
export INDEX_NAME="rag-redis"
|
||||
export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
|
||||
export MEGA_SERVICE_HOST_IP=${host_ip}
|
||||
export EMBEDDING_SERVER_HOST_IP=${host_ip}
|
||||
export RETRIEVER_SERVICE_HOST_IP=${host_ip}
|
||||
export RERANK_SERVER_HOST_IP=${host_ip}
|
||||
export LLM_SERVER_HOST_IP=${host_ip}
|
||||
export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8888/v1/chatqna"
|
||||
export DATAPREP_SERVICE_ENDPOINT="http://${host_ip}:6007/v1/dataprep"
|
||||
export DATAPREP_GET_FILE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/get_file"
|
||||
export DATAPREP_DELETE_FILE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/delete_file"
|
||||
export FRONTEND_SERVICE_IP=${host_ip}
|
||||
export FRONTEND_SERVICE_PORT=5173
|
||||
export BACKEND_SERVICE_NAME=chatqna
|
||||
export BACKEND_SERVICE_IP=${host_ip}
|
||||
export BACKEND_SERVICE_PORT=8888
|
||||
|
||||
export OLLAMA_ENDPOINT=http://${host_ip}:11434
|
||||
export OLLAMA_MODEL="llama3"
|
||||
export OLLAMA_MODEL="llama3.2"
|
||||
```
|
||||
|
||||
- Windows PC
|
||||
@@ -229,27 +220,10 @@ export OLLAMA_MODEL="llama3"
 ```bash
 set EMBEDDING_MODEL_ID=BAAI/bge-base-en-v1.5
 set RERANK_MODEL_ID=BAAI/bge-reranker-base
-set TEI_EMBEDDING_ENDPOINT=http://%host_ip%:6006
-set REDIS_URL=redis://%host_ip%:6379
-set INDEX_NAME=rag-redis
 set HUGGINGFACEHUB_API_TOKEN=%your_hf_api_token%
-set MEGA_SERVICE_HOST_IP=%host_ip%
-set EMBEDDING_SERVER_HOST_IP=%host_ip%
-set RETRIEVER_SERVICE_HOST_IP=%host_ip%
-set RERANK_SERVER_HOST_IP=%host_ip%
-set LLM_SERVER_HOST_IP=%host_ip%
-set BACKEND_SERVICE_ENDPOINT=http://%host_ip%:8888/v1/chatqna
-set DATAPREP_SERVICE_ENDPOINT=http://%host_ip%:6007/v1/dataprep
-set DATAPREP_GET_FILE_ENDPOINT="http://%host_ip%:6007/v1/dataprep/get_file"
-set DATAPREP_DELETE_FILE_ENDPOINT="http://%host_ip%:6007/v1/dataprep/delete_file"
-set FRONTEND_SERVICE_IP=%host_ip%
-set FRONTEND_SERVICE_PORT=5173
-set BACKEND_SERVICE_NAME=chatqna
-set BACKEND_SERVICE_IP=%host_ip%
-set BACKEND_SERVICE_PORT=8888

 set OLLAMA_ENDPOINT=http://host.docker.internal:11434
-set OLLAMA_MODEL="llama3"
+set OLLAMA_MODEL="llama3.2"
 ```

 Note: Please replace `host_ip` with your external IP address; do not use localhost.
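Before starting the microservices, it is worth confirming that the exported Ollama endpoint is actually reachable. A minimal check using Ollama's standard `/api/tags` route, which lists locally pulled models:

```bash
# Should return a JSON model list that includes llama3.2
curl ${OLLAMA_ENDPOINT}/api/tags
```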
@@ -263,15 +237,6 @@ cd ~/OPEA/GenAIExamples/ChatQnA/docker_compose/intel/cpu/aipc/
 docker compose up -d
 ```

-Let ollama service runs (if you have started ollama service in [Prerequisites](#Prerequisites), skip this step)
-
-```bash
-# e.g. ollama run llama3
-OLLAMA_HOST=${host_ip}:11434 ollama run $OLLAMA_MODEL
-# for windows
-# ollama run %OLLAMA_MODEL%
-```
-
 ### Validate Microservices

 Follow the instructions to validate MicroServices.
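Before validating the individual endpoints, a generic Compose check (not from the guide itself) confirms that every container started:

```bash
# Every service should report an Up/running status
docker compose ps
```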
@@ -309,7 +274,7 @@ For details on how to verify the correctness of the response, refer to [how-to-v
 4. Ollama Service

 ```bash
-curl http://${host_ip}:11434/api/generate -d '{"model": "llama3", "prompt":"What is Deep Learning?"}'
+curl http://${host_ip}:11434/api/generate -d '{"model": "llama3.2", "prompt":"What is Deep Learning?"}'
 ```

 5. LLM Microservice
@@ -325,7 +290,7 @@ For details on how to verify the correctness of the response, refer to [how-to-v

 ```bash
 curl http://${host_ip}:8888/v1/chatqna -H "Content-Type: application/json" -d '{
-"messages": "What is the revenue of Nike in 2023?", "model": "'"${OLLAMA_MODEL}"'"
+"messages": "What is the revenue of Nike in 2023?"
 }'
 ```

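The compose.yaml changes that follow are the heart of this PR: host-IP environment variables are replaced with Compose service names, which Docker's embedded DNS resolves on the project network, so the stack no longer depends on a correctly exported `host_ip`. A hedged way to spot-check that resolution from inside a running container (assuming the image ships `getent`):

```bash
# Resolve a peer service by its Compose service name
docker exec chatqna-aipc-backend-server getent hosts tei-embedding-service
```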
@@ -13,15 +13,17 @@ services:
     container_name: dataprep-redis-server
     depends_on:
       - redis-vector-db
+      - tei-embedding-service
     ports:
       - "6007:6007"
     environment:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      REDIS_URL: ${REDIS_URL}
+      REDIS_URL: redis://redis-vector-db:6379
+      REDIS_HOST: redis-vector-db
       INDEX_NAME: ${INDEX_NAME}
-      TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
+      TEI_ENDPOINT: http://tei-embedding-service:80
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
   tei-embedding-service:
     image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
@@ -48,9 +50,10 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      REDIS_URL: ${REDIS_URL}
+      REDIS_URL: redis://redis-vector-db:6379
+      REDIS_HOST: redis-vector-db
       INDEX_NAME: ${INDEX_NAME}
-      TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
+      TEI_EMBEDDING_ENDPOINT: http://tei-embedding-service:80
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   tei-reranking-service:
@@ -79,7 +82,6 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
@@ -90,6 +92,7 @@ services:
     container_name: chatqna-aipc-backend-server
     depends_on:
       - redis-vector-db
+      - dataprep-redis-service
       - tei-embedding-service
       - retriever
       - tei-reranking-service
@@ -100,14 +103,14 @@ services:
       - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
-      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
-      - EMBEDDING_SERVER_HOST_IP=${EMBEDDING_SERVICE_HOST_IP}
-      - EMBEDDING_SERVER_PORT=${EMBEDDING_SERVICE_PORT:-6006}
-      - RETRIEVER_SERVICE_HOST_IP=${RETRIEVER_SERVICE_HOST_IP}
-      - RERANK_SERVER_HOST_IP=${RERANK_SERVICE_HOST_IP}
-      - RERANK_SERVER_PORT=${RERANK_SERVICE_PORT:-8808}
-      - LLM_SERVER_HOST_IP=${LLM_SERVICE_HOST_IP}
-      - LLM_SERVER_PORT=${LLM_SERVICE_PORT:-9000}
+      - MEGA_SERVICE_HOST_IP=chaqna-aipc-backend-server
+      - EMBEDDING_SERVER_HOST_IP=tei-embedding-service
+      - EMBEDDING_SERVER_PORT=80
+      - RETRIEVER_SERVICE_HOST_IP=retriever
+      - RERANK_SERVER_HOST_IP=tei-reranking-service
+      - RERANK_SERVER_PORT=80
+      - LLM_SERVER_HOST_IP=llm
+      - LLM_SERVER_PORT=9000
       - LOGFLAG=${LOGFLAG}
     ipc: host
     restart: always
@@ -122,10 +125,6 @@ services:
       - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
-      - CHAT_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
-      - UPLOAD_FILE_BASE_URL=${DATAPREP_SERVICE_ENDPOINT}
-      - GET_FILE=${DATAPREP_GET_FILE_ENDPOINT}
-      - DELETE_FILE=${DATAPREP_DELETE_FILE_ENDPOINT}
     ipc: host
     restart: always
   chaqna-aipc-nginx-server:
@@ -140,11 +139,13 @@ services:
       - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
-      - FRONTEND_SERVICE_IP=${FRONTEND_SERVICE_IP}
-      - FRONTEND_SERVICE_PORT=${FRONTEND_SERVICE_PORT}
-      - BACKEND_SERVICE_NAME=${BACKEND_SERVICE_NAME}
-      - BACKEND_SERVICE_IP=${BACKEND_SERVICE_IP}
-      - BACKEND_SERVICE_PORT=${BACKEND_SERVICE_PORT}
+      - FRONTEND_SERVICE_IP=chatqna-xeon-ui-server
+      - FRONTEND_SERVICE_PORT=5173
+      - BACKEND_SERVICE_NAME=chatqna
+      - BACKEND_SERVICE_IP=chatqna-xeon-backend-server
+      - BACKEND_SERVICE_PORT=8888
+      - DATAPREP_SERVICE_IP=dataprep-redis-service
+      - DATAPREP_SERVICE_PORT=6007
     ipc: host
     restart: always

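With the variables inlined, `docker compose config` is a convenient generic check (not part of this PR) that the rendered file no longer contains empty `${...}` substitutions:

```bash
cd ~/OPEA/GenAIExamples/ChatQnA/docker_compose/intel/cpu/aipc/
# Print the resolved compose file and inspect the service endpoints
docker compose config | grep -i -E 'redis|tei|ollama'
```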
@@ -15,25 +15,6 @@ fi
-export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
 export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
 export RERANK_MODEL_ID="BAAI/bge-reranker-base"
-export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:6006"
-export TEI_RERANKING_ENDPOINT="http://${host_ip}:8808"
-export REDIS_URL="redis://${host_ip}:6379"
-export INDEX_NAME="rag-redis"
-export REDIS_HOST=${host_ip}
-export MEGA_SERVICE_HOST_IP=${host_ip}
-export EMBEDDING_SERVICE_HOST_IP=${host_ip}
-export RETRIEVER_SERVICE_HOST_IP=${host_ip}
-export RERANK_SERVICE_HOST_IP=${host_ip}
-export LLM_SERVICE_HOST_IP=${host_ip}
-export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8888/v1/chatqna"
-export DATAPREP_SERVICE_ENDPOINT="http://${host_ip}:6007/v1/dataprep"
-export DATAPREP_GET_FILE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/get_file"
-export DATAPREP_DELETE_FILE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/delete_file"
-export FRONTEND_SERVICE_IP=${host_ip}
-export FRONTEND_SERVICE_PORT=5173
-export BACKEND_SERVICE_NAME=chatqna
-export BACKEND_SERVICE_IP=${host_ip}
-export BACKEND_SERVICE_PORT=8888

 export OLLAMA_ENDPOINT=http://${host_ip}:11434
-export OLLAMA_MODEL="llama3"
+export OLLAMA_MODEL="llama3.2"
+
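After the trim, the script only pins the model IDs and the Ollama endpoint. A typical usage sketch, assuming `set_env.sh` sits next to the aipc compose file:

```bash
cd ~/OPEA/GenAIExamples/ChatQnA/docker_compose/intel/cpu/aipc/
# Export the remaining variables into the current shell, then start the stack
source set_env.sh
docker compose up -d
```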
@@ -118,12 +118,20 @@ docker build --no-cache -t opea/chatqna-conversation-ui:latest --build-arg https
 cd ../../../..
 ```

-Then run the command `docker images`, you will have the following 4 Docker Images:
+### 6. Build Nginx Docker Image
+
+```bash
+cd GenAIComps
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+```
+
+Then run the command `docker images`; you should see the following 5 Docker images:

 1. `opea/dataprep-qdrant:latest`
 2. `opea/retriever-qdrant:latest`
 3. `opea/chatqna:latest`
 4. `opea/chatqna-ui:latest`
+5. `opea/nginx:latest`

 ## 🚀 Start Microservices

@@ -172,18 +180,7 @@ export https_proxy=${your_http_proxy}
 export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
 export RERANK_MODEL_ID="BAAI/bge-reranker-base"
 export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
-export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:6040"
-export QDRANT_HOST=${host_ip}
-export QDRANT_PORT=6333
-export INDEX_NAME="rag-qdrant"
 export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
-export EMBEDDING_SERVER_HOST_IP=${host_ip}
-export MEGA_SERVICE_HOST_IP=${host_ip}
-export RETRIEVER_SERVICE_HOST_IP=${host_ip}
-export RERANK_SERVER_HOST_IP=${host_ip}
-export LLM_SERVER_HOST_IP=${host_ip}
-export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8912/v1/chatqna"
-export DATAPREP_SERVICE_ENDPOINT="http://${host_ip}:6043/v1/dataprep"
 ```

 Note: Please replace `host_ip` with your external IP address; do not use localhost.
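Note that the Qdrant variant exposes the backend on port 8912 rather than 8888; a hedged smoke test once the stack is up, mirroring the ChatQnA request shown earlier:

```bash
curl http://${host_ip}:8912/v1/chatqna -H "Content-Type: application/json" -d '{
"messages": "What is Deep Learning?"
}'
```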
@@ -20,10 +20,10 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      REDIS_URL: ${REDIS_URL}
-      REDIS_HOST: ${REDIS_HOST}
+      REDIS_URL: redis://redis-vector-db:6379
+      REDIS_HOST: redis-vector-db
       INDEX_NAME: ${INDEX_NAME}
-      TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
+      TEI_ENDPOINT: http://tei-embedding-service:80
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
   tei-embedding-service:
     image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
@@ -50,9 +50,10 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      REDIS_URL: ${REDIS_URL}
+      REDIS_URL: redis://redis-vector-db:6379
+      REDIS_HOST: redis-vector-db
       INDEX_NAME: ${INDEX_NAME}
-      TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
+      TEI_EMBEDDING_ENDPOINT: http://tei-embedding-service:80
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   tei-reranking-service:
@@ -87,7 +88,7 @@ services:
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
     command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
-  chaqna-xeon-backend-server:
+  chatqna-xeon-backend-server:
     image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
     container_name: chatqna-xeon-backend-server
     depends_on:
@@ -103,51 +104,49 @@ services:
       - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
-      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
-      - EMBEDDING_SERVER_HOST_IP=${EMBEDDING_SERVER_HOST_IP}
-      - EMBEDDING_SERVER_PORT=${EMBEDDING_SERVER_PORT:-6006}
-      - RETRIEVER_SERVICE_HOST_IP=${RETRIEVER_SERVICE_HOST_IP}
-      - RERANK_SERVER_HOST_IP=${RERANK_SERVER_HOST_IP}
-      - RERANK_SERVER_PORT=${RERANK_SERVER_PORT:-8808}
-      - LLM_SERVER_HOST_IP=${LLM_SERVER_HOST_IP}
-      - LLM_SERVER_PORT=${LLM_SERVER_PORT:-9009}
+      - MEGA_SERVICE_HOST_IP=chatqna-xeon-backend-server
+      - EMBEDDING_SERVER_HOST_IP=tei-embedding-service
+      - EMBEDDING_SERVER_PORT=${EMBEDDING_SERVER_PORT:-80}
+      - RETRIEVER_SERVICE_HOST_IP=retriever
+      - RERANK_SERVER_HOST_IP=tei-reranking-service
+      - RERANK_SERVER_PORT=${RERANK_SERVER_PORT:-80}
+      - LLM_SERVER_HOST_IP=tgi-service
+      - LLM_SERVER_PORT=${LLM_SERVER_PORT:-80}
       - LOGFLAG=${LOGFLAG}
     ipc: host
     restart: always
-  chaqna-xeon-ui-server:
+  chatqna-xeon-ui-server:
     image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
     container_name: chatqna-xeon-ui-server
     depends_on:
-      - chaqna-xeon-backend-server
+      - chatqna-xeon-backend-server
     ports:
       - "5173:5173"
     environment:
       - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
-      - CHAT_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
-      - UPLOAD_FILE_BASE_URL=${DATAPREP_SERVICE_ENDPOINT}
-      - GET_FILE=${DATAPREP_GET_FILE_ENDPOINT}
-      - DELETE_FILE=${DATAPREP_DELETE_FILE_ENDPOINT}
     ipc: host
     restart: always
-  chaqna-xeon-nginx-server:
+  chatqna-xeon-nginx-server:
     image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
-    container_name: chaqna-xeon-nginx-server
+    container_name: chatqna-xeon-nginx-server
     depends_on:
-      - chaqna-xeon-backend-server
-      - chaqna-xeon-ui-server
+      - chatqna-xeon-backend-server
+      - chatqna-xeon-ui-server
     ports:
       - "${NGINX_PORT:-80}:80"
     environment:
       - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
-      - FRONTEND_SERVICE_IP=${FRONTEND_SERVICE_IP}
-      - FRONTEND_SERVICE_PORT=${FRONTEND_SERVICE_PORT}
-      - BACKEND_SERVICE_NAME=${BACKEND_SERVICE_NAME}
-      - BACKEND_SERVICE_IP=${BACKEND_SERVICE_IP}
-      - BACKEND_SERVICE_PORT=${BACKEND_SERVICE_PORT}
+      - FRONTEND_SERVICE_IP=chatqna-xeon-ui-server
+      - FRONTEND_SERVICE_PORT=5173
+      - BACKEND_SERVICE_NAME=chatqna
+      - BACKEND_SERVICE_IP=chatqna-xeon-backend-server
+      - BACKEND_SERVICE_PORT=8888
+      - DATAPREP_SERVICE_IP=dataprep-redis-service
+      - DATAPREP_SERVICE_PORT=6007
     ipc: host
     restart: always

@@ -20,10 +20,10 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      QDRANT_HOST: ${QDRANT_HOST}
+      QDRANT_HOST: qdrant-vector-db
       QDRANT_PORT: 6333
       COLLECTION_NAME: ${INDEX_NAME}
-      TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
+      TEI_ENDPOINT: http://tei-embedding-service:80
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
   tei-embedding-service:
     image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
@@ -50,7 +50,7 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      QDRANT_HOST: ${QDRANT_HOST}
+      QDRANT_HOST: qdrant-vector-db
       QDRANT_PORT: 6333
       INDEX_NAME: ${INDEX_NAME}
       TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
@@ -87,7 +87,7 @@ services:
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
     command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
-  chaqna-xeon-backend-server:
+  chatqna-xeon-backend-server:
     image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
     container_name: chatqna-xeon-backend-server
     depends_on:
@@ -102,52 +102,50 @@ services:
       - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
-      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
-      - EMBEDDING_SERVER_HOST_IP=${EMBEDDING_SERVER_HOST_IP}
-      - EMBEDDING_SERVER_PORT=${EMBEDDING_SERVER_PORT:-6040}
-      - RETRIEVER_SERVICE_HOST_IP=${RETRIEVER_SERVICE_HOST_IP}
-      - RETRIEVER_SERVICE_PORT=${RETRIEVER_SERVICE_PORT:-6045}
-      - RERANK_SERVER_HOST_IP=${RERANK_SERVER_HOST_IP}
-      - RERANK_SERVER_PORT=${RERANK_SERVER_PORT:-6041}
-      - LLM_SERVER_HOST_IP=${LLM_SERVER_HOST_IP}
-      - LLM_SERVER_PORT=${LLM_SERVER_PORT:-6042}
+      - MEGA_SERVICE_HOST_IP=chatqna-xeon-backend-server
+      - EMBEDDING_SERVER_HOST_IP=tei-embedding-service
+      - EMBEDDING_SERVER_PORT=${EMBEDDING_SERVER_PORT:-80}
+      - RETRIEVER_SERVICE_HOST_IP=retriever
+      - RETRIEVER_SERVICE_PORT=${RETRIEVER_SERVICE_PORT:-7000}
+      - RERANK_SERVER_HOST_IP=tei-reranking-service
+      - RERANK_SERVER_PORT=${RERANK_SERVER_PORT:-80}
+      - LLM_SERVER_HOST_IP=tgi-service
+      - LLM_SERVER_PORT=${LLM_SERVER_PORT:-80}
       - LOGFLAG=${LOGFLAG}
     ipc: host
     restart: always
-  chaqna-xeon-ui-server:
+  chatqna-xeon-ui-server:
     image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
     container_name: chatqna-xeon-ui-server
     depends_on:
-      - chaqna-xeon-backend-server
+      - chatqna-xeon-backend-server
     ports:
       - "5173:5173"
     environment:
       - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
-      - CHAT_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
-      - UPLOAD_FILE_BASE_URL=${DATAPREP_SERVICE_ENDPOINT}
-      - GET_FILE=${DATAPREP_GET_FILE_ENDPOINT}
-      - DELETE_FILE=${DATAPREP_DELETE_FILE_ENDPOINT}
     ipc: host
     restart: always
-  chaqna-xeon-nginx-server:
+  chatqna-xeon-nginx-server:
     image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
-    container_name: chaqna-xeon-nginx-server
+    container_name: chatqna-xeon-nginx-server
     depends_on:
-      - chaqna-xeon-backend-server
-      - chaqna-xeon-ui-server
+      - chatqna-xeon-backend-server
+      - chatqna-xeon-ui-server
     ports:
       - "${NGINX_PORT:-80}:80"
     environment:
       - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
-      - FRONTEND_SERVICE_IP=${FRONTEND_SERVICE_IP}
-      - FRONTEND_SERVICE_PORT=${FRONTEND_SERVICE_PORT}
-      - BACKEND_SERVICE_NAME=${BACKEND_SERVICE_NAME}
-      - BACKEND_SERVICE_IP=${BACKEND_SERVICE_IP}
-      - BACKEND_SERVICE_PORT=${BACKEND_SERVICE_PORT}
+      - FRONTEND_SERVICE_IP=chatqna-xeon-ui-server
+      - FRONTEND_SERVICE_PORT=5173
+      - BACKEND_SERVICE_NAME=chatqna
+      - BACKEND_SERVICE_IP=chatqna-xeon-backend-server
+      - BACKEND_SERVICE_PORT=8888
+      - DATAPREP_SERVICE_IP=dataprep-qdrant-service
+      - DATAPREP_SERVICE_PORT=6007
     ipc: host
     restart: always

@@ -20,9 +20,10 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      REDIS_URL: ${REDIS_URL}
+      REDIS_URL: redis://redis-vector-db:6379
+      REDIS_HOST: redis-vector-db
       INDEX_NAME: ${INDEX_NAME}
-      TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
+      TEI_ENDPOINT: http://tei-embedding-service:80
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
   tei-embedding-service:
     image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
@@ -49,12 +50,10 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      REDIS_URL: ${REDIS_URL}
+      REDIS_URL: redis://redis-vector-db:6379
+      REDIS_HOST: redis-vector-db
       INDEX_NAME: ${INDEX_NAME}
-      TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
-      LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
-      LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
-      LANGCHAIN_PROJECT: "opea-retriever-service"
+      TEI_EMBEDDING_ENDPOINT: http://tei-embedding-service:80
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   tei-reranking-service:
@@ -88,7 +87,7 @@ services:
       HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
       LLM_MODEL_ID: ${LLM_MODEL_ID}
     command: --model $LLM_MODEL_ID --host 0.0.0.0 --port 80
-  chaqna-xeon-backend-server:
+  chatqna-xeon-backend-server:
     image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
     container_name: chatqna-xeon-backend-server
     depends_on:
@@ -103,51 +102,49 @@ services:
       - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
-      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
-      - EMBEDDING_SERVER_HOST_IP=${EMBEDDING_SERVER_HOST_IP}
-      - EMBEDDING_SERVER_PORT=${EMBEDDING_SERVER_PORT:-6006}
-      - RETRIEVER_SERVICE_HOST_IP=${RETRIEVER_SERVICE_HOST_IP}
-      - RERANK_SERVER_HOST_IP=${RERANK_SERVER_HOST_IP}
-      - RERANK_SERVER_PORT=${RERANK_SERVER_PORT:-8808}
-      - LLM_SERVER_HOST_IP=${LLM_SERVER_HOST_IP}
-      - LLM_SERVER_PORT=${LLM_SERVER_PORT:-9009}
+      - MEGA_SERVICE_HOST_IP=chatqna-xeon-backend-server
+      - EMBEDDING_SERVER_HOST_IP=tei-embedding-service
+      - EMBEDDING_SERVER_PORT=${EMBEDDING_SERVER_PORT:-80}
+      - RETRIEVER_SERVICE_HOST_IP=retriever
+      - RERANK_SERVER_HOST_IP=tei-reranking-service
+      - RERANK_SERVER_PORT=${RERANK_SERVER_PORT:-80}
+      - LLM_SERVER_HOST_IP=vllm_service
+      - LLM_SERVER_PORT=${LLM_SERVER_PORT:-80}
       - LOGFLAG=${LOGFLAG}
     ipc: host
     restart: always
-  chaqna-xeon-ui-server:
+  chatqna-xeon-ui-server:
     image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
     container_name: chatqna-xeon-ui-server
     depends_on:
-      - chaqna-xeon-backend-server
+      - chatqna-xeon-backend-server
     ports:
       - "5173:5173"
     environment:
       - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
-      - CHAT_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
-      - UPLOAD_FILE_BASE_URL=${DATAPREP_SERVICE_ENDPOINT}
-      - GET_FILE=${DATAPREP_GET_FILE_ENDPOINT}
-      - DELETE_FILE=${DATAPREP_DELETE_FILE_ENDPOINT}
     ipc: host
     restart: always
-  chaqna-xeon-nginx-server:
+  chatqna-xeon-nginx-server:
     image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
-    container_name: chaqna-xeon-nginx-server
+    container_name: chatqna-xeon-nginx-server
     depends_on:
-      - chaqna-xeon-backend-server
-      - chaqna-xeon-ui-server
+      - chatqna-xeon-backend-server
+      - chatqna-xeon-ui-server
     ports:
       - "${NGINX_PORT:-80}:80"
     environment:
      - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
-      - FRONTEND_SERVICE_IP=${FRONTEND_SERVICE_IP}
-      - FRONTEND_SERVICE_PORT=${FRONTEND_SERVICE_PORT}
-      - BACKEND_SERVICE_NAME=${BACKEND_SERVICE_NAME}
-      - BACKEND_SERVICE_IP=${BACKEND_SERVICE_IP}
-      - BACKEND_SERVICE_PORT=${BACKEND_SERVICE_PORT}
+      - FRONTEND_SERVICE_IP=chatqna-xeon-ui-server
+      - FRONTEND_SERVICE_PORT=5173
+      - BACKEND_SERVICE_NAME=chatqna
+      - BACKEND_SERVICE_IP=chatqna-xeon-backend-server
+      - BACKEND_SERVICE_PORT=8888
+      - DATAPREP_SERVICE_IP=dataprep-redis-service
+      - DATAPREP_SERVICE_PORT=6007
     ipc: host
     restart: always

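For the vLLM variant, the backend now reaches the model server as `vllm_service` inside the network. From the host, a hedged check against vLLM's OpenAI-compatible `/v1/models` route (assuming the service is still published on host port 9009, the old default):

```bash
# Lists the models served by vLLM
curl http://${host_ip}:9009/v1/models
```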
@@ -20,10 +20,10 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      REDIS_URL: ${REDIS_URL}
-      REDIS_HOST: ${REDIS_HOST}
+      REDIS_URL: redis://redis-vector-db:6379
+      REDIS_HOST: redis-vector-db
       INDEX_NAME: ${INDEX_NAME}
-      TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
+      TEI_ENDPOINT: http://tei-embedding-service:80
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
   tei-embedding-service:
     image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
@@ -50,9 +50,10 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      REDIS_URL: ${REDIS_URL}
+      REDIS_URL: redis://redis-vector-db:6379
+      REDIS_HOST: redis-vector-db
       INDEX_NAME: ${INDEX_NAME}
-      TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
+      TEI_EMBEDDING_ENDPOINT: http://tei-embedding-service:80
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   tgi-service:
@@ -71,7 +72,7 @@ services:
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
     command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
-  chaqna-xeon-backend-server:
+  chatqna-xeon-backend-server:
     image: ${REGISTRY:-opea}/chatqna-without-rerank:${TAG:-latest}
     container_name: chatqna-xeon-backend-server
     depends_on:
@@ -86,49 +87,47 @@ services:
       - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
-      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
-      - EMBEDDING_SERVER_HOST_IP=${EMBEDDING_SERVER_HOST_IP}
-      - EMBEDDING_SERVER_PORT=${EMBEDDING_SERVER_PORT:-6006}
-      - RETRIEVER_SERVICE_HOST_IP=${RETRIEVER_SERVICE_HOST_IP}
-      - LLM_SERVER_HOST_IP=${LLM_SERVER_HOST_IP}
-      - LLM_SERVER_PORT=${LLM_SERVER_PORT:-9009}
+      - MEGA_SERVICE_HOST_IP=chatqna-xeon-backend-server
+      - EMBEDDING_SERVER_HOST_IP=tei-embedding-service
+      - EMBEDDING_SERVER_PORT=${EMBEDDING_SERVER_PORT:-80}
+      - RETRIEVER_SERVICE_HOST_IP=retriever
+      - LLM_SERVER_HOST_IP=tgi-service
+      - LLM_SERVER_PORT=${LLM_SERVER_PORT:-80}
       - LOGFLAG=${LOGFLAG}
     ipc: host
     restart: always
-  chaqna-xeon-ui-server:
+  chatqna-xeon-ui-server:
     image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
     container_name: chatqna-xeon-ui-server
     depends_on:
-      - chaqna-xeon-backend-server
+      - chatqna-xeon-backend-server
     ports:
       - "5173:5173"
     environment:
       - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
-      - CHAT_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
-      - UPLOAD_FILE_BASE_URL=${DATAPREP_SERVICE_ENDPOINT}
-      - GET_FILE=${DATAPREP_GET_FILE_ENDPOINT}
-      - DELETE_FILE=${DATAPREP_DELETE_FILE_ENDPOINT}
     ipc: host
     restart: always
-  chaqna-xeon-nginx-server:
+  chatqna-xeon-nginx-server:
     image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
-    container_name: chaqna-xeon-nginx-server
+    container_name: chatqna-xeon-nginx-server
     depends_on:
-      - chaqna-xeon-backend-server
-      - chaqna-xeon-ui-server
+      - chatqna-xeon-backend-server
+      - chatqna-xeon-ui-server
     ports:
       - "${NGINX_PORT:-80}:80"
     environment:
       - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
-      - FRONTEND_SERVICE_IP=${FRONTEND_SERVICE_IP}
-      - FRONTEND_SERVICE_PORT=${FRONTEND_SERVICE_PORT}
-      - BACKEND_SERVICE_NAME=${BACKEND_SERVICE_NAME}
-      - BACKEND_SERVICE_IP=${BACKEND_SERVICE_IP}
-      - BACKEND_SERVICE_PORT=${BACKEND_SERVICE_PORT}
+      - FRONTEND_SERVICE_IP=chatqna-xeon-ui-server
+      - FRONTEND_SERVICE_PORT=5173
+      - BACKEND_SERVICE_NAME=chatqna
+      - BACKEND_SERVICE_IP=chatqna-xeon-backend-server
+      - BACKEND_SERVICE_PORT=8888
+      - DATAPREP_SERVICE_IP=dataprep-redis-service
+      - DATAPREP_SERVICE_PORT=6007
     ipc: host
     restart: always

@@ -7,21 +7,4 @@
 export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
 export RERANK_MODEL_ID="BAAI/bge-reranker-base"
 export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
-export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:6006"
-export REDIS_URL="redis://${host_ip}:6379"
-export INDEX_NAME="rag-redis"
-export REDIS_HOST=${host_ip}
-export MEGA_SERVICE_HOST_IP=${host_ip}
-export EMBEDDING_SERVER_HOST_IP=${host_ip}
-export RETRIEVER_SERVICE_HOST_IP=${host_ip}
-export RERANK_SERVER_HOST_IP=${host_ip}
-export LLM_SERVER_HOST_IP=${host_ip}
-export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8888/v1/chatqna"
-export DATAPREP_SERVICE_ENDPOINT="http://${host_ip}:6007/v1/dataprep"
-export DATAPREP_GET_FILE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/get_file"
-export DATAPREP_DELETE_FILE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/delete_file"
-export FRONTEND_SERVICE_IP=${host_ip}
-export FRONTEND_SERVICE_PORT=5173
-export BACKEND_SERVICE_NAME=chatqna
-export BACKEND_SERVICE_IP=${host_ip}
-export BACKEND_SERVICE_PORT=8888
+