Refactor web retrievers links (#1338)

Author: Sihan Chen
Date: 2025-01-08 16:19:50 +08:00
Committed by: GitHub
Parent: b3c405a5f6
Commit: 5128c2d650
8 changed files with 16 additions and 16 deletions

@@ -15,7 +15,7 @@ docker build --no-cache -t opea/embedding:latest --build-arg https_proxy=$https_
### 2. Build Retriever Image
```bash
docker build --no-cache -t opea/web-retriever-chroma:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/web_retrievers/chroma/langchain/Dockerfile .
docker build --no-cache -t opea/web-retriever:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/web_retrievers/src/Dockerfile .
```
### 3. Build Rerank Image
@@ -52,7 +52,7 @@ docker build --no-cache -t opea/opea/searchqna-ui:latest --build-arg https_proxy
Then run the command `docker images`, and you will have the following images ready:
1. `opea/embedding:latest`
2. `opea/web-retriever-chroma:latest`
2. `opea/web-retriever:latest`
3. `opea/reranking:latest`
4. `opea/llm-textgen:latest`
5. `opea/searchqna:latest`
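For reference, a minimal sketch of building and verifying the renamed image under its new Dockerfile path (assuming the commands are run from a GenAIComps checkout, as in the README above):

```bash
# Build the renamed retriever image from its new Dockerfile location
docker build --no-cache -t opea/web-retriever:latest \
  --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy \
  -f comps/web_retrievers/src/Dockerfile .

# The old opea/web-retriever-chroma tag is no longer referenced; confirm the new tag exists
docker images | grep "opea/web-retriever"
```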

@@ -39,8 +39,8 @@ services:
LOGFLAG: ${LOGFLAG}
restart: unless-stopped
web-retriever:
image: ${REGISTRY:-opea}/web-retriever-chroma:${TAG:-latest}
container_name: web-retriever-chroma-server
image: ${REGISTRY:-opea}/web-retriever:${TAG:-latest}
container_name: web-retriever-server
ports:
- "3003:7077"
ipc: host
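A quick way to confirm the renamed service came up under its new container name (a sketch, assuming the compose stack above has been started with `docker compose up -d`):

```bash
# List the renamed container and its port mapping (host 3003 -> container 7077)
docker ps --filter "name=web-retriever-server" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"

# Tail its logs to check that the web retriever started cleanly
docker logs web-retriever-server | tail -n 20
```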

@@ -17,7 +17,7 @@ docker build --no-cache -t opea/embedding:latest --build-arg https_proxy=$https_
### 2. Build Retriever Image
```bash
docker build --no-cache -t opea/web-retriever-chroma:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/web_retrievers/chroma/langchain/Dockerfile .
docker build --no-cache -t opea/web-retriever:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/web_retrievers/src/Dockerfile .
```
### 3. Build Rerank Image
@@ -52,7 +52,7 @@ docker build --no-cache -t opea/searchqna:latest --build-arg https_proxy=$https_
Then run the command `docker images`, and you will have the following images ready:
1. `opea/embedding:latest`
2. `opea/web-retriever-chroma:latest`
2. `opea/web-retriever:latest`
3. `opea/reranking:latest`
4. `opea/llm-textgen:latest`
5. `opea/searchqna:latest`

@@ -47,8 +47,8 @@ services:
LOGFLAG: ${LOGFLAG}
restart: unless-stopped
web-retriever:
image: ${REGISTRY:-opea}/web-retriever-chroma:${TAG:-latest}
container_name: web-retriever-chroma-server
image: ${REGISTRY:-opea}/web-retriever:${TAG:-latest}
container_name: web-retriever-server
ports:
- "3003:7077"
ipc: host
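The same check applies to the Xeon compose file; alternatively, `docker port` confirms the mapping for the renamed container (the expected output below is an assumption from the compose mapping, and the host binding may differ):

```bash
# Show the host binding for container port 7077 of the renamed service
docker port web-retriever-server 7077
# Expected per the mapping above: 0.0.0.0:3003
```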

@@ -23,12 +23,12 @@ services:
dockerfile: comps/embeddings/src/Dockerfile
extends: searchqna
image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
web-retriever-chroma:
web-retriever:
build:
context: GenAIComps
dockerfile: comps/web_retrievers/chroma/langchain/Dockerfile
dockerfile: comps/web_retrievers/src/Dockerfile
extends: searchqna
image: ${REGISTRY:-opea}/web-retriever-chroma:${TAG:-latest}
image: ${REGISTRY:-opea}/web-retriever:${TAG:-latest}
reranking:
build:
context: GenAIComps
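With the service key in build.yaml renamed, a single-service rebuild targets the new name (the old `web-retriever-chroma` key no longer exists); a minimal sketch:

```bash
# Rebuild only the renamed web retriever service defined in build.yaml
docker compose -f build.yaml build web-retriever --no-cache
```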

@@ -19,7 +19,7 @@ function build_docker_images() {
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="searchqna searchqna-ui embedding web-retriever-chroma reranking llm-textgen"
service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
@@ -72,7 +72,7 @@ function validate_megaservice() {
result=$(curl http://${ip_address}:3008/v1/searchqna -X POST -d '{"messages": "What is the capital of China?", "stream": "False"}' -H 'Content-Type: application/json')
echo $result
docker logs web-retriever-chroma-server > ${LOG_PATH}/web-retriever-chroma-server.log
docker logs web-retriever-server > ${LOG_PATH}/web-retriever-server.log
docker logs searchqna-gaudi-backend-server > ${LOG_PATH}/searchqna-gaudi-backend-server.log
docker logs tei-embedding-gaudi-server > ${LOG_PATH}/tei-embedding-gaudi-server.log
docker logs embedding-gaudi-server > ${LOG_PATH}/embedding-gaudi-server.log
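After the build step, a small loop can verify that every image in the updated service list exists locally (a sketch; image tags are assumed to follow the `opea/<service>:latest` pattern used above):

```bash
# Check that each service in the updated list produced a local image
for svc in searchqna searchqna-ui embedding web-retriever reranking llm-textgen; do
  if docker images -q "opea/${svc}:latest" | grep -q .; then
    echo "ok: opea/${svc}:latest"
  else
    echo "missing: opea/${svc}:latest"
  fi
done
```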

@@ -19,7 +19,7 @@ function build_docker_images() {
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="searchqna searchqna-ui embedding web-retriever-chroma reranking llm-textgen"
service_list="searchqna searchqna-ui embedding web-retriever reranking llm-textgen"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
@@ -70,7 +70,7 @@ function validate_megaservice() {
result=$(curl http://${ip_address}:3008/v1/searchqna -X POST -d '{"messages": "What is the capital of China?", "stream": "False"}' -H 'Content-Type: application/json')
echo $result
docker logs web-retriever-chroma-server > ${LOG_PATH}/web_retriever.log
docker logs web-retriever-server > ${LOG_PATH}/web_retriever.log
docker logs searchqna-xeon-backend-server > ${LOG_PATH}/searchqna_backend.log
if [[ $result == *"capital"* ]]; then

@@ -96,6 +96,6 @@ Take ChatQnA for example. ChatQnA is a chatbot application service based on the
| [opea/vllm](https://hub.docker.com/r/opea/vllm) | [Link](https://github.com/vllm-project/vllm/blob/main/Dockerfile.cpu) | The docker image powered by vllm-project for deploying and serving vllm Models |
| [opea/vllm-gaudi]() | [Link](https://github.com/HabanaAI/vllm-fork/blob/habana_main/Dockerfile.hpu) | The docker image powered by vllm-fork for deploying and serving vllm-gaudi Models |
| [opea/vllm-openvino](https://hub.docker.com/r/opea/vllm-openvino) | [Link](https://github.com/vllm-project/vllm/blob/main/Dockerfile.openvino) | The docker image powered by vllm-project for deploying and serving vllm Models of the Openvino Framework |
| [opea/web-retriever-chroma](https://hub.docker.com/r/opea/web-retriever-chroma) | [Link](https://github.com/opea-project/GenAIComps/tree/main/comps/web_retrievers/chroma/langchain/Dockerfile) | The docker image exposed the OPEA retrieval microservice based on chroma vectordb for GenAI application use |
| [opea/web-retriever]() | [Link](https://github.com/opea-project/GenAIComps/tree/main/comps/web_retrievers/src/Dockerfile) | The docker image exposed the OPEA web retrieval microservice based on a search engine and vector DB |
| [opea/whisper](https://hub.docker.com/r/opea/whisper) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/src/integrations/dependency/whisper/Dockerfile) | The docker image exposed the OPEA Whisper service for GenAI application use |
| [opea/whisper-gaudi](https://hub.docker.com/r/opea/whisper-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/src/integrations/dependency/whisper/Dockerfile.intel_hpu) | The docker image exposed the OPEA Whisper service on Gaudi2 for GenAI application use |
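Since this commit is a pure rename, a repository-wide search (run from the repo root) can confirm that no stale references to the old name remain; a minimal sketch:

```bash
# Any remaining hits indicate a missed reference to the old image or container name
grep -rn "web-retriever-chroma" . && echo "stale references found" || echo "rename looks complete"
```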