Exclude Dockerfiles under tests from image builds and from the Dockerfile check. (#1354)

Signed-off-by: ZePan110 <ze.pan@intel.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>

@@ -60,7 +60,7 @@ jobs:
          shopt -s globstar
          no_add="FALSE"
          cd ${{github.workspace}}
-         Dockerfiles=$(realpath $(find ./ -name '*Dockerfile*'))
+         Dockerfiles=$(realpath $(find ./ -name '*Dockerfile*' ! -path './tests/*'))
          if [ -n "$Dockerfiles" ]; then
            for dockerfile in $Dockerfiles; do
              service=$(echo "$dockerfile" | awk -F '/GenAIExamples/' '{print $2}' | awk -F '/' '{print $2}')
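
For reviewers, the effect of the new `! -path './tests/*'` predicate can be sanity-checked locally; the throwaway layout below is illustrative, not taken from the repo:

```bash
# Build a scratch tree with Dockerfiles inside and outside tests/
mkdir -p demo/ChatQnA demo/tests/unit
touch demo/ChatQnA/Dockerfile demo/tests/unit/Dockerfile.test
cd demo

# Old behavior: matches everything, including files under tests/
find ./ -name '*Dockerfile*'

# New behavior: '! -path ./tests/*' drops anything under ./tests/
find ./ -name '*Dockerfile*' ! -path './tests/*'
```

Note that `-path` matches against the path as printed from the search root, which is why the workflow `cd`s into `${{github.workspace}}` before running `find ./`.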

@@ -21,7 +21,7 @@ function build_docker_images_for_retrieval_tool(){
     # git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
     get_genai_comps
     echo "Build all the images with --no-cache..."
-    service_list="doc-index-retriever dataprep-redis embedding-tei retriever-redis reranking-tei"
+    service_list="doc-index-retriever dataprep-redis embedding retriever-redis reranking-tei"
     docker compose -f build.yaml build ${service_list} --no-cache
     docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5

@@ -43,7 +43,7 @@ Here is the output:
 CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
 28d9a5570246 opea/chatqna-ui:latest "docker-entrypoint.s…" 2 minutes ago Up 2 minutes 0.0.0.0:5173->5173/tcp, :::5173->5173/tcp chatqna-gaudi-ui-server
 bee1132464cd opea/chatqna:latest "python chatqna.py" 2 minutes ago Up 2 minutes 0.0.0.0:8888->8888/tcp, :::8888->8888/tcp chatqna-gaudi-backend-server
-f810f3b4d329 opea/embedding-tei:latest "python embedding_te…" 2 minutes ago Up 2 minutes 0.0.0.0:6000->6000/tcp, :::6000->6000/tcp embedding-tei-server
+f810f3b4d329 opea/embedding:latest "python embedding_te…" 2 minutes ago Up 2 minutes 0.0.0.0:6000->6000/tcp, :::6000->6000/tcp embedding-server
 325236a01f9b opea/llm-textgen:latest "python llm.py" 2 minutes ago Up 2 minutes 0.0.0.0:9000->9000/tcp, :::9000->9000/tcp llm-textgen-gaudi-server
 2fa17d84605f opea/dataprep-redis:latest "python prepare_doc_…" 2 minutes ago Up 2 minutes 0.0.0.0:6007->6007/tcp, :::6007->6007/tcp dataprep-redis-server
 69e1fb59e92c opea/retriever-redis:latest "/home/user/comps/re…" 2 minutes ago Up 2 minutes 0.0.0.0:7000->7000/tcp, :::7000->7000/tcp retriever-redis-server

@@ -41,12 +41,12 @@ services:
       dockerfile: ./docker/Dockerfile.react
     extends: chatqna
     image: ${REGISTRY:-opea}/chatqna-conversation-ui:${TAG:-latest}
-  embedding-tei:
+  embedding:
     build:
       context: GenAIComps
       dockerfile: comps/embeddings/src/Dockerfile
     extends: chatqna
-    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
   retriever-redis:
     build:
       context: GenAIComps

@@ -9,7 +9,7 @@ DocRetriever are the most widely adopted use case for leveraging the different m
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
+docker build -t opea/embedding:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
 ```

 - Retriever Vector store Image
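
If the old image is already present locally (for example from an earlier walkthrough), one low-effort migration path is to retag it rather than rebuild; this assumes only the image name changed, not its contents:

```bash
# Point the new name at the existing local image; no rebuild required.
docker tag opea/embedding-tei:latest opea/embedding:latest

# Once nothing references the old name, the stale tag can be removed.
docker rmi opea/embedding-tei:latest
```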

@@ -125,7 +125,7 @@ curl http://${host_ip}:8889/v1/retrievaltool -X POST -H "Content-Type: applicati
   -X POST \
   -d '{"text":"Explain the OPEA project"}' \
   -H 'Content-Type: application/json' > query
-docker container logs embedding-tei-server
+docker container logs embedding-server

 # if you used tei-gaudi
 docker container logs tei-embedding-gaudi-server

@@ -50,8 +50,8 @@ services:
       timeout: 10s
       retries: 60
   embedding:
-    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
-    container_name: embedding-tei-server
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
+    container_name: embedding-server
     ports:
       - "6000:6000"
     ipc: host
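
As an aside, the `${REGISTRY:-opea}` and `${TAG:-latest}` defaults in these compose files mean the renamed image can be sourced from another registry or pinned to a release without editing the YAML; the registry host below is illustrative:

```bash
# Compose substitutes REGISTRY and TAG from the environment, so this
# resolves the service image to myregistry.example.com/embedding:v1.2.
REGISTRY=myregistry.example.com TAG=v1.2 docker compose up -d embedding
```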

@@ -50,8 +50,8 @@ services:
       timeout: 10s
       retries: 60
   embedding:
-    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
-    container_name: embedding-tei-server
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
+    container_name: embedding-server
     ports:
       - "6000:6000"
     ipc: host

@@ -9,7 +9,7 @@ DocRetriever are the most widely adopted use case for leveraging the different m
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
+docker build -t opea/embedding:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
 ```

 - Retriever Vector store Image

@@ -115,7 +115,7 @@ curl http://${host_ip}:8889/v1/retrievaltool -X POST -H "Content-Type: applicati
   -X POST \
   -d '{"text":"Explain the OPEA project"}' \
   -H 'Content-Type: application/json' > query
-docker container logs embedding-tei-server
+docker container logs embedding-server

 # if you used tei-gaudi
 docker container logs tei-embedding-gaudi-server

@@ -55,8 +55,8 @@ services:
       timeout: 10s
       retries: 60
   embedding:
-    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
-    container_name: embedding-tei-server
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
+    container_name: embedding-server
     ports:
       - "6000:6000"
     ipc: host

@@ -11,12 +11,12 @@ services:
       context: ../
       dockerfile: ./Dockerfile
     image: ${REGISTRY:-opea}/doc-index-retriever:${TAG:-latest}
-  embedding-tei:
+  embedding:
     build:
       context: GenAIComps
       dockerfile: comps/embeddings/src/Dockerfile
     extends: doc-index-retriever
-    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
   retriever-redis:
     build:
       context: GenAIComps

@@ -21,7 +21,7 @@ function build_docker_images() {
         echo "Cloning GenAIComps repository"
         git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
     fi
-    service_list="dataprep-redis embedding-tei retriever-redis reranking-tei doc-index-retriever"
+    service_list="dataprep-redis embedding retriever-redis reranking-tei doc-index-retriever"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5

@@ -98,7 +98,7 @@ function validate_megaservice() {
     echo "return value is $EXIT_CODE"
     if [ "$EXIT_CODE" == "1" ]; then
         echo "=============Embedding container log=================="
-        docker logs embedding-tei-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
+        docker logs embedding-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
         echo "=============Retriever container log=================="
         docker logs retriever-redis-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
         echo "=============TEI Reranking log=================="

@@ -21,7 +21,7 @@ function build_docker_images() {
         echo "Cloning GenAIComps repository"
         git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
     fi
-    service_list="dataprep-redis embedding-tei retriever-redis doc-index-retriever"
+    service_list="dataprep-redis embedding retriever-redis doc-index-retriever"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5

@@ -92,7 +92,7 @@ function validate_megaservice() {
     echo "return value is $EXIT_CODE"
     if [ "$EXIT_CODE" == "1" ]; then
         echo "=============Embedding container log=================="
-        docker logs embedding-tei-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
+        docker logs embedding-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
         echo "=============Retriever container log=================="
         docker logs retriever-redis-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
         echo "=============Doc-index-retriever container log=================="

@@ -100,12 +100,12 @@ In the below, we provide a table that describes for each microservice component

 By default, the embedding and LVM models are set to a default value as listed below:

-| Service       | HW    | Model                                     |
-| ------------- | ----- | ----------------------------------------- |
-| embedding-tei | Xeon  | BridgeTower/bridgetower-large-itm-mlm-itc |
-| LVM           | Xeon  | llava-hf/llava-1.5-7b-hf                  |
-| embedding-tei | Gaudi | BridgeTower/bridgetower-large-itm-mlm-itc |
-| LVM           | Gaudi | llava-hf/llava-v1.6-vicuna-13b-hf         |
+| Service   | HW    | Model                                     |
+| --------- | ----- | ----------------------------------------- |
+| embedding | Xeon  | BridgeTower/bridgetower-large-itm-mlm-itc |
+| LVM       | Xeon  | llava-hf/llava-1.5-7b-hf                  |
+| embedding | Gaudi | BridgeTower/bridgetower-large-itm-mlm-itc |
+| LVM       | Gaudi | llava-hf/llava-v1.6-vicuna-13b-hf         |

 You can choose other LVM models, such as `llava-hf/llava-1.5-7b-hf` and `llava-hf/llava-1.5-13b-hf`, as needed.
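
The defaults in the table above are typically fed in through environment variables rather than hard-coded; `EMBEDDING_MODEL_ID` is the variable the bridgetower entrypoint further down consumes, though whether a given deployment script exports it for this service is an assumption here:

```bash
# Hypothetical override before bringing the stack up: swap in the
# Gaudi-tuned BridgeTower checkpoint referenced elsewhere in this changeset.
export EMBEDDING_MODEL_ID="BridgeTower/bridgetower-large-itm-mlm-gaudi"
docker compose up -d
```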

@@ -28,10 +28,10 @@ cd GenAIComps
 docker build --no-cache -t opea/embedding-multimodal-bridgetower:latest --build-arg EMBEDDER_PORT=$EMBEDDER_PORT --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/integrations/dependency/bridgetower/Dockerfile .
 ```

-Build embedding-tei microservice image
+Build embedding microservice image

 ```bash
-docker build --no-cache -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
+docker build --no-cache -t opea/embedding:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
 ```

 ### 2. Build LVM Images

@@ -87,7 +87,7 @@ Then run the command `docker images`, you will have the following 8 Docker Image
 2. `ghcr.io/huggingface/text-generation-inference:2.4.1-rocm`
 3. `opea/lvm-tgi:latest`
 4. `opea/retriever-multimodal-redis:latest`
-5. `opea/embedding-tei:latest`
+5. `opea/embedding:latest`
 6. `opea/embedding-multimodal-bridgetower:latest`
 7. `opea/multimodalqna:latest`
 8. `opea/multimodalqna-ui:latest`

@@ -98,11 +98,11 @@ Then run the command `docker images`, you will have the following 8 Docker Image

 By default, the multimodal-embedding and LVM models are set to a default value as listed below:

-| Service       | Model                                       |
-| ------------- | ------------------------------------------- |
-| embedding-tei | BridgeTower/bridgetower-large-itm-mlm-gaudi |
-| LVM           | llava-hf/llava-1.5-7b-hf                    |
-| LVM           | Xkev/Llama-3.2V-11B-cot                     |
+| Service   | Model                                       |
+| --------- | ------------------------------------------- |
+| embedding | BridgeTower/bridgetower-large-itm-mlm-gaudi |
+| LVM       | llava-hf/llava-1.5-7b-hf                    |
+| LVM       | Xkev/Llama-3.2V-11B-cot                     |

 Note:

@@ -158,7 +158,7 @@ curl http://${host_ip}:${EMBEDDER_PORT}/v1/encode \
   -d '{"text":"This is example", "img_b64_str": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC"}'
 ```

-2. embedding-tei
+2. embedding

 ```bash
 curl http://${host_ip}:$MM_EMBEDDING_PORT_MICROSERVICE/v1/embeddings \

@@ -55,9 +55,9 @@ services:
       start_period: 30s
     entrypoint: ["python", "bridgetower_server.py", "--device", "cpu", "--model_name_or_path", $EMBEDDING_MODEL_ID]
     restart: unless-stopped
-  embedding-tei:
-    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
-    container_name: embedding-tei
+  embedding:
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
+    container_name: embedding
     depends_on:
       embedding-multimodal-bridgetower:
         condition: service_healthy

@@ -138,7 +138,7 @@ services:
     depends_on:
       - redis-vector-db
       - dataprep-multimodal-redis
-      - embedding-tei
+      - embedding
       - retriever-redis
       - lvm-tgi
     ports:

@@ -24,7 +24,7 @@ embedding-multimodal-bridgetower
 =====================
 Port 6006 - Open to 0.0.0.0/0

-embedding-tei
+embedding
 =========
 Port 6000 - Open to 0.0.0.0/0

@@ -115,10 +115,10 @@ cd GenAIComps
 docker build --no-cache -t opea/embedding-multimodal-bridgetower:latest --build-arg EMBEDDER_PORT=$EMBEDDER_PORT --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/integrations/dependency/bridgetower/Dockerfile .
 ```

-Build embedding-tei microservice image
+Build embedding microservice image

 ```bash
-docker build --no-cache -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
+docker build --no-cache -t opea/embedding:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
 ```

 ### 2. Build retriever-multimodal-redis Image

@@ -184,7 +184,7 @@ Then run the command `docker images`, you will have the following 11 Docker Imag
 4. `opea/retriever-multimodal-redis:latest`
 5. `opea/whisper:latest`
 6. `opea/redis-vector-db`
-7. `opea/embedding-tei:latest`
+7. `opea/embedding:latest`
 8. `opea/embedding-multimodal-bridgetower:latest`
 9. `opea/multimodalqna:latest`
 10. `opea/multimodalqna-ui:latest`

@@ -195,10 +195,10 @@ Then run the command `docker images`, you will have the following 11 Docker Imag

 By default, the multimodal-embedding and LVM models are set to a default value as listed below:

-| Service       | Model                                       |
-| ------------- | ------------------------------------------- |
-| embedding-tei | BridgeTower/bridgetower-large-itm-mlm-gaudi |
-| LVM           | llava-hf/llava-1.5-7b-hf                    |
+| Service   | Model                                       |
+| --------- | ------------------------------------------- |
+| embedding | BridgeTower/bridgetower-large-itm-mlm-gaudi |
+| LVM       | llava-hf/llava-1.5-7b-hf                    |

 ### Start all the services Docker Containers

@@ -227,7 +227,7 @@ curl http://${host_ip}:${EMBEDDER_PORT}/v1/encode \
   -d '{"text":"This is example", "img_b64_str": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC"}'
 ```

-2. embedding-tei
+2. embedding

 ```bash
 curl http://${host_ip}:$MM_EMBEDDING_PORT_MICROSERVICE/v1/embeddings \

@@ -55,9 +55,9 @@ services:
       start_period: 30s
     entrypoint: ["python", "bridgetower_server.py", "--device", "cpu", "--model_name_or_path", $EMBEDDING_MODEL_ID]
     restart: unless-stopped
-  embedding-tei:
-    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
-    container_name: embedding-tei
+  embedding:
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
+    container_name: embedding
     depends_on:
       embedding-multimodal-bridgetower:
         condition: service_healthy

@@ -120,7 +120,7 @@ services:
     depends_on:
       - redis-vector-db
       - dataprep-multimodal-redis
-      - embedding-tei
+      - embedding
       - retriever-redis
       - lvm-llava-svc
     ports:

@@ -66,10 +66,10 @@ cd GenAIComps
 docker build --no-cache -t opea/embedding-multimodal-bridgetower:latest --build-arg EMBEDDER_PORT=$EMBEDDER_PORT --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/integrations/dependency/bridgetower/Dockerfile .
 ```

-Build embedding-tei microservice image
+Build embedding microservice image

 ```bash
-docker build --no-cache -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
+docker build --no-cache -t opea/embedding:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
 ```

 ### 2. Build retriever-multimodal-redis Image

@@ -133,7 +133,7 @@ Then run the command `docker images`, you will have the following 11 Docker Imag
 4. `opea/retriever-multimodal-redis:latest`
 5. `opea/whisper:latest`
 6. `opea/redis-vector-db`
-7. `opea/embedding-tei:latest`
+7. `opea/embedding:latest`
 8. `opea/embedding-multimodal-bridgetower:latest`
 9. `opea/multimodalqna:latest`
 10. `opea/multimodalqna-ui:latest`

@@ -144,10 +144,10 @@ Then run the command `docker images`, you will have the following 11 Docker Imag

 By default, the multimodal-embedding and LVM models are set to a default value as listed below:

-| Service       | Model                                       |
-| ------------- | ------------------------------------------- |
-| embedding-tei | BridgeTower/bridgetower-large-itm-mlm-gaudi |
-| LVM           | llava-hf/llava-v1.6-vicuna-13b-hf           |
+| Service   | Model                                       |
+| --------- | ------------------------------------------- |
+| embedding | BridgeTower/bridgetower-large-itm-mlm-gaudi |
+| LVM       | llava-hf/llava-v1.6-vicuna-13b-hf           |

 ### Start all the services Docker Containers

@@ -176,7 +176,7 @@ curl http://${host_ip}:${EMBEDDER_PORT}/v1/encode \
   -d '{"text":"This is example", "img_b64_str": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC"}'
 ```

-2. embedding-tei
+2. embedding

 ```bash
 curl http://${host_ip}:$MM_EMBEDDING_PORT_MICROSERVICE/v1/embeddings \

@@ -55,9 +55,9 @@ services:
       start_period: 30s
     entrypoint: ["python", "bridgetower_server.py", "--device", "hpu", "--model_name_or_path", $EMBEDDING_MODEL_ID]
     restart: unless-stopped
-  embedding-tei:
-    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
-    container_name: embedding-tei
+  embedding:
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
+    container_name: embedding
     depends_on:
       embedding-multimodal-bridgetower:
         condition: service_healthy

@@ -137,7 +137,7 @@ services:
     depends_on:
       - redis-vector-db
       - dataprep-multimodal-redis
-      - embedding-tei
+      - embedding
       - retriever-redis
       - lvm-tgi
     ports:

@@ -23,12 +23,12 @@ services:
       dockerfile: comps/embeddings/src/integrations/dependency/bridgetower/Dockerfile
     extends: multimodalqna
     image: ${REGISTRY:-opea}/embedding-multimodal-bridgetower:${TAG:-latest}
-  embedding-tei:
+  embedding:
     build:
       context: GenAIComps
       dockerfile: comps/embeddings/src/Dockerfile
     extends: multimodalqna
-    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
   retriever-redis:
     build:
       context: GenAIComps

@@ -22,7 +22,7 @@ function build_docker_images() {
     cd $WORKPATH/docker_image_build
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="multimodalqna multimodalqna-ui embedding-multimodal-bridgetower embedding-tei retriever-redis lvm-tgi dataprep-multimodal-redis whisper"
+    service_list="multimodalqna multimodalqna-ui embedding-multimodal-bridgetower embedding retriever-redis lvm-tgi dataprep-multimodal-redis whisper"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker pull ghcr.io/huggingface/tgi-gaudi:2.0.6

@@ -144,19 +144,19 @@ function validate_microservices() {
         '{"text":"This is example", "img_b64_str": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC"}'

     # embedding microservice
-    echo "Validating embedding-tei"
+    echo "Validating embedding"
     validate_service \
         "http://${host_ip}:$MM_EMBEDDING_PORT_MICROSERVICE/v1/embeddings" \
         '"embedding":[' \
-        "embedding-tei" \
-        "embedding-tei" \
+        "embedding" \
+        "embedding" \
         '{"text" : "This is some sample text."}'

     validate_service \
         "http://${host_ip}:$MM_EMBEDDING_PORT_MICROSERVICE/v1/embeddings" \
         '"embedding":[' \
-        "embedding-tei" \
-        "embedding-tei" \
+        "embedding" \
+        "embedding" \
         '{"text": {"text" : "This is some sample text."}, "image" : {"url": "https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true"}}'

     sleep 1m # retrieval can't curl as expected, try to wait for more time
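
`validate_service` itself is defined elsewhere in these test scripts; from the call sites above its argument order appears to be URL, expected substring, service label, container name, then JSON payload. A minimal sketch of such a helper, reconstructed under that assumption rather than copied from the repo:

```bash
# Hypothetical reconstruction of validate_service based on the call sites.
validate_service() {
    local url="$1" expected="$2" service_name="$3" container="$4" payload="$5"
    local response
    # POST the payload and check the response for the expected substring.
    response=$(curl -s -X POST "$url" -H 'Content-Type: application/json' -d "$payload")
    if echo "$response" | grep -q "$expected"; then
        echo "[$service_name] PASS"
    else
        echo "[$service_name] FAIL; dumping container log:"
        docker logs "$container"
        return 1
    fi
}
```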

@@ -23,7 +23,7 @@ function build_docker_images() {
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../

     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="multimodalqna multimodalqna-ui embedding-multimodal-bridgetower embedding-tei retriever-redis lvm-tgi lvm-llava-svc dataprep-multimodal-redis whisper"
+    service_list="multimodalqna multimodalqna-ui embedding-multimodal-bridgetower embedding retriever-redis lvm-tgi lvm-llava-svc dataprep-multimodal-redis whisper"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker images && sleep 1m

@@ -150,19 +150,19 @@ function validate_microservices() {
         '{"text":"This is example", "img_b64_str": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC"}'

     # embedding microservice
-    echo "Validating embedding-tei"
+    echo "Validating embedding"
     validate_service \
         "http://${host_ip}:$MM_EMBEDDING_PORT_MICROSERVICE/v1/embeddings" \
         '"embedding":[' \
-        "embedding-tei" \
-        "embedding-tei" \
+        "embedding" \
+        "embedding" \
         '{"text" : "This is some sample text."}'

     validate_service \
         "http://${host_ip}:$MM_EMBEDDING_PORT_MICROSERVICE/v1/embeddings" \
         '"embedding":[' \
-        "embedding-tei" \
-        "embedding-tei" \
+        "embedding" \
+        "embedding" \
         '{"text": {"text" : "This is some sample text."}, "image" : {"url": "https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true"}}'

     sleep 1m # retrieval can't curl as expected, try to wait for more time

@@ -22,7 +22,7 @@ function build_docker_images() {
     cd $WORKPATH/docker_image_build
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="multimodalqna multimodalqna-ui embedding-multimodal-bridgetower embedding-tei retriever-redis lvm-llava lvm-llava-svc dataprep-multimodal-redis whisper"
+    service_list="multimodalqna multimodalqna-ui embedding-multimodal-bridgetower embedding retriever-redis lvm-llava lvm-llava-svc dataprep-multimodal-redis whisper"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker images && sleep 1m

@@ -142,19 +142,19 @@ function validate_microservices() {
         '{"text":"This is example", "img_b64_str": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC"}'

     # embedding microservice
-    echo "Validating embedding-tei"
+    echo "Validating embedding"
     validate_service \
         "http://${host_ip}:$MM_EMBEDDING_PORT_MICROSERVICE/v1/embeddings" \
         '"embedding":[' \
-        "embedding-tei" \
-        "embedding-tei" \
+        "embedding" \
+        "embedding" \
         '{"text" : "This is some sample text."}'

     validate_service \
         "http://${host_ip}:$MM_EMBEDDING_PORT_MICROSERVICE/v1/embeddings" \
         '"embedding":[' \
-        "embedding-tei" \
-        "embedding-tei" \
+        "embedding" \
+        "embedding" \
         '{"text": {"text" : "This is some sample text."}, "image" : {"url": "https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true"}}'

     sleep 1m # retrieval can't curl as expected, try to wait for more time

@@ -13,7 +13,7 @@ First of all, you need to build Docker Images locally and install the python pac
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build --no-cache -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
+docker build --no-cache -t opea/embedding:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
 ```

 ### 2. Build Retriever Image

@@ -52,8 +52,8 @@ services:
       timeout: 10s
       retries: 60
   embedding:
-    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
-    container_name: embedding-tei-server
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
+    container_name: embedding-server
     depends_on:
       tei-embedding-service:
         condition: service_healthy

@@ -11,12 +11,12 @@ services:
       context: ../../ChatQnA/
       dockerfile: ./Dockerfile
     image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
-  embedding-tei:
+  embedding:
     build:
       context: GenAIComps
       dockerfile: comps/embeddings/src/Dockerfile
     extends: chatqna
-    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
   retriever-redis:
     build:
       context: GenAIComps

@@ -169,7 +169,7 @@ function validate_microservices() {
         "${ip_address}:6000/v1/embeddings" \
         '"embedding":[' \
         "embedding-microservice" \
-        "embedding-tei-server" \
+        "embedding-server" \
         '{"input":"What is Deep Learning?"}'

     sleep 1m # retrieval can't curl as expected, try to wait for more time
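
After the rename, the embedding microservice can still be smoke-tested by hand with the same payload the test sends; port 6000 is the published port shown in the compose hunks above:

```bash
# Manual spot check against the renamed embedding-server container.
curl http://${host_ip}:6000/v1/embeddings \
    -X POST \
    -H 'Content-Type: application/json' \
    -d '{"input":"What is Deep Learning?"}'
```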

@@ -9,7 +9,7 @@ This document outlines the deployment process for a SearchQnA application utiliz
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build --no-cache -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
+docker build --no-cache -t opea/embedding:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
 ```

 ### 2. Build Retriever Image

@@ -51,7 +51,7 @@ docker build --no-cache -t opea/opea/searchqna-ui:latest --build-arg https_proxy

 Then run the command `docker images`, you will have following images ready:

-1. `opea/embedding-tei:latest`
+1. `opea/embedding:latest`
 2. `opea/web-retriever-chroma:latest`
 3. `opea/reranking-tei:latest`
 4. `opea/llm-textgen:latest`

@@ -22,8 +22,8 @@ services:
       timeout: 10s
       retries: 60
   embedding:
-    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
-    container_name: embedding-tei-server
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
+    container_name: embedding-server
     depends_on:
       tei-embedding-service:
         condition: service_healthy

@@ -11,7 +11,7 @@ First of all, you need to build Docker Images locally. This step can be ignored
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build --no-cache -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
+docker build --no-cache -t opea/embedding:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
 ```

 ### 2. Build Retriever Image

@@ -51,7 +51,7 @@ docker build --no-cache -t opea/searchqna:latest --build-arg https_proxy=$https_

 Then run the command `docker images`, you will have

-1. `opea/embedding-tei:latest`
+1. `opea/embedding:latest`
 2. `opea/web-retriever-chroma:latest`
 3. `opea/reranking-tei:latest`
 4. `opea/llm-textgen:latest`

@@ -30,8 +30,8 @@ services:
       timeout: 10s
       retries: 60
   embedding:
-    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
-    container_name: embedding-tei-gaudi-server
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
+    container_name: embedding-gaudi-server
     depends_on:
       tei-embedding-service:
         condition: service_healthy

@@ -17,12 +17,12 @@ services:
       dockerfile: ./docker/Dockerfile
     extends: searchqna
     image: ${REGISTRY:-opea}/searchqna-ui:${TAG:-latest}
-  embedding-tei:
+  embedding:
     build:
       context: GenAIComps
       dockerfile: comps/embeddings/src/Dockerfile
     extends: searchqna
-    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
   web-retriever-chroma:
     build:
       context: GenAIComps

@@ -19,7 +19,7 @@ function build_docker_images() {
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../

     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="searchqna searchqna-ui embedding-tei web-retriever-chroma reranking-tei llm-textgen"
+    service_list="searchqna searchqna-ui embedding web-retriever-chroma reranking-tei llm-textgen"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5

@@ -75,7 +75,7 @@ function validate_megaservice() {
     docker logs web-retriever-chroma-server > ${LOG_PATH}/web-retriever-chroma-server.log
     docker logs searchqna-gaudi-backend-server > ${LOG_PATH}/searchqna-gaudi-backend-server.log
     docker logs tei-embedding-gaudi-server > ${LOG_PATH}/tei-embedding-gaudi-server.log
-    docker logs embedding-tei-gaudi-server > ${LOG_PATH}/embedding-tei-gaudi-server.log
+    docker logs embedding-gaudi-server > ${LOG_PATH}/embedding-gaudi-server.log

     if [[ $result == *"capital"* ]]; then
         echo "Result correct."

@@ -19,7 +19,7 @@ function build_docker_images() {
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../

     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="searchqna searchqna-ui embedding-tei web-retriever-chroma reranking-tei llm-textgen"
+    service_list="searchqna searchqna-ui embedding web-retriever-chroma reranking-tei llm-textgen"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

     docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5

@@ -2,7 +2,7 @@

 A list of released OPEA docker images in https://hub.docker.com/, contains all relevant images from the GenAIExamples, GenAIComps and GenAIInfra projects. Please expect more public available images in the future release.

-Take ChatQnA for example. ChatQnA is a chatbot application service based on the Retrieval Augmented Generation (RAG) architecture. It consists of [opea/embedding-tei](https://hub.docker.com/r/opea/embedding-tei), [opea/retriever-redis](https://hub.docker.com/r/opea/retriever-redis), [opea/reranking-tei](https://hub.docker.com/r/opea/reranking-tei), [opea/llm-textgen](https://hub.docker.com/r/opea/llm-textgen), [opea/dataprep-redis](https://hub.docker.com/r/opea/dataprep-redis), [opea/chatqna](https://hub.docker.com/r/opea/chatqna), [opea/chatqna-ui](https://hub.docker.com/r/opea/chatqna-ui) and [opea/chatqna-conversation-ui](https://hub.docker.com/r/opea/chatqna-conversation-ui) (Optional) multiple microservices. Other services are similar, see the corresponding README for details.
+Take ChatQnA for example. ChatQnA is a chatbot application service based on the Retrieval Augmented Generation (RAG) architecture. It consists of [opea/embedding](https://hub.docker.com/r/opea/embedding), [opea/retriever-redis](https://hub.docker.com/r/opea/retriever-redis), [opea/reranking-tei](https://hub.docker.com/r/opea/reranking-tei), [opea/llm-textgen](https://hub.docker.com/r/opea/llm-textgen), [opea/dataprep-redis](https://hub.docker.com/r/opea/dataprep-redis), [opea/chatqna](https://hub.docker.com/r/opea/chatqna), [opea/chatqna-ui](https://hub.docker.com/r/opea/chatqna-ui) and [opea/chatqna-conversation-ui](https://hub.docker.com/r/opea/chatqna-conversation-ui) (Optional) multiple microservices. Other services are similar, see the corresponding README for details.

 ## Example images

@@ -57,10 +57,9 @@ Take ChatQnA for example. ChatQnA is a chatbot application service based on the
 | [opea/dataprep-vdms](https://hub.docker.com/r/opea/dataprep-vdms) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/vdms/langchain/Dockerfile) | This docker image exposes an OPEA dataprep microservice based on VDMS vectordb for use by GenAI applications. |
 | [opea/embedding-langchain-mosec](https://hub.docker.com/r/opea/embedding-langchain-mosec) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/3rd_parties/nginx/src/Dockerfile) | The docker image exposed the OPEA mosec embedding microservice base on Langchain framework for GenAI application use |
 | [opea/embedding-multimodal-clip](https://hub.docker.com/r/opea/embedding-multimodal-clip) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/src/integrations/dependency/clip/Dockerfile) | The docker image exposes OPEA multimodal CLIP-based embedded microservices for use by GenAI applications |
-| [opea/embedding-tei](https://hub.docker.com/r/opea/embedding-tei) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/src/Dockerfile) | The docker image exposes OPEA multimodal embedded microservices for use by GenAI applications |
+| [opea/embedding](https://hub.docker.com/r/opea/embedding) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/src/Dockerfile) | The docker image exposes OPEA multimodal embedded microservices for use by GenAI applications |
 | [opea/embedding-multimodal-bridgetower](https://hub.docker.com/r/opea/embedding-multimodal-bridgetower) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/src/integrations/dependency/bridgetower/Dockerfile) | The docker image exposes OPEA multimodal embedded microservices based on bridgetower for use by GenAI applications |
 | [opea/embedding-multimodal-bridgetower-gaudi](https://hub.docker.com/r/opea/embedding-multimodal-bridgetower-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/src/integrations/dependency/bridgetower/Dockerfile.intel_hpu) | The docker image exposes OPEA multimodal embedded microservices based on bridgetower for use by GenAI applications on the Gaudi |
-| [opea/embedding-tei](https://hub.docker.com/r/opea/embedding-tei) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/src/Dockerfile) | The docker image exposed the OPEA embedding microservice upon tei docker image for GenAI application use |
 | [opea/feedbackmanagement](https://hub.docker.com/r/opea/feedbackmanagement) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/feedback_management/mongo/Dockerfile) | The docker image exposes that the OPEA feedback management microservice uses a MongoDB database for GenAI applications. |
 | [opea/finetuning](https://hub.docker.com/r/opea/finetuning) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/Dockerfile) | The docker image exposed the OPEA Fine-tuning microservice for GenAI application use |
 | [opea/finetuning-gaudi](https://hub.docker.com/r/opea/finetuning-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/Dockerfile.intel_hpu) | The docker image exposed the OPEA Fine-tuning microservice for GenAI application use on the Gaudi |