Refactor lvm related examples (#1333)
@@ -41,7 +41,7 @@ First of all, you need to build Docker Images locally and install the python pac
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build --no-cache -t opea/lvm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/tgi-llava/Dockerfile .
+docker build --no-cache -t opea/lvm:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/src/Dockerfile .
 docker build --no-cache -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/nginx/src/Dockerfile .
 ```
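For anyone rebuilding locally, a quick check that the renamed image exists after this hunk's build commands (a minimal sketch; the grep pattern is only illustrative):

```bash
# Expect both freshly built images in the output; no match means a build failed.
docker images --format '{{.Repository}}:{{.Tag}}' | grep -E '^opea/(lvm|nginx):latest$'
```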
@@ -73,7 +73,7 @@ docker pull ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu
 Then run the command `docker images`, you will have the following 5 Docker Images:

 1. `ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu`
-2. `opea/lvm-tgi:latest`
+2. `opea/lvm:latest`
 3. `opea/visualqna:latest`
 4. `opea/visualqna-ui:latest`
 5. `opea/nginx`
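To verify the full set in one go, a loop like this works (sketch; assumes `opea/nginx` carries the `latest` tag it was built with above):

```bash
# Report any expected image that is not present locally.
for img in ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu \
           opea/lvm:latest opea/visualqna:latest opea/visualqna-ui:latest opea/nginx:latest; do
  docker image inspect "$img" > /dev/null 2>&1 || echo "missing: $img"
done
```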
@@ -23,9 +23,9 @@ services:
       timeout: 10s
       retries: 60
     command: --model-id ${LVM_MODEL_ID} --max-input-length 4096 --max-total-tokens 8192 --cuda-graphs 0
-  lvm-tgi:
-    image: ${REGISTRY:-opea}/lvm-tgi:${TAG:-latest}
-    container_name: lvm-tgi-xeon-server
+  lvm:
+    image: ${REGISTRY:-opea}/lvm:${TAG:-latest}
+    container_name: lvm-xeon-server
     depends_on:
       llava-tgi-service:
         condition: service_healthy
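A cheap way to confirm the rename took effect in the compose file (run next to the edited compose file; `grep -x` requires an exact match):

```bash
# Should print `lvm`; a surviving `lvm-tgi` entry would mean the rename is incomplete.
docker compose config --services | grep -x 'lvm'
```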
@@ -37,6 +37,7 @@ services:
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
       LVM_ENDPOINT: ${LVM_ENDPOINT}
+      LVM_COMPONENT_NAME: "OPEA_TGI_LLAVA_LVM"
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
     restart: unless-stopped
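The added `LVM_COMPONENT_NAME` is what the refactored lvm microservice reads to select its TGI-LLaVA backend, which is why this hunk grows by one line. A minimal environment sketch for bringing the service up; the port and model ID are assumptions, not part of this diff:

```bash
export host_ip=$(hostname -I | awk '{print $1}')          # assumption: first local IP
export LVM_ENDPOINT="http://${host_ip}:8399"              # assumption: your TGI LLaVA port
export LVM_MODEL_ID="llava-hf/llava-v1.6-mistral-7b-hf"   # assumption: example LLaVA model
docker compose up -d lvm
```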
@@ -45,7 +46,7 @@ services:
     container_name: visualqna-xeon-backend-server
     depends_on:
       - llava-tgi-service
-      - lvm-tgi
+      - lvm
     ports:
       - "8888:8888"
     environment:
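With the dependency renamed, the backend still publishes port 8888, so a smoke test is unchanged. A sketch; the `/v1/visualqna` route and payload shape are assumptions based on the VisualQnA example, not part of this diff:

```bash
# Expect an HTTP 200 and a generated answer once all services are healthy.
curl -s http://localhost:8888/v1/visualqna \
  -H 'Content-Type: application/json' \
  -d '{"messages": "What do you see in this image?"}'
```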
@@ -11,7 +11,7 @@ First of all, you need to build Docker Images locally. This step can be ignored
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build --no-cache -t opea/lvm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/tgi-llava/Dockerfile .
+docker build --no-cache -t opea/lvm:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/src/Dockerfile .
 docker build --no-cache -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/nginx/src/Dockerfile .
 ```
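This hunk mirrors the Xeon README change above. One way to confirm no stale `lvm-tgi` references survive the refactor (sketch; run from the example's root directory):

```bash
# Zero matches means the rename is complete across docs and compose files.
grep -rn 'lvm-tgi' . && echo 'stale lvm-tgi references found' || echo 'clean'
```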
@@ -44,7 +44,7 @@ docker build --no-cache -t opea/visualqna-ui:latest --build-arg https_proxy=$htt
 Then run the command `docker images`, you will have the following 5 Docker Images:

 1. `ghcr.io/huggingface/tgi-gaudi:2.0.6`
-2. `opea/lvm-tgi:latest`
+2. `opea/lvm:latest`
 3. `opea/visualqna:latest`
 4. `opea/visualqna-ui:latest`
 5. `opea/nginx`
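The Gaudi list swaps in the `tgi-gaudi` serving image, which is pulled rather than built (sketch, mirroring the `docker pull` step the Xeon README uses for its TGI image):

```bash
# Pre-pull so `docker compose up` does not block on the download.
docker pull ghcr.io/huggingface/tgi-gaudi:2.0.6
```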
@@ -27,9 +27,9 @@ services:
       - SYS_NICE
     ipc: host
     command: --model-id ${LVM_MODEL_ID} --max-input-length 4096 --max-total-tokens 8192
-  lvm-tgi:
-    image: ${REGISTRY:-opea}/lvm-tgi:${TAG:-latest}
-    container_name: lvm-tgi-gaudi-server
+  lvm:
+    image: ${REGISTRY:-opea}/lvm:${TAG:-latest}
+    container_name: lvm-gaudi-server
     depends_on:
       - llava-tgi-service
     ports:
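After `docker compose up -d`, the renamed container can be spot-checked by name (sketch; the name comes from this hunk):

```bash
# Expect a single running container named lvm-gaudi-server.
docker ps --filter name=lvm-gaudi-server --format '{{.Names}}\t{{.Status}}'
```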
@@ -40,6 +40,7 @@ services:
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
       LVM_ENDPOINT: ${LVM_ENDPOINT}
+      LVM_COMPONENT_NAME: "OPEA_TGI_LLAVA_LVM"
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
     restart: unless-stopped
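To confirm the new variable actually reaches the running container (sketch; assumes the stack is already up):

```bash
# Should print OPEA_TGI_LLAVA_LVM.
docker exec lvm-gaudi-server printenv LVM_COMPONENT_NAME
```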
@@ -48,7 +49,7 @@ services:
     container_name: visualqna-gaudi-backend-server
     depends_on:
       - llava-tgi-service
-      - lvm-tgi
+      - lvm
     ports:
       - "8888:8888"
     environment:
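Finally, a quick overview of the renamed stack once everything is started (sketch):

```bash
# All services, including the renamed `lvm`, should report running/healthy.
docker compose ps
```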