Compare commits

10 Commits

update_vLL...genaicomps
| Author | SHA1 | Date |
| --- | --- | --- |
|  | 50c5af5612 |  |
|  | 40be38f68b |  |
|  | fb51d9f2ed |  |
|  | be4e9ad000 |  |
|  | fe90ca172f |  |
|  | 3146d5d69d |  |
|  | 744f7c9519 |  |
|  | bac73f4e1a |  |
|  | 1a80dcf4d1 |  |
|  | 5d302d7501 |  |
1 change: .github/workflows/_run-docker-compose.yml (vendored)
@@ -134,6 +134,7 @@ jobs:
       SERVING_TOKEN: ${{ secrets.SERVING_TOKEN }}
       IMAGE_REPO: ${{ inputs.registry }}
       IMAGE_TAG: ${{ inputs.tag }}
+      opea_branch: "refactor_comps"
       example: ${{ inputs.example }}
       hardware: ${{ inputs.hardware }}
       test_case: ${{ matrix.test_case }}
4 changes: .github/workflows/pr-docker-compose-e2e.yml (vendored)
@@ -4,8 +4,8 @@
 name: E2E test with docker compose

 on:
-  pull_request_target:
-    branches: ["main", "*rc"]
+  pull_request:
+    branches: ["main", "*rc", "genaicomps_refactor"]
     types: [opened, reopened, ready_for_review, synchronize] # added `ready_for_review` since draft is skipped
     paths:
       - "**/Dockerfile**"
@@ -22,6 +22,7 @@ jobs:
         run: |
           cd ..
           git clone https://github.com/opea-project/GenAIComps.git
+          git checkout refactor_comps

       - name: Check for Missing Dockerfile Paths in GenAIComps
         run: |
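Note: as written, the added `git checkout refactor_comps` runs in the directory that issued the clone, not inside the cloned repository. A minimal sketch of the presumably intended sequence (`GenAIComps` is the default clone directory name):

```bash
# Clone GenAIComps next to the workspace, then switch it to the refactor branch.
cd ..
git clone https://github.com/opea-project/GenAIComps.git
cd GenAIComps          # without this, git checkout targets whatever repo `..` is
git checkout refactor_comps
```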
@@ -18,10 +18,20 @@ TTS_SERVICE_HOST_IP = os.getenv("TTS_SERVICE_HOST_IP", "0.0.0.0")
 TTS_SERVICE_PORT = int(os.getenv("TTS_SERVICE_PORT", 9088))


+def align_inputs(self, inputs, cur_node, runtime_graph, llm_parameters_dict, **kwargs):
+    if self.services[cur_node].service_type == ServiceType.TTS:
+        new_inputs = {}
+        new_inputs["text"] = inputs["choices"][0]["text"]
+        return new_inputs
+    else:
+        return inputs
+
+
 class AudioQnAService:
     def __init__(self, host="0.0.0.0", port=8000):
         self.host = host
         self.port = port
+        ServiceOrchestrator.align_inputs = align_inputs
         self.megaservice = ServiceOrchestrator()
         self.endpoint = str(MegaServiceEndpoint.AUDIO_QNA)
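The `align_inputs` hook patched onto `ServiceOrchestrator` reshapes the LLM stage's OpenAI-style completion output into the minimal body the TTS stage expects, keeping only `choices[0].text` under the key `text`. A rough sketch of the same transformation done by hand with curl and jq (the `/v1/tts` path is an assumption for illustration; only port 9088 comes from this diff):

```bash
# LLM output shape:  {"choices": [{"text": "..."}], ...}
# TTS input shape:   {"text": "..."}
llm_response='{"choices":[{"text":"Hello, how can I help?"}]}'
curl -s "http://localhost:9088/v1/tts" \
  -H 'Content-Type: application/json' \
  -d "$(echo "$llm_response" | jq '{text: .choices[0].text}')"
```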
@@ -24,7 +24,7 @@ docker build -t opea/asr:latest --build-arg https_proxy=$https_proxy --build-arg
 ### 3. Build LLM Image

 ```bash
-docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 Note:
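Most hunks in this compare are this same mechanical move: Dockerfiles relocated from per-backend directories such as `comps/llms/text-generation/tgi/` into consolidated `src/` paths such as `comps/llms/src/text-generation/`. A hedged pre-flight check before building against the refactored layout (paths from the diff; the guard itself is generic shell):

```bash
cd GenAIComps
# Fail fast if the refactored Dockerfile path is absent (e.g. wrong branch checked out).
test -f comps/llms/src/text-generation/Dockerfile \
  || { echo "refactored Dockerfile not found; is refactor_comps checked out?"; exit 1; }
docker build --no-cache -t opea/llm-tgi:latest \
  --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy \
  -f comps/llms/src/text-generation/Dockerfile .
```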
@@ -59,6 +59,12 @@ services:
       HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
+      host_ip: ${host_ip}
+    healthcheck:
+      test: ["CMD-SHELL", "curl -f http://$host_ip:3006/health || exit 1"]
+      interval: 10s
+      timeout: 10s
+      retries: 100
     command: --model-id ${LLM_MODEL_ID}
     cap_add:
       - SYS_PTRACE
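The added healthcheck polls TGI's `/health` endpoint every 10 s, allowing up to 100 attempts, so dependents only start once the model server actually answers. The same probe can be run by hand; a sketch (the container name is assumed to match the compose service):

```bash
# The probe the container runs internally; exit code 0 means healthy.
curl -f "http://${host_ip}:3006/health" && echo "TGI is up"

# Or read the aggregated health state Docker derives from that probe:
docker inspect --format '{{.State.Health.Status}}' tgi-service
```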
@@ -71,7 +77,8 @@ services:
     image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
     container_name: llm-tgi-server
     depends_on:
-      - tgi-service
+      tgi-service:
+        condition: service_healthy
     ports:
       - "3007:9000"
     ipc: host
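Switching `depends_on` from the short list form to `condition: service_healthy` makes `docker compose up` hold the LLM microservice back until the healthcheck above reports healthy, rather than starting it as soon as the TGI container exists. Observable from the status column:

```bash
docker compose up -d
# Status shows "(healthy)" once the healthcheck passes; dependents start only after that.
docker ps --format 'table {{.Names}}\t{{.Status}}'
```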
@@ -79,7 +86,8 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_MODEL_ID: ${LLM_MODEL_ID}
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   audioqna-backend-server:
@@ -23,7 +23,7 @@ docker build -t opea/asr:latest --build-arg https_proxy=$https_proxy --build-arg
 ### 3. Build LLM Image

 ```bash
-docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 ### 4. Build TTS Image
@@ -53,12 +53,19 @@ services:
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
       HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      host_ip: ${host_ip}
+    healthcheck:
+      test: ["CMD-SHELL", "curl -f http://$host_ip:3006/health || exit 1"]
+      interval: 10s
+      timeout: 10s
+      retries: 100
     command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
   llm:
     image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
     container_name: llm-tgi-server
     depends_on:
-      - tgi-service
+      tgi-service:
+        condition: service_healthy
     ports:
       - "3007:9000"
     ipc: host
@@ -66,7 +73,8 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_MODEL_ID: ${LLM_MODEL_ID}
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   audioqna-xeon-backend-server:
@@ -23,7 +23,7 @@ docker build -t opea/asr:latest --build-arg https_proxy=$https_proxy --build-arg
 ### 3. Build LLM Image

 ```bash
-docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 ### 4. Build TTS Image
@@ -74,12 +74,18 @@ services:
     cap_add:
       - SYS_NICE
     ipc: host
+    healthcheck:
+      test: ["CMD-SHELL", "sleep 500 && exit 0"]
+      interval: 1s
+      timeout: 505s
+      retries: 1
     command: --model-id ${LLM_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048
   llm:
     image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
     container_name: llm-tgi-gaudi-server
     depends_on:
-      - tgi-service
+      tgi-service:
+        condition: service_healthy
     ports:
       - "3007:9000"
     ipc: host
@@ -87,7 +93,8 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_MODEL_ID: ${LLM_MODEL_ID}
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   audioqna-gaudi-backend-server:
@@ -44,7 +44,7 @@ services:
   llm-tgi:
     build:
       context: GenAIComps
-      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
     extends: audioqna
     image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
   speecht5-gaudi:
@@ -40,6 +40,7 @@ function start_services() {
     export ASR_SERVICE_HOST_IP=${ip_address}
     export TTS_SERVICE_HOST_IP=${ip_address}
     export LLM_SERVICE_HOST_IP=${ip_address}
+    export host_ip=${ip_address}

     export ASR_SERVICE_PORT=3001
     export TTS_SERVICE_PORT=3002
@@ -49,25 +50,7 @@ function start_services() {
     # Start Docker Containers
     docker compose up -d > ${LOG_PATH}/start_services_with_compose.log
-    n=0
-    until [[ "$n" -ge 100 ]]; do
-        docker logs tgi-gaudi-server > $LOG_PATH/tgi_service_start.log
-        if grep -q Connected $LOG_PATH/tgi_service_start.log; then
-            break
-        fi
-        sleep 5s
-        n=$((n+1))
-    done
-
-    n=0
-    until [[ "$n" -ge 100 ]]; do
-        docker logs whisper-service > $LOG_PATH/whisper_service_start.log
-        if grep -q "Uvicorn server setup on port" $LOG_PATH/whisper_service_start.log; then
-            break
-        fi
-        sleep 5s
-        n=$((n+1))
-    done
     sleep 20s
 }
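These log-grepping wait loops are removed because readiness is now handled by the compose healthchecks plus `condition: service_healthy` added in the hunks above. If a script still needs an explicit wait, a hedged equivalent that polls Docker's health state instead of grepping logs (the container name is a placeholder):

```bash
# Wait up to ~100 * 5s for the container's healthcheck to report healthy.
n=0
until [[ "$(docker inspect --format '{{.State.Health.Status}}' tgi-gaudi-server 2>/dev/null)" == "healthy" ]]; do
    [[ "$n" -ge 100 ]] && { echo "timed out waiting for healthy"; break; }
    sleep 5s
    n=$((n+1))
done
```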
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Advanced Micro Devices, Inc.
 # SPDX-License-Identifier: Apache-2.0

-set -ex
+set -xe
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
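`set -ex` and `set -xe` behave identically; option order is irrelevant. `-e` aborts on the first failing command and `-x` traces each command before executing it:

```bash
set -xe               # equivalent to `set -ex`
false                 # traced by -x, then the script exits here because of -e
echo "never reached"
```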
@@ -40,6 +40,7 @@ function start_services() {
     export ASR_SERVICE_HOST_IP=${ip_address}
     export TTS_SERVICE_HOST_IP=${ip_address}
     export LLM_SERVICE_HOST_IP=${ip_address}
+    export host_ip=${ip_address}

     export ASR_SERVICE_PORT=3001
     export TTS_SERVICE_PORT=3002
@@ -49,15 +50,7 @@ function start_services() {
     # Start Docker Containers
     docker compose up -d > ${LOG_PATH}/start_services_with_compose.log
-    n=0
-    until [[ "$n" -ge 100 ]]; do
-        docker logs tgi-service > $LOG_PATH/tgi_service_start.log
-        if grep -q Connected $LOG_PATH/tgi_service_start.log; then
-            break
-        fi
-        sleep 5s
-        n=$((n+1))
-    done
     sleep 20s
 }

 function validate_megaservice() {
     result=$(http_proxy="" curl http://${ip_address}:3008/v1/audioqna -XPOST -d '{"audio": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA", "max_tokens":64}' -H 'Content-Type: application/json')
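The megaservice check posts a small base64-encoded WAV and a `max_tokens` budget to the AudioQnA endpoint. To sanity-check that payload locally, the sample decodes to a valid RIFF/WAVE file:

```bash
# Decode the test's base64 audio and confirm it is a WAV container.
echo "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA" | base64 -d > sample.wav
file sample.wav   # expected: RIFF (little-endian) data, WAVE audio
```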
@@ -39,6 +39,7 @@ function start_services() {
     export ASR_SERVICE_HOST_IP=${ip_address}
     export TTS_SERVICE_HOST_IP=${ip_address}
     export LLM_SERVICE_HOST_IP=${ip_address}
+    export host_ip=${ip_address}

     export ASR_SERVICE_PORT=3001
     export TTS_SERVICE_PORT=3002
@@ -48,15 +49,7 @@ function start_services() {
     # Start Docker Containers
     docker compose up -d > ${LOG_PATH}/start_services_with_compose.log
-    n=0
-    until [[ "$n" -ge 100 ]]; do
-        docker logs tgi-service > $LOG_PATH/tgi_service_start.log
-        if grep -q Connected $LOG_PATH/tgi_service_start.log; then
-            break
-        fi
-        sleep 5s
-        n=$((n+1))
-    done
     sleep 20s
 }
@@ -23,7 +23,7 @@ docker build -t opea/asr:latest --build-arg https_proxy=$https_proxy --build-arg
 ### 3. Build LLM Image

 ```bash
-docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 ### 4. Build TTS Image
@@ -23,7 +23,7 @@ docker build -t opea/asr:latest --build-arg https_proxy=$https_proxy --build-arg
 ### 3. Build LLM Image

 ```bash
-docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 ### 4. Build TTS Image
@@ -32,7 +32,7 @@ services:
   llm-tgi:
     build:
       context: GenAIComps
-      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
     extends: avatarchatbot
     image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
   speecht5-gaudi:
@@ -138,7 +138,7 @@ cd ../../../..

 ```bash
 cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/deployment/docker/Dockerfile .
 ```

 Then run the command `docker images`, you will have the following 5 Docker Images:
@@ -55,7 +55,7 @@ docker build --no-cache -t opea/chatqna-ui:latest --build-arg https_proxy=$https

 ```bash
 cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/deployment/docker/Dockerfile .
 ```

 Then run the command `docker images`, you will have the following 6 Docker Images:
@@ -161,7 +161,7 @@ docker build --no-cache -t opea/chatqna-conversation-ui:latest --build-arg https

 ```bash
 cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/deployment/docker/Dockerfile .
 ```

 Then run the command `docker images`, you will have the following 5 Docker Images:
@@ -164,7 +164,7 @@ docker build --no-cache -t opea/chatqna-conversation-ui:latest --build-arg https

 ```bash
 cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/deployment/docker/Dockerfile .
 ```

 Then run the command `docker images`, you will have the following 5 Docker Images:
@@ -122,7 +122,7 @@ cd ../../../..

 ```bash
 cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/deployment/docker/Dockerfile .
 ```

 Then run the command `docker images`, you will have the following 5 Docker Images:
@@ -151,7 +151,7 @@ docker build --no-cache -t opea/chatqna-conversation-ui:latest --build-arg https

 ```bash
 cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/deployment/docker/Dockerfile .
 ```

 Then run the command `docker images`, you will have the following 5 Docker Images:
@@ -148,7 +148,7 @@ cd ../../..

 ```bash
 cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/deployment/docker/Dockerfile .
 ```

 Then run the command `docker images`, you will have the following 5 Docker Images:
@@ -44,7 +44,7 @@ services:
   embedding-tei:
     build:
       context: GenAIComps
-      dockerfile: comps/embeddings/tei/langchain/Dockerfile
+      dockerfile: comps/embeddings/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
   retriever-redis:
@@ -68,25 +68,25 @@ services:
   reranking-tei:
     build:
       context: GenAIComps
-      dockerfile: comps/reranks/tei/Dockerfile
+      dockerfile: comps/reranks/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
   llm-tgi:
     build:
       context: GenAIComps
-      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
   llm-ollama:
     build:
       context: GenAIComps
-      dockerfile: comps/llms/text-generation/ollama/langchain/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/llm-ollama:${TAG:-latest}
   llm-vllm:
     build:
       context: GenAIComps
-      dockerfile: comps/llms/text-generation/vllm/langchain/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/llm-vllm:${TAG:-latest}
   dataprep-redis:
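Every `build:` entry here extends a shared base service and only swaps the `dockerfile:` path into the refactored GenAIComps layout, so individual images can still be rebuilt selectively. A sketch (the compose build file name is an assumption):

```bash
# Rebuild only the refactored LLM and reranking images from the build compose file.
docker compose -f build.yaml build --no-cache llm-tgi reranking-tei
```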
@@ -128,6 +128,6 @@ services:
   nginx:
     build:
       context: GenAIComps
-      dockerfile: comps/nginx/Dockerfile
+      dockerfile: comps/3rd_parties/nginx/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-set -e
+set -xe
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Advanced Micro Devices, Inc.
 # SPDX-License-Identifier: Apache-2.0

-set -xe
+set -e
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-set -e
+set -xe
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-set -e
+set -xe
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -2,7 +2,7 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-set -e
+set -xe
 IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -10,7 +10,7 @@ git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps

 ### Build Docker image
-docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 ### Build the MegaService Docker Image
@@ -19,7 +19,7 @@ Should the Docker image you seek not yet be available on Docker Hub, you can bui
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 ### 2. Build the MegaService Docker Image
@@ -11,7 +11,7 @@ First of all, you need to build the Docker images locally. This step can be igno
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 ### 2. Build the MegaService Docker Image
@@ -26,6 +26,6 @@ services:
   llm-tgi:
     build:
       context: GenAIComps
-      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
     extends: codegen
     image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
@@ -10,7 +10,7 @@ git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps

 ### Build Docker image
-docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 ### Build the MegaService Docker Image
@@ -19,7 +19,7 @@ First of all, you need to build Docker Images locally and install the python pac
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 ### 2. Build MegaService Docker Image
@@ -41,7 +41,7 @@ docker build -t opea/codetrans-ui:latest --build-arg https_proxy=$https_proxy --

 ```bash
 cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/deployment/docker/Dockerfile .
 ```

 Then run the command `docker images`, you will have the following Docker Images:
@@ -11,7 +11,7 @@ First of all, you need to build Docker Images locally and install the python pac
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build -t opea/llm-tgi:latest --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build -t opea/llm-tgi:latest --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 ### 2. Build MegaService Docker Image
@@ -33,7 +33,7 @@ docker build -t opea/codetrans-ui:latest --build-arg https_proxy=$https_proxy --

 ```bash
 cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/deployment/docker/Dockerfile .
 ```

 Then run the command `docker images`, you will have the following Docker Images:
@@ -20,12 +20,12 @@ services:
   llm-tgi:
     build:
       context: GenAIComps
-      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
     extends: codetrans
     image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
   nginx:
     build:
       context: GenAIComps
-      dockerfile: comps/nginx/Dockerfile
+      dockerfile: comps/3rd_parties/nginx/deployment/docker/Dockerfile
     extends: codetrans
     image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
@@ -9,7 +9,7 @@ DocRetriever are the most widely adopted use case for leveraging the different m
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/tei/langchain/Dockerfile .
+docker build -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
 ```

 - Retriever Vector store Image
@@ -21,7 +21,7 @@ DocRetriever are the most widely adopted use case for leveraging the different m
 - Rerank TEI Image

 ```bash
-docker build -t opea/reranking-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/tei/Dockerfile .
+docker build -t opea/reranking-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/src/Dockerfile .
 ```

 - Dataprep Image
@@ -9,7 +9,7 @@ DocRetriever are the most widely adopted use case for leveraging the different m
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/tei/langchain/Dockerfile .
+docker build -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
 ```

 - Retriever Vector store Image
@@ -21,7 +21,7 @@ DocRetriever are the most widely adopted use case for leveraging the different m
 - Rerank TEI Image

 ```bash
-docker build -t opea/reranking-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/tei/Dockerfile .
+docker build -t opea/reranking-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/src/Dockerfile .
 ```

 - Dataprep Image
@@ -14,7 +14,7 @@ services:
   embedding-tei:
     build:
       context: GenAIComps
-      dockerfile: comps/embeddings/tei/langchain/Dockerfile
+      dockerfile: comps/embeddings/src/Dockerfile
     extends: doc-index-retriever
     image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
   retriever-redis:
@@ -26,7 +26,7 @@ services:
   reranking-tei:
     build:
       context: GenAIComps
-      dockerfile: comps/reranks/tei/Dockerfile
+      dockerfile: comps/reranks/src/Dockerfile
     extends: doc-index-retriever
     image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
   dataprep-redis:
@@ -10,7 +10,7 @@ git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps

 ### Build Docker image
-docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 ## 🚀 Start Microservices and MegaService
@@ -36,7 +36,7 @@ services:
         https_proxy: ${https_proxy}
         no_proxy: ${no_proxy}
       context: GenAIComps
-      dockerfile: comps/nginx/Dockerfile
+      dockerfile: comps/3rd_parties/nginx/deployment/docker/Dockerfile
     image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
   graphrag-ui:
     build:
@@ -25,13 +25,13 @@ Build embedding-multimodal-bridgetower docker image
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build --no-cache -t opea/embedding-multimodal-bridgetower:latest --build-arg EMBEDDER_PORT=$EMBEDDER_PORT --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/multimodal/bridgetower/Dockerfile .
+docker build --no-cache -t opea/embedding-multimodal-bridgetower:latest --build-arg EMBEDDER_PORT=$EMBEDDER_PORT --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/integrations/dependency/bridgetower/Dockerfile .
 ```

 Build embedding-multimodal microservice image

 ```bash
-docker build --no-cache -t opea/embedding-multimodal:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/multimodal/multimodal_langchain/Dockerfile .
+docker build --no-cache -t opea/embedding-multimodal:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
 ```

 ### 2. Build LVM Images
@@ -112,13 +112,13 @@ Build embedding-multimodal-bridgetower docker image
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build --no-cache -t opea/embedding-multimodal-bridgetower:latest --build-arg EMBEDDER_PORT=$EMBEDDER_PORT --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/multimodal/bridgetower/Dockerfile .
+docker build --no-cache -t opea/embedding-multimodal-bridgetower:latest --build-arg EMBEDDER_PORT=$EMBEDDER_PORT --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/native/multimodal/bridgetower/dependency/Dockerfile .
 ```

 Build embedding-multimodal microservice image

 ```bash
-docker build --no-cache -t opea/embedding-multimodal:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/multimodal/multimodal_langchain/Dockerfile .
+docker build --no-cache -t opea/embedding-multimodal:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/native/multimodal/bridgetower/wrapper/Dockerfile .
 ```

 ### 2. Build retriever-multimodal-redis Image
@@ -63,13 +63,13 @@ Build embedding-multimodal-bridgetower docker image
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build --no-cache -t opea/embedding-multimodal-bridgetower:latest --build-arg EMBEDDER_PORT=$EMBEDDER_PORT --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/multimodal/bridgetower/Dockerfile .
+docker build --no-cache -t opea/embedding-multimodal-bridgetower:latest --build-arg EMBEDDER_PORT=$EMBEDDER_PORT --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/native/multimodal/bridgetower/dependency/Dockerfile .
 ```

 Build embedding-multimodal microservice image

 ```bash
-docker build --no-cache -t opea/embedding-multimodal:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/multimodal/multimodal_langchain/Dockerfile .
+docker build --no-cache -t opea/embedding-multimodal:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/native/multimodal/bridgetower/wrapper/Dockerfile .
 ```

 ### 2. Build retriever-multimodal-redis Image
@@ -20,13 +20,13 @@ services:
   embedding-multimodal-bridgetower:
     build:
       context: GenAIComps
-      dockerfile: comps/embeddings/multimodal/bridgetower/Dockerfile
+      dockerfile: comps/embeddings/native/multimodal/bridgetower/dependency/Dockerfile
     extends: multimodalqna
     image: ${REGISTRY:-opea}/embedding-multimodal-bridgetower:${TAG:-latest}
   embedding-multimodal:
     build:
       context: GenAIComps
-      dockerfile: comps/embeddings/multimodal/multimodal_langchain/Dockerfile
+      dockerfile: comps/embeddings/native/multimodal/bridgetower/wrapper/Dockerfile
     extends: multimodalqna
     image: ${REGISTRY:-opea}/embedding-multimodal:${TAG:-latest}
   retriever-redis:
@@ -13,7 +13,7 @@ First of all, you need to build Docker Images locally and install the python pac
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build --no-cache -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/tei/langchain/Dockerfile .
+docker build --no-cache -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
 ```

 ### 2. Build Retriever Image
@@ -25,7 +25,7 @@ docker build --no-cache -t opea/retriever-redis:latest --build-arg https_proxy=$
 ### 3. Build Rerank Image

 ```bash
-docker build --no-cache -t opea/reranking-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/tei/Dockerfile .
+docker build --no-cache -t opea/reranking-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/src/Dockerfile .
 ```

 ### 4. Build LLM Image
@@ -33,7 +33,7 @@ docker build --no-cache -t opea/reranking-tei:latest --build-arg https_proxy=$ht
 #### Use TGI as backend

 ```bash
-docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 ### 5. Build Dataprep Image
@@ -14,7 +14,7 @@ services:
   embedding-tei:
     build:
       context: GenAIComps
-      dockerfile: comps/embeddings/tei/langchain/Dockerfile
+      dockerfile: comps/embeddings/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
   retriever-redis:
@@ -26,13 +26,13 @@ services:
   reranking-tei:
     build:
       context: GenAIComps
-      dockerfile: comps/reranks/tei/Dockerfile
+      dockerfile: comps/reranks/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
   llm-tgi:
     build:
       context: GenAIComps
-      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
   dataprep-redis:
@@ -9,7 +9,7 @@ This document outlines the deployment process for a SearchQnA application utiliz
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build --no-cache -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/tei/langchain/Dockerfile .
+docker build --no-cache -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
 ```

 ### 2. Build Retriever Image
@@ -21,13 +21,13 @@ docker build --no-cache -t opea/web-retriever-chroma:latest --build-arg https_pr
 ### 3. Build Rerank Image

 ```bash
-docker build --no-cache -t opea/reranking-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/tei/Dockerfile .
+docker build --no-cache -t opea/reranking-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/src/Dockerfile .
 ```

 ### 4. Build LLM Image

 ```bash
-docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 ### 5. Build MegaService Docker Image
@@ -11,7 +11,7 @@ First of all, you need to build Docker Images locally. This step can be ignored
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build --no-cache -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/tei/langchain/Dockerfile .
+docker build --no-cache -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/src/Dockerfile .
 ```

 ### 2. Build Retriever Image
@@ -23,13 +23,13 @@ docker build --no-cache -t opea/web-retriever-chroma:latest --build-arg https_pr
 ### 3. Build Rerank Image

 ```bash
-docker build --no-cache -t opea/reranking-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/tei/Dockerfile .
+docker build --no-cache -t opea/reranking-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/src/Dockerfile .
 ```

 ### 4. Build LLM Image

 ```bash
-docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 ### 5. Build MegaService Docker Image
@@ -20,7 +20,7 @@ services:
   embedding-tei:
     build:
       context: GenAIComps
-      dockerfile: comps/embeddings/tei/langchain/Dockerfile
+      dockerfile: comps/embeddings/src/Dockerfile
     extends: searchqna
     image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
   web-retriever-chroma:
@@ -32,12 +32,12 @@ services:
   reranking-tei:
     build:
       context: GenAIComps
-      dockerfile: comps/reranks/tei/Dockerfile
+      dockerfile: comps/reranks/src/Dockerfile
     extends: searchqna
     image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
   llm-tgi:
     build:
       context: GenAIComps
-      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
     extends: searchqna
     image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
@@ -34,7 +34,7 @@ Follow the instructions below to build the docker images from source.
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 ### 2. Build MegaService Docker Image
@@ -60,7 +60,7 @@ docker build -t opea/translation-ui:latest --build-arg https_proxy=$https_proxy

 ```bash
 cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/deployment/docker/Dockerfile .
 ```

 Then run the command `docker images`, you will have the following Docker Images:
@@ -14,6 +14,12 @@ services:
       HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
+      host_ip: ${host_ip}
+    healthcheck:
+      test: ["CMD-SHELL", "curl -f http://$host_ip:8008/health || exit 1"]
+      interval: 10s
+      timeout: 10s
+      retries: 100
     volumes:
       - "./data:/data"
     shm_size: 1g
@@ -22,7 +28,8 @@ services:
     image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
     container_name: llm-tgi-server
     depends_on:
-      - tgi-service
+      tgi-service:
+        condition: service_healthy
     ports:
       - "9000:9000"
     ipc: host
@@ -30,7 +37,8 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
-      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_MODEL_ID: ${LLM_MODEL_ID}
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
@@ -26,7 +26,7 @@ Follow the instructions below to build the docker images from source.
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```

 ### 2. Build MegaService Docker Image
@@ -52,7 +52,7 @@ docker build -t opea/translation-ui:latest --build-arg https_proxy=$https_proxy

 ```bash
 cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/deployment/docker/Dockerfile .
 ```

 Then run the command `docker images`, you will have the following four Docker Images:
@@ -20,6 +20,11 @@ services:
       LIMIT_HPU_GRAPH: true
       USE_FLASH_ATTENTION: true
       FLASH_ATTENTION_RECOMPUTE: true
+    healthcheck:
+      test: ["CMD-SHELL", "sleep 500 && exit 0"]
+      interval: 1s
+      timeout: 505s
+      retries: 1
     runtime: habana
     cap_add:
       - SYS_NICE
@@ -31,7 +36,8 @@ services:
     image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
     container_name: llm-tgi-gaudi-server
     depends_on:
-      - tgi-service
+      tgi-service:
+        condition: service_healthy
     ports:
       - "9000:9000"
     ipc: host
@@ -39,7 +45,8 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_MODEL_ID: ${LLM_MODEL_ID}
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
@@ -20,12 +20,12 @@ services:
   llm-tgi:
     build:
       context: GenAIComps
-      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
     extends: translation
     image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
   nginx:
     build:
       context: GenAIComps
-      dockerfile: comps/nginx/Dockerfile
+      dockerfile: comps/3rd_parties/nginx/src/Dockerfile
     extends: translation
     image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
@@ -41,6 +41,7 @@ function start_services() {
     export BACKEND_SERVICE_NAME=translation
     export BACKEND_SERVICE_IP=${ip_address}
     export BACKEND_SERVICE_PORT=8888
+    export host_ip=${ip_address}

     sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
@@ -167,7 +168,7 @@ function main() {

     validate_microservices
     validate_megaservice
-    validate_frontend
+    # validate_frontend

     stop_docker
     echo y | docker system prune
@@ -1 +1 @@
-BASE_URL = 'http://10.7.5.135:8888/v1/translation'
+BASE_URL = 'http://backend_address:8888/v1/translation'
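Replacing the hard-coded lab IP with a `backend_address` placeholder lets the test script substitute the real host at runtime, via the `sed` call added above. Stated on its own (the address-detection line is an assumption; the test script derives `ip_address` elsewhere):

```bash
ip_address=$(hostname -I | awk '{print $1}')        # assumed: first address is reachable
sed -i "s/backend_address/$ip_address/g" ui/svelte/.env
```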
@@ -53,7 +53,7 @@ First of all, you need to build Docker Images locally and install the python pac
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build -t opea/embedding-multimodal-clip:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/multimodal_clip/Dockerfile .
+docker build -t opea/embedding-multimodal-clip:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/native/multimodal/clip/Dockerfile .
 ```

 ### 2. Build Retriever Image
@@ -26,7 +26,7 @@ services:
   embedding-multimodal-clip:
     build:
       context: GenAIComps
-      dockerfile: comps/embeddings/multimodal_clip/Dockerfile
+      dockerfile: comps/embeddings/src/integrations/dependency/clip/Dockerfile
     extends: videoqna
     image: ${REGISTRY:-opea}/embedding-multimodal-clip:${TAG:-latest}
   retriever-vdms:
@@ -248,4 +248,4 @@ function main() {

 }

-main
+# main
@@ -12,7 +12,7 @@ First of all, you need to build Docker Images locally and install the python pac
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
 docker build --no-cache -t opea/lvm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/tgi-llava/Dockerfile .
-docker build --no-cache -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build --no-cache -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/deployment/docker/Dockerfile .
 ```

 ### 2. Build MegaService Docker Image
@@ -42,7 +42,7 @@ First of all, you need to build Docker Images locally and install the python pac
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
 docker build --no-cache -t opea/lvm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/tgi-llava/Dockerfile .
-docker build --no-cache -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build --no-cache -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/deployment/docker/Dockerfile .
 ```

 ### 2. Build MegaService Docker Image
@@ -16,12 +16,19 @@ services:
       https_proxy: ${https_proxy}
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
+      host_ip: ${host_ip}
+    healthcheck:
+      test: ["CMD-SHELL", "curl -f http://$host_ip:8399/health || exit 1"]
+      interval: 10s
+      timeout: 10s
+      retries: 60
     command: --model-id ${LVM_MODEL_ID} --max-input-length 4096 --max-total-tokens 8192 --cuda-graphs 0
   lvm-tgi:
     image: ${REGISTRY:-opea}/lvm-tgi:${TAG:-latest}
     container_name: lvm-tgi-xeon-server
     depends_on:
-      - llava-tgi-service
+      llava-tgi-service:
+        condition: service_healthy
     ports:
       - "9399:9399"
     ipc: host
@@ -12,7 +12,7 @@ First of all, you need to build Docker Images locally. This step can be ignored
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
 docker build --no-cache -t opea/lvm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/tgi-llava/Dockerfile .
-docker build --no-cache -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build --no-cache -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/deployment/docker/Dockerfile .
 ```

 ### 2. Pull TGI Gaudi Image
@@ -26,6 +26,6 @@ services:
   nginx:
     build:
       context: GenAIComps
-      dockerfile: comps/nginx/Dockerfile
+      dockerfile: comps/3rd_parties/nginx/src/Dockerfile
     extends: visualqna
     image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
@@ -41,6 +41,7 @@ function start_services() {
     export BACKEND_SERVICE_IP=${ip_address}
     export BACKEND_SERVICE_PORT=8888
     export NGINX_PORT=80
+    export host_ip=${ip_address}

     sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
@@ -48,7 +49,7 @@ function start_services() {
     docker compose up -d > ${LOG_PATH}/start_services_with_compose.log

     n=0
-    until [[ "$n" -ge 100 ]]; do
+    until [[ "$n" -ge 200 ]]; do
         docker logs lvm-tgi-xeon-server > ${LOG_PATH}/lvm_tgi_service_start.log
         if grep -q Connected ${LOG_PATH}/lvm_tgi_service_start.log; then
             break
@@ -40,73 +40,71 @@ Take ChatQnA for example. ChatQnA is a chatbot application service based on the

## Microservice images

| Microservice Images | Dockerfile | Description |
| --- | --- | --- |
| [opea/agent-langchain](https://hub.docker.com/r/opea/comps-agent-langchain) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/agent/langchain/Dockerfile) | The docker image exposed the OPEA agent microservice for GenAI application use |
| [opea/asr](https://hub.docker.com/r/opea/asr) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/whisper/Dockerfile) | The docker image exposed the OPEA Audio-Speech-Recognition microservice for GenAI application use |
| [opea/chathistory-mongo-server](https://hub.docker.com/r/opea/chathistory-mongo-server) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/chathistory/mongo/Dockerfile) | The docker image exposes OPEA Chat History microservice which based on MongoDB database, designed to allow user to store, retrieve and manage chat conversations |
| [opea/dataprep-milvus](https://hub.docker.com/r/opea/dataprep-milvus) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/milvus/langchain/Dockerfile) | The docker image exposed the OPEA dataprep microservice based on milvus vectordb for GenAI application use |
| [opea/dataprep-multimodal-vdms](https://hub.docker.com/r/opea/dataprep-multimodal-vdms) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/vdms/multimodal_langchain/Dockerfile) | This docker image exposes an OPEA dataprep microservice based on a multi-modal VDMS for use by GenAI applications. |
| [opea/dataprep-multimodal-redis](https://hub.docker.com/r/opea/dataprep-multimodal-redis) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/redis/langchain/Dockerfile) | This docker image exposes an OPEA dataprep microservice based on a multi-modal redis for use by GenAI applications. |
| [opea/dataprep-on-ray-redis](https://hub.docker.com/r/opea/dataprep-on-ray-redis) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/redis/langchain_ray/Dockerfile) | The docker image exposed the OPEA dataprep microservice based on redis vectordb and optimized ray for GenAI application use |
| [opea/dataprep-pgvector](https://hub.docker.com/r/opea/dataprep-pgvector) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/pgvector/langchain/Dockerfile) | The docker image exposed the OPEA dataprep microservice based on pgvector vectordb for GenAI application use |
| [opea/dataprep-pinecone](https://hub.docker.com/r/opea/dataprep-pinecone) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/pinecone/langchain/Dockerfile) | The docker image exposed the OPEA dataprep microservice based on pincone vectordb for GenAI application use |
| [opea/dataprep-qdrant](https://hub.docker.com/r/opea/dataprep-qdrant) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/qdrant/langchain/Dockerfile) | The docker image exposed the OPEA dataprep microservice based on qdrant vectordb for GenAI application use |
| [opea/dataprep-redis](https://hub.docker.com/r/opea/dataprep-redis) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/redis/langchain/Dockerfile) | The docker image exposed the OPEA dataprep microservice based on redis vectordb Langchain framework for GenAI application use |
| [opea/dataprep-redis-llama-index](https://hub.docker.com/r/opea/dataprep-redis-llama-index) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/redis/llama_index/Dockerfile) | The docker image exposed the OPEA dataprep microservice based on redis vectordb LlamaIndex framework for GenAI application use |
| [opea/dataprep-vdms](https://hub.docker.com/r/opea/dataprep-vdms) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/vdms/langchain/Dockerfile) | This docker image exposes an OPEA dataprep microservice based on VDMS vectordb for use by GenAI applications. |
| [opea/embedding-langchain-mosec](https://hub.docker.com/r/opea/embedding-langchain-mosec) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/mosec/langchain/Dockerfile) | The docker image exposed the OPEA mosec embedding microservice base on Langchain framework for GenAI application use |
| [opea/embedding-langchain-mosec-endpoint](https://hub.docker.com/r/opea/embedding-langchain-mosec-endpoint) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/mosec/langchain/dependency/Dockerfile) | The docker image exposed the OPEA mosec embedding endpoint microservice base on Langchain framework for GenAI application use |
| [opea/embedding-multimodal-clip](https://hub.docker.com/r/opea/embedding-multimodal-clip) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/multimodal_clip/Dockerfile) | The docker image exposes OPEA multimodal CLIP-based embedded microservices for use by GenAI applications |
| [opea/embedding-multimodal](https://hub.docker.com/r/opea/embedding-multimodal) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/multimodal/multimodal_langchain/Dockerfile) | The docker image exposes OPEA multimodal embedded microservices for use by GenAI applications |
| [opea/embedding-multimodal-bridgetower](https://hub.docker.com/r/opea/embedding-multimodal-bridgetower) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/multimodal/bridgetower/Dockerfile) | The docker image exposes OPEA multimodal embedded microservices based on bridgetower for use by GenAI applications |
| [opea/embedding-multimodal-bridgetower-gaudi](https://hub.docker.com/r/opea/embedding-multimodal-bridgetower-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/multimodal/bridgetower/Dockerfile.intel_hpu) | The docker image exposes OPEA multimodal embedded microservices based on bridgetower for use by GenAI applications on the Gaudi |
| [opea/embedding-tei](https://hub.docker.com/r/opea/embedding-tei) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/tei/langchain/Dockerfile) | The docker image exposed the OPEA embedding microservice upon tei docker image for GenAI application use |
| [opea/embedding-tei-llama-index](https://hub.docker.com/r/opea/embedding-tei-llama-index) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/tei/llama_index/Dockerfile) | The docker image exposed the OPEA embedding microservice upon tei docker image base on LlamaIndex framework for GenAI application use |
| [opea/feedbackmanagement](https://hub.docker.com/r/opea/feedbackmanagement) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/feedback_management/mongo/Dockerfile) | The docker image exposes that the OPEA feedback management microservice uses a MongoDB database for GenAI applications. |
| [opea/finetuning](https://hub.docker.com/r/opea/finetuning) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/Dockerfile) | The docker image exposed the OPEA Fine-tuning microservice for GenAI application use |
| [opea/finetuning-gaudi](https://hub.docker.com/r/opea/finetuning-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/Dockerfile.intel_hpu) | The docker image exposed the OPEA Fine-tuning microservice for GenAI application use on the Gaudi |
| [opea/gmcrouter](https://hub.docker.com/r/opea/gmcrouter) | [Link](https://github.com/opea-project/GenAIInfra/blob/main/microservices-connector/Dockerfile.manager) | The docker image served as one of key parts of the OPEA GenAI Microservice Connector(GMC) to route the traffic among the microservices defined in GMC |
| [opea/gmcmanager](https://hub.docker.com/r/opea/gmcmanager) | [Link](https://github.com/opea-project/GenAIInfra/blob/main/microservices-connector/Dockerfile.router) | The docker image served as one of key parts of the OPEA GenAI Microservice Connector(GMC) to be controller manager to handle GMC CRD |
| [opea/guardrails-tgi](https://hub.docker.com/r/opea/guardrails-tgi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/llama_guard/langchain/Dockerfile) | The docker image exposed the OPEA guardrail microservice to provide content review for GenAI application use |
| [opea/guardrails-toxicity-detection](https://hub.docker.com/r/opea/guardrails-toxicity-detection) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/toxicity_detection/Dockerfile) | The docker image exposed the OPEA guardrail microservice to provide toxicity detection for GenAI application use |
| [opea/guardrails-pii-detection](https://hub.docker.com/r/opea/guardrails-pii-detection) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/pii_detection/Dockerfile) | The docker image exposed the OPEA guardrail microservice to provide PII detection for GenAI application use |
| [opea/llm-docsum-tgi](https://hub.docker.com/r/opea/llm-docsum-tgi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/summarization/tgi/langchain/Dockerfile) | This docker image is designed to build a document summarization microservice using the HuggingFace Text Generation Inference(TGI) framework. The microservice accepts document input and generates a document summary. |
| [opea/llm-faqgen-tgi](https://hub.docker.com/r/opea/llm-faqgen-tgi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/faq-generation/tgi/langchain/Dockerfile) | This docker image is designed to build a frequently asked questions microservice using the HuggingFace Text Generation Inference(TGI) framework. The microservice accepts document input and generates a FAQ. |
|
||||
| [opea/llm-native](https://hub.docker.com/r/opea/llm-native) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/text-generation/native/langchain/Dockerfile) | The docker image exposed the OPEA LLM microservice based on native for GenAI application use |
|
||||
| [opea/llm-ollama](https://hub.docker.com/r/opea/llm-ollama) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/text-generation/ollama/langchain/Dockerfile) | The docker image exposed the OPEA LLM microservice based on ollama for GenAI application use |
|
||||
| [opea/llm-tgi](https://hub.docker.com/r/opea/llm-tgi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/text-generation/tgi/Dockerfile) | The docker image exposed the OPEA LLM microservice upon TGI docker image for GenAI application use |
|
||||
| [opea/llm-vllm](https://hub.docker.com/r/opea/llm-vllm) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/text-generation/vllm/langchain/Dockerfile) | The docker image exposed the OPEA LLM microservice upon vLLM docker image for GenAI application use |
|
||||
| [opea/llm-vllm-llamaindex](https://hub.docker.com/r/opea/llm-vllm-llamaindex) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/text-generation/vllm/llama_index/Dockerfile) | This docker image exposes OPEA LLM microservices to the llamaindex framework's vLLM Docker image for use by GenAI applications |
|
||||
| [opea/llava-gaudi](https://hub.docker.com/r/opea/llava-hpu) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/llava/dependency/Dockerfile.intel_hpu) | The docker image exposed the OPEA microservice running LLaVA as a large visual model (LVM) service for GenAI application use on the Gaudi |
|
||||
| [opea/lvm-tgi](https://hub.docker.com/r/opea/lvm-tgi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/tgi-llava/Dockerfile) | This docker image is designed to build a large visual model (LVM) microservice using the HuggingFace Text Generation Inference(TGI) framework. The microservice accepts document input and generates a answer to question. |
|
||||
| [opea/lvm-llava](https://hub.docker.com/r/opea/lvm-llava) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/llava/dependency/Dockerfile) | The docker image exposed the OPEA microservice running LLaVA as a large visual model (LVM) server for GenAI application use |
|
||||
| [opea/lvm-llava-svc](https://hub.docker.com/r/opea/lvm-llava-svc) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/llava/Dockerfile) | The docker image exposed the OPEA microservice running LLaVA as a large visual model (LVM) service for GenAI application use |
|
||||
| [opea/lvm-video-llama](https://hub.docker.com/r/opea/lvm-video-llama) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/video-llama/Dockerfile) | The docker image exposed the OPEA microservice running Video-Llama as a large visual model (LVM) for GenAI application use |
|
||||
| [opea/nginx](https://hub.docker.com/r/opea/nginx) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/nginx/Dockerfile) | The docker image exposed the OPEA nginx microservice for GenAI application use |
|
||||
| [opea/promptregistry-mongo-server](https://hub.docker.com/r/opea/promptregistry-mongo-server) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/prompt_registry/mongo/Dockerfile) | The docker image exposes the OPEA Prompt Registry microservices which based on MongoDB database, designed to store and retrieve user's preferred prompts |
|
||||
| [opea/reranking-videoqna](https://hub.docker.com/r/opea/reranking-videoqna) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/reranks/videoqna/Dockerfile) | The docker image exposed the OPEA reranking microservice for reranking the results of VideoQnA use casesfor GenAI application use |
|
||||
| [opea/reranking-fastrag](https://hub.docker.com/r/opea/reranking-fastrag) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/reranks/fastrag/Dockerfile) | The docker image exposed the OPEA reranking microservice base on fastrag for GenAI application use |
|
||||
| [opea/reranking-langchain-mosec](https://hub.docker.com/r/opea/reranking-langchain-mosec) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/reranks/mosec/langchain/Dockerfile) | The docker image exposed the OPEA mosec reranking microservice base on Langchain framework for GenAI application use |
|
||||
| [opea/reranking-langchain-mosec-endpoint](https://hub.docker.com/r/opea/reranking-langchain-mosec-endpoint) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/reranks/mosec/langchain/dependency/Dockerfile) | The docker image exposed the OPEA mosec reranking endpoint microservice base on Langchain framework for GenAI application use |
|
||||
| [opea/reranking-tei](https://hub.docker.com/r/opea/reranking-tei) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/reranks/tei/Dockerfile) | The docker image exposed the OPEA reranking microservice based on tei docker image for GenAI application use |
|
||||
| [opea/retriever-milvus](https://hub.docker.com/r/opea/retriever-milvus) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/milvus/langchain/Dockerfile) | The docker image exposed the OPEA retrieval microservice based on milvus vectordb for GenAI application use |
|
||||
| [opea/retriever-pathway](https://hub.docker.com/r/opea/retriever-pathway) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/pathway/langchain/Dockerfile) | The docker image exposed the OPEA retrieval microservice with pathway for GenAI application use |
|
||||
| [opea/retriever-pgvector](https://hub.docker.com/r/opea/retriever-pgvector) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/pgvector/langchain/Dockerfile) | The docker image exposed the OPEA retrieval microservice based on pgvector vectordb for GenAI application use |
|
||||
| [opea/retriever-pinecone](https://hub.docker.com/r/opea/retriever-pinecone) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/pinecone/langchain/Dockerfile) | The docker image exposed the OPEA retrieval microservice based on pinecone vectordb for GenAI application use |
|
||||
| [opea/retriever-qdrant](https://hub.docker.com/r/opea/retriever-qdrant) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/qdrant/haystack/Dockerfile) | The docker image exposed the OPEA retrieval microservice based on qdrant vectordb for GenAI application use |
|
||||
| [opea/retriever-redis](https://hub.docker.com/r/opea/retriever-redis) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/redis/langchain/Dockerfile) | The docker image exposed the OPEA retrieval microservice based on redis vectordb for GenAI application use |
|
||||
| [opea/retriever-redis-llamaindex](https://hub.docker.com/r/opea/retriever-redis-llamaindex) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/redis/llama_index/Dockerfile) | The docker image exposed the OPEA retriever service based on LlamaIndex for GenAI application use |
|
||||
| [opea/retriever-vdms](https://hub.docker.com/r/opea/retriever-vdms) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/vdms/langchain/Dockerfile) | The docker image exposed the OPEA retriever service based on Visual Data Management System for GenAI application use |
|
||||
| [opea/speecht5](https://hub.docker.com/r/opea/speecht5) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/speecht5/dependency/Dockerfile) | The docker image exposed the OPEA SpeechT5 service for GenAI application use |
|
||||
| [opea/speecht5-gaudi](https://hub.docker.com/r/opea/speecht5-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/speecht5/dependency/Dockerfile.intel_hpu) | The docker image exposed the OPEA SpeechT5 service on Gaudi2 for GenAI application use |
|
||||
| [opea/tei-gaudi](https://hub.docker.com/r/opea/tei-gaudi/tags) | [Link](https://github.com/huggingface/tei-gaudi/blob/habana-main/Dockerfile-hpu) | The docker image powered by HuggingFace Text Embedding Inference (TEI) on Gaudi2 for deploying and serving Embedding Models |
|
||||
| [opea/vectorstore-pathway](https://hub.docker.com/r/opea/vectorstore-pathway) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/vectorstores/pathway/Dockerfile) | The docker image exposed the OPEA Vectorstores microservice with Pathway for GenAI application use |
|
||||
| [opea/video-llama-lvm-server](https://hub.docker.com/r/opea/video-llama-lvm-server) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/video-llama/dependency/Dockerfile) | The docker image exposed the OPEA microservice running Video-Llama as a large visual model (LVM) server for GenAI application use |
|
||||
| [opea/tts](https://hub.docker.com/r/opea/tts) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/speecht5/Dockerfile) | The docker image exposed the OPEA Text-To-Speech microservice for GenAI application use |
|
||||
| [opea/vllm](https://hub.docker.com/r/opea/vllm) | [Link](https://github.com/vllm-project/vllm/blob/main/Dockerfile.cpu) | The docker image powered by vllm-project for deploying and serving vllm Models |
|
||||
| [opea/vllm-gaudi]() | [Link](https://github.com/HabanaAI/vllm-fork/blob/habana_main/Dockerfile.hpu) | The docker image powered by vllm-fork for deploying and serving vllm-gaudi Models |
|
||||
| [opea/vllm-openvino](https://hub.docker.com/r/opea/vllm-openvino) | [Link](https://github.com/vllm-project/vllm/blob/main/Dockerfile.openvino) | The docker image powered by vllm-project for deploying and serving vllm Models of the Openvino Framework |
|
||||
| [opea/web-retriever-chroma](https://hub.docker.com/r/opea/web-retriever-chroma) | [Link](https://github.com/opea-project/GenAIComps/tree/main/comps/web_retrievers/chroma/langchain/Dockerfile) | The docker image exposed the OPEA retrieval microservice based on chroma vectordb for GenAI application use |
|
||||
| [opea/whisper](https://hub.docker.com/r/opea/whisper) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/whisper/dependency/Dockerfile) | The docker image exposed the OPEA Whisper service for GenAI application use |
|
||||
| [opea/whisper-gaudi](https://hub.docker.com/r/opea/whisper-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/whisper/dependency/Dockerfile.intel_hpu) | The docker image exposed the OPEA Whisper service on Gaudi2 for GenAI application use |
|
||||
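All of the images above are published under the `opea` organization on Docker Hub, so they can be pulled and run directly instead of being built from the linked Dockerfiles. A minimal sketch for starting the Whisper service (the `latest` tag and the 7066 port mapping are illustrative assumptions; check the service's README for its actual listening port):

```bash
# Pull the prebuilt image from Docker Hub (tag assumed here)
docker pull opea/whisper:latest

# Run it detached, forwarding proxy settings if needed; the port mapping
# is an assumption, not a guarantee for every release
docker run -d --name whisper-service \
  -p 7066:7066 \
  -e http_proxy=$http_proxy -e https_proxy=$https_proxy \
  opea/whisper:latest
```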
| Microservice Images | Dockerfile | Description |
| ------------------- | ---------- | ----------- |
| [opea/agent-langchain](https://hub.docker.com/r/opea/comps-agent-langchain) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/agent/langchain/Dockerfile) | The docker image exposes the OPEA agent microservice for GenAI application use |
| [opea/asr](https://hub.docker.com/r/opea/asr) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/whisper/Dockerfile) | The docker image exposes the OPEA Audio-Speech-Recognition microservice for GenAI application use |
| [opea/chathistory-mongo-server](https://hub.docker.com/r/opea/chathistory-mongo-server) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/chathistory/mongo/Dockerfile) | The docker image exposes the OPEA Chat History microservice, which is based on a MongoDB database and designed to allow users to store, retrieve and manage chat conversations |
| [opea/dataprep-milvus](https://hub.docker.com/r/opea/dataprep-milvus) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/milvus/langchain/Dockerfile) | The docker image exposes the OPEA dataprep microservice based on milvus vectordb for GenAI application use |
| [opea/dataprep-multimodal-vdms](https://hub.docker.com/r/opea/dataprep-multimodal-vdms) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/vdms/multimodal_langchain/Dockerfile) | This docker image exposes an OPEA dataprep microservice based on a multi-modal VDMS for use by GenAI applications. |
| [opea/dataprep-multimodal-redis](https://hub.docker.com/r/opea/dataprep-multimodal-redis) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/redis/langchain/Dockerfile) | This docker image exposes an OPEA dataprep microservice based on a multi-modal redis for use by GenAI applications. |
| [opea/dataprep-on-ray-redis](https://hub.docker.com/r/opea/dataprep-on-ray-redis) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/redis/langchain_ray/Dockerfile) | The docker image exposes the OPEA dataprep microservice based on redis vectordb and optimized Ray for GenAI application use |
| [opea/dataprep-pgvector](https://hub.docker.com/r/opea/dataprep-pgvector) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/pgvector/langchain/Dockerfile) | The docker image exposes the OPEA dataprep microservice based on pgvector vectordb for GenAI application use |
| [opea/dataprep-pinecone](https://hub.docker.com/r/opea/dataprep-pinecone) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/pinecone/langchain/Dockerfile) | The docker image exposes the OPEA dataprep microservice based on pinecone vectordb for GenAI application use |
| [opea/dataprep-qdrant](https://hub.docker.com/r/opea/dataprep-qdrant) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/qdrant/langchain/Dockerfile) | The docker image exposes the OPEA dataprep microservice based on qdrant vectordb for GenAI application use |
| [opea/dataprep-redis](https://hub.docker.com/r/opea/dataprep-redis) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/redis/langchain/Dockerfile) | The docker image exposes the OPEA dataprep microservice based on redis vectordb and the Langchain framework for GenAI application use |
| [opea/dataprep-redis-llama-index](https://hub.docker.com/r/opea/dataprep-redis-llama-index) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/redis/llama_index/Dockerfile) | The docker image exposes the OPEA dataprep microservice based on redis vectordb and the LlamaIndex framework for GenAI application use |
| [opea/dataprep-vdms](https://hub.docker.com/r/opea/dataprep-vdms) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/vdms/langchain/Dockerfile) | This docker image exposes an OPEA dataprep microservice based on VDMS vectordb for use by GenAI applications. |
| [opea/embedding-langchain-mosec](https://hub.docker.com/r/opea/embedding-langchain-mosec) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/3rd_parties/nginx/deployment/docker/Dockerfile) | The docker image exposes the OPEA mosec embedding microservice based on the Langchain framework for GenAI application use |
| [opea/embedding-langchain-mosec-endpoint](https://hub.docker.com/r/opea/embedding-langchain-mosec-endpoint) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/3rd_parties/mosec/deployment/docker/Dockerfile) | The docker image exposes the OPEA mosec embedding endpoint microservice based on the Langchain framework for GenAI application use |
| [opea/embedding-multimodal-clip](https://hub.docker.com/r/opea/embedding-multimodal-clip) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/native/multimodal/clip/Dockerfile) | The docker image exposes the OPEA multimodal CLIP-based embedding microservice for use by GenAI applications |
| [opea/embedding-multimodal](https://hub.docker.com/r/opea/embedding-multimodal) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/native/multimodal/bridgetower/wrapper/Dockerfile) | The docker image exposes the OPEA multimodal embedding microservice for use by GenAI applications |
| [opea/embedding-multimodal-bridgetower](https://hub.docker.com/r/opea/embedding-multimodal-bridgetower) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/native/multimodal/bridgetower/dependency/Dockerfile) | The docker image exposes the OPEA multimodal embedding microservice based on bridgetower for use by GenAI applications |
| [opea/embedding-multimodal-bridgetower-gaudi](https://hub.docker.com/r/opea/embedding-multimodal-bridgetower-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/native/multimodal/bridgetower/dependency/Dockerfile.intel_hpu) | The docker image exposes the OPEA multimodal embedding microservice based on bridgetower for use by GenAI applications on Gaudi |
| [opea/embedding-tei](https://hub.docker.com/r/opea/embedding-tei) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/src/Dockerfile) | The docker image exposes the OPEA embedding microservice built upon the TEI docker image for GenAI application use |
| [opea/feedbackmanagement](https://hub.docker.com/r/opea/feedbackmanagement) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/feedback_management/mongo/Dockerfile) | The docker image exposes the OPEA feedback management microservice, which uses a MongoDB database, for GenAI applications. |
| [opea/finetuning](https://hub.docker.com/r/opea/finetuning) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/Dockerfile) | The docker image exposes the OPEA Fine-tuning microservice for GenAI application use |
| [opea/finetuning-gaudi](https://hub.docker.com/r/opea/finetuning-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/Dockerfile.intel_hpu) | The docker image exposes the OPEA Fine-tuning microservice for GenAI application use on Gaudi |
| [opea/gmcrouter](https://hub.docker.com/r/opea/gmcrouter) | [Link](https://github.com/opea-project/GenAIInfra/blob/main/microservices-connector/Dockerfile.router) | This docker image serves as a key part of the OPEA GenAI Microservice Connector (GMC), routing traffic among the microservices defined in GMC |
| [opea/gmcmanager](https://hub.docker.com/r/opea/gmcmanager) | [Link](https://github.com/opea-project/GenAIInfra/blob/main/microservices-connector/Dockerfile.manager) | This docker image serves as a key part of the OPEA GenAI Microservice Connector (GMC), acting as the controller manager that handles the GMC CRD |
| [opea/guardrails-tgi](https://hub.docker.com/r/opea/guardrails-tgi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/llama_guard/langchain/Dockerfile) | The docker image exposes the OPEA guardrail microservice that provides content review for GenAI application use |
| [opea/guardrails-toxicity-detection](https://hub.docker.com/r/opea/guardrails-toxicity-detection) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/toxicity_detection/Dockerfile) | The docker image exposes the OPEA guardrail microservice that provides toxicity detection for GenAI application use |
| [opea/guardrails-pii-detection](https://hub.docker.com/r/opea/guardrails-pii-detection) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/pii_detection/Dockerfile) | The docker image exposes the OPEA guardrail microservice that provides PII detection for GenAI application use |
| [opea/llm-docsum-tgi](https://hub.docker.com/r/opea/llm-docsum-tgi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/summarization/tgi/langchain/Dockerfile) | This docker image is designed to build a document summarization microservice using the HuggingFace Text Generation Inference (TGI) framework. The microservice accepts document input and generates a document summary. |
| [opea/llm-faqgen-tgi](https://hub.docker.com/r/opea/llm-faqgen-tgi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/faq-generation/tgi/langchain/Dockerfile) | This docker image is designed to build a frequently asked questions microservice using the HuggingFace Text Generation Inference (TGI) framework. The microservice accepts document input and generates a FAQ. |
| [opea/llm-native](https://hub.docker.com/r/opea/llm-native) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/Dockerfile) | The docker image exposes the OPEA LLM microservice based on native execution for GenAI application use |
| [opea/llm-ollama](https://hub.docker.com/r/opea/llm-ollama) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/Dockerfile) | The docker image exposes the OPEA LLM microservice based on ollama for GenAI application use |
| [opea/llm-tgi](https://hub.docker.com/r/opea/llm-tgi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/Dockerfile) | The docker image exposes the OPEA LLM microservice built upon the TGI docker image for GenAI application use |
| [opea/llm-vllm](https://hub.docker.com/r/opea/llm-vllm) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/Dockerfile) | The docker image exposes the OPEA LLM microservice built upon the vLLM docker image for GenAI application use |
| [opea/llava-gaudi](https://hub.docker.com/r/opea/llava-hpu) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/llava/dependency/Dockerfile.intel_hpu) | The docker image exposes the OPEA microservice running LLaVA as a large visual model (LVM) service for GenAI application use on Gaudi |
| [opea/lvm-tgi](https://hub.docker.com/r/opea/lvm-tgi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/tgi-llava/Dockerfile) | This docker image is designed to build a large visual model (LVM) microservice using the HuggingFace Text Generation Inference (TGI) framework. The microservice accepts document input and generates an answer to a question. |
| [opea/lvm-llava](https://hub.docker.com/r/opea/lvm-llava) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/llava/dependency/Dockerfile) | The docker image exposes the OPEA microservice running LLaVA as a large visual model (LVM) server for GenAI application use |
| [opea/lvm-llava-svc](https://hub.docker.com/r/opea/lvm-llava-svc) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/llava/Dockerfile) | The docker image exposes the OPEA microservice running LLaVA as a large visual model (LVM) service for GenAI application use |
| [opea/lvm-video-llama](https://hub.docker.com/r/opea/lvm-video-llama) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/video-llama/Dockerfile) | The docker image exposes the OPEA microservice running Video-Llama as a large visual model (LVM) for GenAI application use |
| [opea/nginx](https://hub.docker.com/r/opea/nginx) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/3rd_parties/nginx/deployment/docker/Dockerfile) | The docker image exposes the OPEA nginx microservice for GenAI application use |
| [opea/promptregistry-mongo-server](https://hub.docker.com/r/opea/promptregistry-mongo-server) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/prompt_registry/mongo/Dockerfile) | The docker image exposes the OPEA Prompt Registry microservice, which is based on a MongoDB database and designed to store and retrieve users' preferred prompts |
| [opea/reranking-videoqna](https://hub.docker.com/r/opea/reranking-videoqna) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/reranks/videoqna/Dockerfile) | The docker image exposes the OPEA reranking microservice for reranking the results of VideoQnA use cases for GenAI application use |
| [opea/reranking-fastrag](https://hub.docker.com/r/opea/reranking-fastrag) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/reranks/src/Dockerfile) | The docker image exposes the OPEA reranking microservice based on fastrag for GenAI application use |
| [opea/reranking-langchain-mosec](https://hub.docker.com/r/opea/reranking-langchain-mosec) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/reranks/src/Dockerfile) | The docker image exposes the OPEA mosec reranking microservice based on the Langchain framework for GenAI application use |
| [opea/reranking-langchain-mosec-endpoint](https://hub.docker.com/r/opea/reranking-langchain-mosec-endpoint) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/reranks/src/Dockerfile) | The docker image exposes the OPEA mosec reranking endpoint microservice based on the Langchain framework for GenAI application use |
| [opea/reranking-tei](https://hub.docker.com/r/opea/reranking-tei) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/reranks/src/Dockerfile) | The docker image exposes the OPEA reranking microservice based on the TEI docker image for GenAI application use |
| [opea/retriever-milvus](https://hub.docker.com/r/opea/retriever-milvus) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/milvus/langchain/Dockerfile) | The docker image exposes the OPEA retrieval microservice based on milvus vectordb for GenAI application use |
| [opea/retriever-pathway](https://hub.docker.com/r/opea/retriever-pathway) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/pathway/langchain/Dockerfile) | The docker image exposes the OPEA retrieval microservice with pathway for GenAI application use |
| [opea/retriever-pgvector](https://hub.docker.com/r/opea/retriever-pgvector) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/pgvector/langchain/Dockerfile) | The docker image exposes the OPEA retrieval microservice based on pgvector vectordb for GenAI application use |
| [opea/retriever-pinecone](https://hub.docker.com/r/opea/retriever-pinecone) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/pinecone/langchain/Dockerfile) | The docker image exposes the OPEA retrieval microservice based on pinecone vectordb for GenAI application use |
| [opea/retriever-qdrant](https://hub.docker.com/r/opea/retriever-qdrant) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/qdrant/haystack/Dockerfile) | The docker image exposes the OPEA retrieval microservice based on qdrant vectordb for GenAI application use |
| [opea/retriever-redis](https://hub.docker.com/r/opea/retriever-redis) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/redis/langchain/Dockerfile) | The docker image exposes the OPEA retrieval microservice based on redis vectordb for GenAI application use |
| [opea/retriever-redis-llamaindex](https://hub.docker.com/r/opea/retriever-redis-llamaindex) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/redis/llama_index/Dockerfile) | The docker image exposes the OPEA retriever service based on LlamaIndex for GenAI application use |
| [opea/retriever-vdms](https://hub.docker.com/r/opea/retriever-vdms) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/vdms/langchain/Dockerfile) | The docker image exposes the OPEA retriever service based on the Visual Data Management System (VDMS) for GenAI application use |
| [opea/speecht5](https://hub.docker.com/r/opea/speecht5) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/speecht5/dependency/Dockerfile) | The docker image exposes the OPEA SpeechT5 service for GenAI application use |
| [opea/speecht5-gaudi](https://hub.docker.com/r/opea/speecht5-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/speecht5/dependency/Dockerfile.intel_hpu) | The docker image exposes the OPEA SpeechT5 service on Gaudi2 for GenAI application use |
| [opea/tei-gaudi](https://hub.docker.com/r/opea/tei-gaudi/tags) | [Link](https://github.com/huggingface/tei-gaudi/blob/habana-main/Dockerfile-hpu) | The docker image is powered by HuggingFace Text Embedding Inference (TEI) on Gaudi2 for deploying and serving embedding models |
| [opea/vectorstore-pathway](https://hub.docker.com/r/opea/vectorstore-pathway) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/vectorstores/pathway/Dockerfile) | The docker image exposes the OPEA Vectorstores microservice with Pathway for GenAI application use |
| [opea/video-llama-lvm-server](https://hub.docker.com/r/opea/video-llama-lvm-server) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/video-llama/dependency/Dockerfile) | The docker image exposes the OPEA microservice running Video-Llama as a large visual model (LVM) server for GenAI application use |
| [opea/tts](https://hub.docker.com/r/opea/tts) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/speecht5/Dockerfile) | The docker image exposes the OPEA Text-To-Speech microservice for GenAI application use |
| [opea/vllm](https://hub.docker.com/r/opea/vllm) | [Link](https://github.com/vllm-project/vllm/blob/main/Dockerfile.cpu) | The docker image is powered by vllm-project for deploying and serving vLLM models |
| [opea/vllm-gaudi]() | [Link](https://github.com/HabanaAI/vllm-fork/blob/habana_main/Dockerfile.hpu) | The docker image is powered by vllm-fork for deploying and serving vLLM models on Gaudi |
| [opea/vllm-openvino](https://hub.docker.com/r/opea/vllm-openvino) | [Link](https://github.com/vllm-project/vllm/blob/main/Dockerfile.openvino) | The docker image is powered by vllm-project for deploying and serving vLLM models with the OpenVINO framework |
| [opea/web-retriever-chroma](https://hub.docker.com/r/opea/web-retriever-chroma) | [Link](https://github.com/opea-project/GenAIComps/tree/main/comps/web_retrievers/chroma/langchain/Dockerfile) | The docker image exposes the OPEA web retriever microservice based on chroma vectordb for GenAI application use |
| [opea/whisper](https://hub.docker.com/r/opea/whisper) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/whisper/dependency/Dockerfile) | The docker image exposes the OPEA Whisper service for GenAI application use |
| [opea/whisper-gaudi](https://hub.docker.com/r/opea/whisper-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/whisper/dependency/Dockerfile.intel_hpu) | The docker image exposes the OPEA Whisper service on Gaudi2 for GenAI application use |
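Each Dockerfile link in the table resolves to a path inside the GenAIComps repository (or the other repository named in the link), so any of these images can also be rebuilt locally. A minimal sketch, assuming a fresh clone of GenAIComps and using the refactored `comps/embeddings/src/Dockerfile` path listed above for `opea/embedding-tei` (the proxy build args are only needed behind a proxy):

```bash
# Clone the component repository and build from its root so the Dockerfile's
# relative COPY paths resolve
git clone https://github.com/opea-project/GenAIComps.git
cd GenAIComps

docker build -t opea/embedding-tei:latest \
  --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy \
  -f comps/embeddings/src/Dockerfile .
```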