Refine third parties links (#1764)

Signed-off-by: Spycsh <sihan.chen@intel.com>
This commit is contained in:
Spycsh
2025-04-08 18:39:13 +08:00
committed by GitHub
parent 12932477ee
commit d4952d1e7c
18 changed files with 107 additions and 107 deletions

View File

@@ -517,7 +517,7 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}
repository: opea/gpt-sovits
short-description: "The docker image exposed the OPEA GPT-SoVITS service for GenAI application use."
readme-filepath: GenAIComps/comps/tts/src/integrations/dependency/gpt-sovits/README.md
readme-filepath: GenAIComps/comps/third_parties/gpt-sovits/src/README.md
enable-url-completion: false
- name: Description for
@@ -697,7 +697,7 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}
repository: opea/lvm-llava
short-description: "The docker image exposed the OPEA microservice running LLaVA as a large visual model (LVM) server for GenAI application use."
readme-filepath: GenAIComps/comps/lvms/src/integrations/dependency/llava/README.md
readme-filepath: GenAIComps/comps/third_parties/llava/src/README.md
enable-url-completion: false
- name: Description for
@@ -707,7 +707,7 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}
repository: opea/lvm-video-llama
short-description: "The docker image exposed the OPEA microservice running Video-Llama as a large visual model (LVM) for GenAI application use."
readme-filepath: GenAIComps/comps/lvms/src/integrations/dependency/video-llama/README.md
readme-filepath: GenAIComps/comps/third_parties/video-llama/src/README.md
enable-url-completion: false
- name: Description for
@@ -717,7 +717,7 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}
repository: opea/lvm-predictionguard
short-description: "The docker image exposed the OPEA microservice running predictionguard as a large visual model (LVM) server for GenAI application use."
readme-filepath: GenAIComps/comps/lvms/src/integrations/dependency/predictionguard/README.md
readme-filepath: GenAIComps/comps/third_parties/predictionguard/src/README.md
enable-url-completion: false
- name: Description for
@@ -727,7 +727,7 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}
repository: opea/llava-gaudi
short-description: "The docker image exposed the OPEA microservice running LLaVA as a large visual model (LVM) service for GenAI application use on the Gaudi2."
readme-filepath: GenAIComps/comps/lvms/src/integrations/dependency/llava/README.md
readme-filepath: GenAIComps/comps/third_parties/llava/src/README.md
enable-url-completion: false
- name: Description for
@@ -737,7 +737,7 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}
repository: opea/lvm-llama-vision
short-description: "The docker image exposed the OPEA microservice running Llama Vision as the base large visual model service for GenAI application use."
readme-filepath: GenAIComps/comps/lvms/src/integrations/dependency/llama-vision/README.md
readme-filepath: GenAIComps/comps/third_parties/llama-vision/src/README.md
enable-url-completion: false
- name: Description for
@@ -747,7 +747,7 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}
repository: opea/lvm-llama-vision-tp
short-description: "The docker image exposed the OPEA microservice running Llama Vision with deepspeed as the base large visual model service for GenAI application use."
readme-filepath: GenAIComps/comps/lvms/src/integrations/dependency/llama-vision/README.md
readme-filepath: GenAIComps/comps/third_parties/llama-vision/src/README.md
enable-url-completion: false
- name: Description for lvm-llama-vision-guard
@@ -757,7 +757,7 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}
repository: opea/lvm-llama-vision-guard
short-description: "The docker image exposed the OPEA microservice running Llama Vision Guard as the base large visual model service for GenAI application use."
readme-filepath: GenAIComps/comps/lvms/src/integrations/dependency/llama-vision/README.md
readme-filepath: GenAIComps/comps/third_parties/llama-vision/src/README.md
enable-url-completion: false
- name: Description for promptregistry-mongo
@@ -857,7 +857,7 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}
repository: opea/gpt-sovits
short-description: "The docker image exposed the OPEA gpt-sovits service for GenAI application use."
readme-filepath: GenAIComps/comps/tts/src/integrations/dependency/gpt-sovits/README.md
readme-filepath: GenAIComps/comps/third_parties/gpt-sovits/src/README.md
enable-url-completion: false
- name: Description for nginx

View File

@@ -18,7 +18,7 @@ cd GenAIComps
### 2. Build ASR Image
```bash
docker build -t opea/whisper:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/src/integrations/dependency/whisper/Dockerfile .
docker build -t opea/whisper:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/whisper/src/Dockerfile .
```
### 3. Build vLLM Image
@@ -34,10 +34,10 @@ docker build --no-cache --build-arg https_proxy=$https_proxy --build-arg http_pr
### 4. Build TTS Image
```bash
docker build -t opea/speecht5:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/src/integrations/dependency/speecht5/Dockerfile .
docker build -t opea/speecht5:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/speecht5/src/Dockerfile .
# multilang tts (optional)
docker build -t opea/gpt-sovits:latest --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy -f comps/tts/src/integrations/dependency/gpt-sovits/Dockerfile .
docker build -t opea/gpt-sovits:latest --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy -f comps/third_parties/gpt-sovits/src/Dockerfile .
```
### 5. Build MegaService Docker Image
@@ -177,7 +177,7 @@ to the response, decode the base64 string and save it as a .wav file.
```bash
# if you are using speecht5 as the tts service, voice can be "default" or "male"
# if you are using gpt-sovits for the tts service, you can set the reference audio following https://github.com/opea-project/GenAIComps/blob/main/comps/tts/src/integrations/dependency/gpt-sovits/README.md
# if you are using gpt-sovits for the tts service, you can set the reference audio following https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/gpt-sovits/src/README.md
curl http://${host_ip}:3008/v1/audioqna \
-X POST \
-d '{"audio": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA", "max_tokens":64, "voice":"default"}' \

View File

@@ -18,7 +18,7 @@ cd GenAIComps
### 2. Build ASR Image
```bash
docker build -t opea/whisper-gaudi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/src/integrations/dependency/whisper/Dockerfile.intel_hpu .
docker build -t opea/whisper-gaudi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/whisper/src/Dockerfile.intel_hpu .
```
### 3. Build vLLM Image
@@ -32,7 +32,7 @@ docker build --no-cache --build-arg https_proxy=$https_proxy --build-arg http_pr
### 4. Build TTS Image
```bash
docker build -t opea/speecht5-gaudi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/src/integrations/dependency/speecht5/Dockerfile.intel_hpu .
docker build -t opea/speecht5-gaudi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/speecht5/src/Dockerfile.intel_hpu .
```
### 5. Build MegaService Docker Image

View File

@@ -26,13 +26,13 @@ services:
whisper-gaudi:
build:
context: GenAIComps
dockerfile: comps/asr/src/integrations/dependency/whisper/Dockerfile.intel_hpu
dockerfile: comps/third_parties/whisper/src/Dockerfile.intel_hpu
extends: audioqna
image: ${REGISTRY:-opea}/whisper-gaudi:${TAG:-latest}
whisper:
build:
context: GenAIComps
dockerfile: comps/asr/src/integrations/dependency/whisper/Dockerfile
dockerfile: comps/third_parties/whisper/src/Dockerfile
extends: audioqna
image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
asr:
@@ -50,13 +50,13 @@ services:
speecht5-gaudi:
build:
context: GenAIComps
dockerfile: comps/tts/src/integrations/dependency/speecht5/Dockerfile.intel_hpu
dockerfile: comps/third_parties/speecht5/src/Dockerfile.intel_hpu
extends: audioqna
image: ${REGISTRY:-opea}/speecht5-gaudi:${TAG:-latest}
speecht5:
build:
context: GenAIComps
dockerfile: comps/tts/src/integrations/dependency/speecht5/Dockerfile
dockerfile: comps/third_parties/speecht5/src/Dockerfile
extends: audioqna
image: ${REGISTRY:-opea}/speecht5:${TAG:-latest}
tts:
@@ -68,7 +68,7 @@ services:
gpt-sovits:
build:
context: GenAIComps
dockerfile: comps/tts/src/integrations/dependency/gpt-sovits/Dockerfile
dockerfile: comps/third_parties/gpt-sovits/src/Dockerfile
extends: audioqna
image: ${REGISTRY:-opea}/gpt-sovits:${TAG:-latest}
vllm:

View File

@@ -14,7 +14,7 @@ cd GenAIComps
### 2. Build ASR Image
```bash
docker build -t opea/whisper:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/src/integrations/dependency/whisper/Dockerfile .
docker build -t opea/whisper:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/whisper/src/Dockerfile .
docker build -t opea/asr:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/src/Dockerfile .
@@ -29,7 +29,7 @@ docker build --no-cache -t opea/llm-textgen:latest --build-arg https_proxy=$http
### 4. Build TTS Image
```bash
docker build -t opea/speecht5:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/src/integrations/dependency/speecht5/Dockerfile .
docker build -t opea/speecht5:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/speecht5/src/Dockerfile .
docker build -t opea/tts:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/src/Dockerfile .
```

View File

@@ -14,7 +14,7 @@ cd GenAIComps
### 2. Build ASR Image
```bash
docker build -t opea/whisper:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/src/integrations/dependency/whisper/Dockerfile .
docker build -t opea/whisper:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/whisper/src/Dockerfile .
```
### 3. Build LLM Image
@@ -24,7 +24,7 @@ Intel Xeon optimized image hosted in huggingface repo will be used for TGI servi
### 4. Build TTS Image
```bash
docker build -t opea/speecht5:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/src/integrations/dependency/speecht5/Dockerfile .
docker build -t opea/speecht5:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/speecht5/src/Dockerfile .
```
### 5. Build Animation Image

View File

@@ -14,7 +14,7 @@ cd GenAIComps
### 2. Build ASR Image
```bash
docker build -t opea/whisper-gaudi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/src/integrations/dependency/whisper/Dockerfile.intel_hpu .
docker build -t opea/whisper-gaudi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/whisper/src/Dockerfile.intel_hpu .
```
### 3. Build LLM Image
@@ -24,7 +24,7 @@ Intel Gaudi optimized image hosted in huggingface repo will be used for TGI serv
### 4. Build TTS Image
```bash
docker build -t opea/speecht5-gaudi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/src/integrations/dependency/speecht5/Dockerfile.intel_hpu .
docker build -t opea/speecht5-gaudi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/speecht5/src/Dockerfile.intel_hpu .
```
### 5. Build Animation Image

View File

@@ -14,13 +14,13 @@ services:
whisper-gaudi:
build:
context: GenAIComps
dockerfile: comps/asr/src/integrations/dependency/whisper/Dockerfile.intel_hpu
dockerfile: comps/third_parties/whisper/src/Dockerfile.intel_hpu
extends: avatarchatbot
image: ${REGISTRY:-opea}/whisper-gaudi:${TAG:-latest}
whisper:
build:
context: GenAIComps
dockerfile: comps/asr/src/integrations/dependency/whisper/Dockerfile
dockerfile: comps/third_parties/whisper/src/Dockerfile
extends: avatarchatbot
image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
asr:
@@ -38,13 +38,13 @@ services:
speecht5-gaudi:
build:
context: GenAIComps
dockerfile: comps/tts/src/integrations/dependency/speecht5/Dockerfile.intel_hpu
dockerfile: comps/third_parties/speecht5/src/Dockerfile.intel_hpu
extends: avatarchatbot
image: ${REGISTRY:-opea}/speecht5-gaudi:${TAG:-latest}
speecht5:
build:
context: GenAIComps
dockerfile: comps/tts/src/integrations/dependency/speecht5/Dockerfile
dockerfile: comps/third_parties/speecht5/src/Dockerfile
extends: avatarchatbot
image: ${REGISTRY:-opea}/speecht5:${TAG:-latest}
tts:

View File

@@ -28,7 +28,7 @@ cd GenAIComps
The Whisper Service converts audio files to text. Follow these steps to build and run the service:
```bash
docker build -t opea/whisper:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/src/integrations/dependency/whisper/Dockerfile .
docker build -t opea/whisper:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/whisper/src/Dockerfile .
```
### 2. Build MegaService Docker Image

View File

@@ -20,7 +20,7 @@ cd GenAIComps
The Whisper Service converts audio files to text. Follow these steps to build and run the service:
```bash
docker build -t opea/whisper:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/src/integrations/dependency/whisper/Dockerfile .
docker build -t opea/whisper:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/whisper/src/Dockerfile .
```
### 2. Build MegaService Docker Image

View File

@@ -40,7 +40,7 @@ services:
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
context: GenAIComps
dockerfile: comps/asr/src/integrations/dependency/whisper/Dockerfile
dockerfile: comps/third_parties/whisper/src/Dockerfile
extends: docsum
image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
llm-docsum:

View File

@@ -39,7 +39,7 @@ docker build --no-cache -t opea/embedding:latest --build-arg https_proxy=$https_
Build lvm-llava image
```bash
docker build --no-cache -t opea/lvm-llava:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/src/integrations/dependency/llava/Dockerfile .
docker build --no-cache -t opea/lvm-llava:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/llava/src/Dockerfile .
```
### 3. Build retriever-multimodal-redis Image

View File

@@ -151,7 +151,7 @@ docker build --no-cache -t opea/retriever:latest --build-arg https_proxy=$https_
Build lvm-llava image
```bash
docker build --no-cache -t opea/lvm-llava:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/src/integrations/dependency/llava/Dockerfile .
docker build --no-cache -t opea/lvm-llava:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/llava/src/Dockerfile .
```
Build lvm microservice image
@@ -171,13 +171,13 @@ docker build --no-cache -t opea/dataprep:latest --build-arg https_proxy=$https_p
Build whisper server image
```bash
docker build --no-cache -t opea/whisper:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/src/integrations/dependency/whisper/Dockerfile .
docker build --no-cache -t opea/whisper:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/whisper/src/Dockerfile .
```
### 6. Build TTS Image
```bash
docker build --no-cache -t opea/speecht5:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/src/integrations/dependency/speecht5/Dockerfile .
docker build --no-cache -t opea/speecht5:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/speecht5/src/Dockerfile .
```
### 7. Build MegaService Docker Image

View File

@@ -113,7 +113,7 @@ docker build --no-cache -t opea/dataprep:latest --build-arg https_proxy=$https_p
Build whisper server image
```bash
docker build --no-cache -t opea/whisper:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/src/integrations/dependency/whisper/Dockerfile .
docker build --no-cache -t opea/whisper:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/whisper/src/Dockerfile .
```
### 6. Build TTS Server Image
@@ -121,7 +121,7 @@ docker build --no-cache -t opea/whisper:latest --build-arg https_proxy=$https_pr
Build TTS server image
```bash
docker build --no-cache -t opea/speecht5:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/src/integrations/dependency/speecht5/Dockerfile .
docker build --no-cache -t opea/speecht5:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/speecht5/src/Dockerfile .
```
### 7. Build MegaService Docker Image

View File

@@ -44,7 +44,7 @@ services:
lvm-llava:
build:
context: GenAIComps
dockerfile: comps/lvms/src/integrations/dependency/llava/Dockerfile
dockerfile: comps/third_parties/llava/src/Dockerfile
extends: multimodalqna
image: ${REGISTRY:-opea}/lvm-llava:${TAG:-latest}
lvm:
@@ -62,13 +62,13 @@ services:
whisper:
build:
context: GenAIComps
dockerfile: comps/asr/src/integrations/dependency/whisper/Dockerfile
dockerfile: comps/third_parties/whisper/src/Dockerfile
extends: multimodalqna
image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
speecht5:
build:
context: GenAIComps
dockerfile: comps/tts/src/integrations/dependency/speecht5/Dockerfile
dockerfile: comps/third_parties/speecht5/src/Dockerfile
extends: multimodalqna
image: ${REGISTRY:-opea}/speecht5:${TAG:-latest}
tts:

View File

@@ -71,7 +71,7 @@ docker build -t opea/reranking:latest --build-arg https_proxy=$https_proxy --bui
### 4. Build LVM Image (Xeon)
```bash
docker build -t opea/lvm-video-llama:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/src/integrations/dependency/video-llama/Dockerfile .
docker build -t opea/lvm-video-llama:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/third_parties/video-llama/src/Dockerfile .
# LVM Service Image
docker build -t opea/lvm:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/src/Dockerfile .

View File

@@ -44,7 +44,7 @@ services:
lvm-video-llama:
build:
context: GenAIComps
dockerfile: comps/lvms/src/integrations/dependency/video-llama/Dockerfile
dockerfile: comps/third_parties/video-llama/src/Dockerfile
extends: videoqna
image: ${REGISTRY:-opea}/lvm-video-llama:${TAG:-latest}
lvm:

View File

@@ -45,66 +45,66 @@ Take ChatQnA for example. ChatQnA is a chatbot application service based on the
## Microservice images
| Microservice Images | Dockerfile | Description | Readme |
| ------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- |
| [opea/agent](https://hub.docker.com/r/opea/agent) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/agent/src/Dockerfile) | The docker image exposed the OPEA agent microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/agent/src/README.md) |
| [opea/agent-ui](https://hub.docker.com/r/opea/agent-ui) | [Link](https://github.com/opea-project/GenAIExamples/blob/main/AgentQnA/ui/docker/Dockerfile) | The docker image exposed the OPEA agent microservice UI entry for GenAI application use | [Link](https://github.com/opea-project/GenAIExamples/blob/main/AgentQnA/README.md) |
| [opea/asr](https://hub.docker.com/r/opea/asr) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/src/Dockerfile) | The docker image exposed the OPEA Audio-Speech-Recognition microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/src/README.md) |
| [opea/animation](https://hub.docker.com/r/opea/animation) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/animation/src/Dockerfile) | The purpose of the Docker image is to expose the OPEA Avatar Animation microservice for GenAI application use. | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/animation/src/README.md) |
| [opea/chathistory-mongo](https://hub.docker.com/r/opea/chathistory-mongo) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/chathistory/src/Dockerfile) | The docker image exposes OPEA Chat History microservice which based on MongoDB database, designed to allow user to store, retrieve and manage chat conversations | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/chathistory/src/README.md) |
| [opea/dataprep](https://hub.docker.com/r/opea/dataprep) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/src/Dockerfile) | The docker image exposed the OPEA dataprep microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/README.md) |
| [opea/embedding](https://hub.docker.com/r/opea/embedding) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/src/Dockerfile) | The docker image exposed the OPEA mosec embedding microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/src/README.md) |
| [opea/embedding-multimodal-clip](https://hub.docker.com/r/opea/embedding-multimodal-clip) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/clip/src/Dockerfile) | The docker image exposed the OPEA mosec embedding microservice base on Langchain framework for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/clip/src/README.md) |
| [opea/embedding-multimodal-bridgetower](https://hub.docker.com/r/opea/embedding-multimodal-bridgetower) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/bridgetower/src/Dockerfile) | The docker image exposes OPEA multimodal embedded microservices based on bridgetower for use by GenAI applications | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/bridgetower/src/README.md) |
| [opea/embedding-multimodal-bridgetower-gaudi](https://hub.docker.com/r/opea/embedding-multimodal-bridgetower-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/bridgetower/src/Dockerfile.intel_hpu) | The docker image exposes OPEA multimodal embedded microservices based on bridgetower for use by GenAI applications on the Gaudi | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/bridgetower/src/README.md) |
| [opea/feedbackmanagement-mongo](https://hub.docker.com/r/opea/feedbackmanagement-mongo) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/feedback_management/src/Dockerfile) | The docker image exposes that the OPEA feedback management microservice uses a MongoDB database for GenAI applications. | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/feedback_management/src/README.md) |
| [opea/finetuning](https://hub.docker.com/r/opea/finetuning) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/src/Dockerfile) | The docker image exposed the OPEA Fine-tuning microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/src/README.md) |
| [opea/finetuning-gaudi](https://hub.docker.com/r/opea/finetuning-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/src/Dockerfile.intel_hpu) | The docker image exposed the OPEA Fine-tuning microservice for GenAI application use on the Gaudi | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/src/README.md) |
| [opea/gpt-sovits](https://hub.docker.com/r/opea/gpt-sovits) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/src/integrations/dependency/gpt-sovits/Dockerfile) | The docker image exposed the OPEA GPT-SoVITS service for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/src/integrations/dependency/gpt-sovits/README.md) |
| [opea/guardrails](https://hub.docker.com/r/opea/guardrails) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/guardrails/Dockerfile) | The docker image exposed the OPEA guardrail microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/guardrails/README.md) |
| [opea/guardrails-toxicity-predictionguard](https://hub.docker.com/r/opea/guardrails-toxicity-predictionguard) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/toxicity_detection/Dockerfile) | The docker image exposed the OPEA guardrail microservice to provide toxicity detection for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/toxicity_detection/README.md) |
| [opea/guardrails-pii-predictionguard](https://hub.docker.com/r/opea/guardrails-pii-predictionguard) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/pii_detection/Dockerfile) | The docker image exposed the OPEA guardrail microservice to provide PII detection for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/pii_detection/README.md) |
| [opea/guardrails-injection-predictionguard](https://hub.docker.com/r/opea/guardrails-injection-predictionguard) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/prompt_injection/Dockerfile) | The docker image exposed the OPEA guardrail microservice to provide injection predictionguard for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/prompt_injection/README.md) |
| [opea/guardrails-hallucination-detection](https://hub.docker.com/r/opea/guardrails-hallucination-detection) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/hallucination_detection/Dockerfile) | The docker image exposed the OPEA guardrail microservice to provide hallucination detection for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/hallucination_detection/README.md) |
| [opea/guardrails-factuality-predictionguard](https://hub.docker.com/r/opea/guardrails-factuality-predictionguard) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/factuality_alignment/Dockerfile) | The docker image exposed the OPEA guardrail microservice to provide factuality predictionguard for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/factuality_alignment/README.md) |
| [opea/guardrails-bias-detection](https://hub.docker.com/r/opea/guardrails-bias-detection) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/bias_detection/Dockerfile) | The docker image exposed the OPEA guardrail microservice to provide bias detection for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/bias_detection/README.md) |
| [opea/image2image-gaudi](https://hub.docker.com/r/opea/image2image-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/image2image/src/Dockerfile.intel_hpu) | The purpose of the Docker image is to expose the OPEA Image-to-Image microservice for GenAI application use on the Gaudi. | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/image2image/src/README.md) |
| [opea/image2image](https://hub.docker.com/r/opea/image2image) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/image2image/src/Dockerfile) | The purpose of the Docker image is to expose the OPEA Image-to-Image microservice for GenAI application use. | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/image2image/src/README.md) |
| [opea/image2video-gaudi](https://hub.docker.com/r/opea/image2video-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/image2video/src/Dockerfile.intel_hpu) | The purpose of the Docker image is to expose the OPEA image-to-video microservice for GenAI application use on the Gaudi. | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/image2video/src/README.md) |
| [opea/image2video](https://hub.docker.com/r/opea/image2video) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/image2video/src/Dockerfile) | The purpose of the Docker image is to expose the OPEA image-to-video microservice for GenAI application use. | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/image2video/src/README.md) |
| [opea/llm-textgen](https://hub.docker.com/r/opea/llm-textgen) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/Dockerfile) | The docker image exposed the OPEA LLM microservice upon textgen docker image for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/README.md) |
| [opea/llm-textgen-gaudi](https://hub.docker.com/r/opea/llm-textgen-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/Dockerfile.intel_hpu) | The docker image exposed the OPEA LLM microservice upon textgen docker image for GenAI application use on the Gaudi2 | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/README.md) |
| [opea/llm-eval](https://hub.docker.com/r/opea/llm-eval) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/utils/lm-eval/Dockerfile) | The docker image exposed the OPEA LLM microservice upon eval docker image for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/utils/lm-eval/README.md) |
| [opea/llm-docsum](https://hub.docker.com/r/opea/llm-docsum) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/doc-summarization/Dockerfile) | The docker image exposed the OPEA LLM microservice upon docsum docker image for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/doc-summarization/README.md) |
| [opea/llm-faqgen](https://hub.docker.com/r/opea/llm-faqgen) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/faq-generation/Dockerfile) | This docker image is designed to build a frequently asked questions microservice using the HuggingFace Text Generation Inference(TGI) framework. The microservice accepts document input and generates a FAQ. | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/faq-generation/README.md) |
| [opea/lvm](https://hub.docker.com/r/opea/lvm) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/Dockerfile) | The docker image exposed the OPEA large visual model (LVM) microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/README.md) |
| [opea/lvm-llava](https://hub.docker.com/r/opea/lvm-llava) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/integrations/dependency/llava/Dockerfile) | The docker image exposed the OPEA microservice running LLaVA as a large visual model (LVM) server for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/integrations/dependency/llava/README.md) |
| [opea/lvm-video-llama](https://hub.docker.com/r/opea/lvm-video-llama) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/integrations/dependency/video-llama/Dockerfile) | The docker image exposed the OPEA microservice running Video-Llama as a large visual model (LVM) for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/integrations/dependency/video-llama/README.md) |
| [opea/lvm-predictionguard](https://hub.docker.com/r/opea/lvm-predictionguard) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/integrations/dependency/predictionguard/Dockerfile) | The docker image exposed the OPEA microservice running predictionguard as a large visual model (LVM) server for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/integrations/dependency/predictionguard/README.md) |
| [opea/llava-gaudi](https://hub.docker.com/r/opea/llava-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/integrations/dependency/llava/Dockerfile.intel_hpu) | The docker image exposed the OPEA microservice running LLaVA as a large visual model (LVM) service for GenAI application use on the Gaudi2 | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/integrations/dependency/llava/README.md) |
| [opea/lvm-llama-vision](https://hub.docker.com/r/opea/lvm-llama-vision) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/integrations/dependency/llama-vision/Dockerfile) | The docker image exposed the OPEA microservice running Llama Vision as the base large visual model service for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/integrations/dependency/llama-vision/README.md) |
| [opea/lvm-llama-vision-tp](https://hub.docker.com/r/opea/lvm-llama-vision-tp) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/integrations/dependency/llama-vision/Dockerfile.tp) | The docker image exposed the OPEA microservice running Llama Vision with deepspeed as the base large visual model service for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/integrations/dependency/llama-vision/README.md) |
| [opea/lvm-llama-vision-guard](https://hub.docker.com/r/opea/lvm-llama-vision-guard) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/integrations/dependency/llama-vision/Dockerfile.guard) | The docker image exposed the OPEA microservice running Llama Vision Guard as the base large visual model service for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/integrations/dependency/llama-vision/README.md) |
| [opea/promptregistry-mongo](https://hub.docker.com/r/opea/promptregistry-mongo) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/prompt_registry/src/Dockerfile) | The docker image exposes the OPEA Prompt Registry microservices which based on MongoDB database, designed to store and retrieve user's preferred prompts | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/prompt_registry/src/README.md) |
| [opea/reranking](https://hub.docker.com/r/opea/reranking) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/rerankings/src/Dockerfile) | The docker image exposed the OPEA reranking microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/rerankings/src/README.md) |
| [opea/retriever](https://hub.docker.com/r/opea/retriever) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/src/Dockerfile) | The docker image exposed the OPEA retrieval microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/README.md) |
| [opea/text2image](https://hub.docker.com/r/opea/text2image) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/text2image/src/Dockerfile) | The docker image exposed the OPEA text-to-image microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/text2image/src/README.md) |
| [opea/text2image-gaudi](https://hub.docker.com/r/opea/text2image-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/text2image/src/Dockerfile.intel_hpu) | The docker image exposed the OPEA text-to-image microservice for GenAI application use on the Gaudi | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/text2image/src/README.md) |
| [opea/text2image-ui](https://hub.docker.com/r/opea/text2image-ui) | [Link](https://github.com/opea-project/GenAIExamples/blob/main/Text2Image/ui/docker/Dockerfile) | The docker image exposed the OPEA text-to-image microservice UI entry for GenAI application use | [Link](https://github.com/opea-project/GenAIExamples/blob/main/Text2Image/README.md) |
| [opea/text2sql](https://hub.docker.com/r/opea/text2sql) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/text2sql/src/Dockerfile) | The docker image exposed the OPEA text to Structured Query Language microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/text2sql/src/README.md) |
| [opea/text2sql-react-ui](https://hub.docker.com/r/opea/text2sql-react-ui) | [Link](https://github.com/opea-project/GenAIExamples/blob/main/DBQnA/ui/docker/Dockerfile.react) | The docker image exposed the OPEA text to Structured Query Language microservice react UI entry for GenAI application use | [Link](https://github.com/opea-project/GenAIExamples/blob/main/DBQnA/README.md) |
| [opea/tts](https://hub.docker.com/r/opea/tts) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/src/Dockerfile) | The docker image exposed the OPEA Text-To-Speech microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/src/README.md) |
| [opea/speecht5](https://hub.docker.com/r/opea/speecht5) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/src/integrations/dependency/speecht5/Dockerfile) | The docker image exposed the OPEA SpeechT5 service for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/src/README.md) |
| [opea/speecht5-gaudi](https://hub.docker.com/r/opea/speecht5-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/src/integrations/dependency/speecht5/Dockerfile.intel_hpu) | The docker image exposed the OPEA SpeechT5 service on Gaudi2 for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/src/README.md) |
| [opea/gpt-sovits](https://hub.docker.com/r/opea/gpt-sovits) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/src/integrations/dependency/gpt-sovits/Dockerfile) | The docker image exposed the OPEA gpt-sovits service for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/src/integrations/dependency/gpt-sovits/README.md) |
| [opea/nginx](https://hub.docker.com/r/opea/nginx) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/nginx/src/Dockerfile) | The docker image exposed the OPEA nginx microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/nginx/deployment/kubernetes/README.md) |
| [opea/vectorstore-pathway](https://hub.docker.com/r/opea/vectorstore-pathway) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/pathway/src/Dockerfile) | The docker image exposed the OPEA Vectorstores microservice with Pathway for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/pathway/src/README.md) |
| [opea/wav2lip](https://hub.docker.com/r/opea/wav2lip) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/wav2lip/src/Dockerfile) | The docker image exposed the OPEA Generate lip movements from audio files microservice with Pathway for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/wav2lip/deployment/kubernetes/README.md) |
| [opea/wav2lip-gaudi](https://hub.docker.com/r/opea/wav2lip-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/wav2lip/src/Dockerfile.intel_hpu) | The docker image exposed the OPEA Generate lip movements from audio files microservice with Pathway for GenAI application use on the Gaudi2 | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/wav2lip/deployment/kubernetes/README.md) |
| [opea/vllm-arc](https://hub.docker.com/r/opea/vllm-arc) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/vllm/src/Dockerfile.intel_gpu) | The docker image powered by vllm-project for deploying and serving vllm Models on Arc | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/vllm/README.md) |
| [opea/vllm-openvino](https://hub.docker.com/r/opea/vllm-openvino) | [Link](https://github.com/vllm-project/vllm/blob/v0.6.1/Dockerfile.openvino) | The docker image powered by vllm-project for deploying and serving vllm Models of the Openvino Framework | [Link](https://github.com/vllm-project/vllm/blob/main/README.md) |
| [opea/vllm-gaudi](https://hub.docker.com/r/opea/vllm-gaudi) | [Link](https://github.com/HabanaAI/vllm-fork/blob/habana_main/Dockerfile.hpu) | The docker image powered by vllm-project for deploying and serving vllm Models on Gaudi2 | [Link](https://github.com/HabanaAI/vllm-fork/blob/habana_main/README.md) |
| [opea/vllm](https://hub.docker.com/r/opea/vllm) | [Link](https://github.com/HabanaAI/vllm-fork/blob/habana_main/Dockerfile.cpu) | The docker image powered by vllm-project for deploying and serving vllm Models | [Link](https://github.com/HabanaAI/vllm-fork/blob/habana_main/README.md) |
| [opea/whisper-gaudi](https://hub.docker.com/r/opea/whisper-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/src/integrations/dependency/whisper/Dockerfile.intel_hpu) | The docker image exposed the OPEA Whisper service on Gaudi2 for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/src/README.md) |
| [opea/whisper](https://hub.docker.com/r/opea/whisper) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/src/integrations/dependency/whisper/Dockerfile) | The docker image exposed the OPEA Whisper service for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/src/README.md) |
| [opea/web-retriever](https://hub.docker.com/r/opea/web-retriever) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/web_retrievers/src/Dockerfile) | The docker image exposed the OPEA retrieval microservice based on chroma vectordb for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/web_retrievers/src/README.md) |
| Microservice Images | Dockerfile | Description | Readme |
| ------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ |
| [opea/agent](https://hub.docker.com/r/opea/agent) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/agent/src/Dockerfile) | The docker image exposed the OPEA agent microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/agent/src/README.md) |
| [opea/agent-ui](https://hub.docker.com/r/opea/agent-ui) | [Link](https://github.com/opea-project/GenAIExamples/blob/main/AgentQnA/ui/docker/Dockerfile) | The docker image exposed the OPEA agent microservice UI entry for GenAI application use | [Link](https://github.com/opea-project/GenAIExamples/blob/main/AgentQnA/README.md) |
| [opea/asr](https://hub.docker.com/r/opea/asr) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/src/Dockerfile) | The docker image exposed the OPEA Audio-Speech-Recognition microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/src/README.md) |
| [opea/animation](https://hub.docker.com/r/opea/animation) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/animation/src/Dockerfile) | The purpose of the Docker image is to expose the OPEA Avatar Animation microservice for GenAI application use. | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/animation/src/README.md) |
| [opea/chathistory-mongo](https://hub.docker.com/r/opea/chathistory-mongo)                                           | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/chathistory/src/Dockerfile)                         | The docker image exposes the OPEA Chat History microservice which is based on a MongoDB database, designed to allow users to store, retrieve and manage chat conversations                                      | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/chathistory/src/README.md)                              |
| [opea/dataprep](https://hub.docker.com/r/opea/dataprep) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/src/Dockerfile) | The docker image exposed the OPEA dataprep microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/README.md) |
| [opea/embedding](https://hub.docker.com/r/opea/embedding) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/src/Dockerfile) | The docker image exposed the OPEA mosec embedding microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/src/README.md) |
| [opea/embedding-multimodal-clip](https://hub.docker.com/r/opea/embedding-multimodal-clip)                           | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/clip/src/Dockerfile)                  | The docker image exposed the OPEA mosec embedding microservice based on Langchain framework for GenAI application use                                                                                          | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/clip/src/README.md)                       |
| [opea/embedding-multimodal-bridgetower](https://hub.docker.com/r/opea/embedding-multimodal-bridgetower) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/bridgetower/src/Dockerfile) | The docker image exposes OPEA multimodal embedded microservices based on bridgetower for use by GenAI applications | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/bridgetower/src/README.md) |
| [opea/embedding-multimodal-bridgetower-gaudi](https://hub.docker.com/r/opea/embedding-multimodal-bridgetower-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/bridgetower/src/Dockerfile.intel_hpu) | The docker image exposes OPEA multimodal embedded microservices based on bridgetower for use by GenAI applications on the Gaudi | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/bridgetower/src/README.md) |
| [opea/feedbackmanagement-mongo](https://hub.docker.com/r/opea/feedbackmanagement-mongo)                             | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/feedback_management/src/Dockerfile)                 | The docker image exposes the OPEA feedback management microservice, which uses a MongoDB database, for GenAI applications.                                                                                     | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/feedback_management/src/README.md)                      |
| [opea/finetuning](https://hub.docker.com/r/opea/finetuning) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/src/Dockerfile) | The docker image exposed the OPEA Fine-tuning microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/src/README.md) |
| [opea/finetuning-gaudi](https://hub.docker.com/r/opea/finetuning-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/src/Dockerfile.intel_hpu) | The docker image exposed the OPEA Fine-tuning microservice for GenAI application use on the Gaudi | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/src/README.md) |
| [opea/gpt-sovits](https://hub.docker.com/r/opea/gpt-sovits) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/gpt-sovits/src/Dockerfile) | The docker image exposed the OPEA GPT-SoVITS service for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/gpt-sovits/src/README.md) |
| [opea/guardrails](https://hub.docker.com/r/opea/guardrails) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/guardrails/Dockerfile) | The docker image exposed the OPEA guardrail microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/guardrails/README.md) |
| [opea/guardrails-toxicity-predictionguard](https://hub.docker.com/r/opea/guardrails-toxicity-predictionguard) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/toxicity_detection/Dockerfile) | The docker image exposed the OPEA guardrail microservice to provide toxicity detection for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/toxicity_detection/README.md) |
| [opea/guardrails-pii-predictionguard](https://hub.docker.com/r/opea/guardrails-pii-predictionguard) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/pii_detection/Dockerfile) | The docker image exposed the OPEA guardrail microservice to provide PII detection for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/pii_detection/README.md) |
| [opea/guardrails-injection-predictionguard](https://hub.docker.com/r/opea/guardrails-injection-predictionguard) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/prompt_injection/Dockerfile) | The docker image exposed the OPEA guardrail microservice to provide injection predictionguard for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/prompt_injection/README.md) |
| [opea/guardrails-hallucination-detection](https://hub.docker.com/r/opea/guardrails-hallucination-detection) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/hallucination_detection/Dockerfile) | The docker image exposed the OPEA guardrail microservice to provide hallucination detection for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/hallucination_detection/README.md) |
| [opea/guardrails-factuality-predictionguard](https://hub.docker.com/r/opea/guardrails-factuality-predictionguard) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/factuality_alignment/Dockerfile) | The docker image exposed the OPEA guardrail microservice to provide factuality predictionguard for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/factuality_alignment/README.md) |
| [opea/guardrails-bias-detection](https://hub.docker.com/r/opea/guardrails-bias-detection) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/bias_detection/Dockerfile) | The docker image exposed the OPEA guardrail microservice to provide bias detection for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/guardrails/src/bias_detection/README.md) |
| [opea/image2image-gaudi](https://hub.docker.com/r/opea/image2image-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/image2image/src/Dockerfile.intel_hpu) | The purpose of the Docker image is to expose the OPEA Image-to-Image microservice for GenAI application use on the Gaudi. | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/image2image/src/README.md) |
| [opea/image2image](https://hub.docker.com/r/opea/image2image) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/image2image/src/Dockerfile) | The purpose of the Docker image is to expose the OPEA Image-to-Image microservice for GenAI application use. | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/image2image/src/README.md) |
| [opea/image2video-gaudi](https://hub.docker.com/r/opea/image2video-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/image2video/src/Dockerfile.intel_hpu) | The purpose of the Docker image is to expose the OPEA image-to-video microservice for GenAI application use on the Gaudi. | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/image2video/src/README.md) |
| [opea/image2video](https://hub.docker.com/r/opea/image2video) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/image2video/src/Dockerfile) | The purpose of the Docker image is to expose the OPEA image-to-video microservice for GenAI application use. | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/image2video/src/README.md) |
| [opea/llm-textgen](https://hub.docker.com/r/opea/llm-textgen) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/Dockerfile) | The docker image exposed the OPEA LLM microservice upon textgen docker image for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/README.md) |
| [opea/llm-textgen-gaudi](https://hub.docker.com/r/opea/llm-textgen-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/Dockerfile.intel_hpu) | The docker image exposed the OPEA LLM microservice upon textgen docker image for GenAI application use on the Gaudi2 | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/README.md) |
| [opea/llm-eval](https://hub.docker.com/r/opea/llm-eval) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/utils/lm-eval/Dockerfile) | The docker image exposed the OPEA LLM microservice upon eval docker image for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/utils/lm-eval/README.md) |
| [opea/llm-docsum](https://hub.docker.com/r/opea/llm-docsum) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/doc-summarization/Dockerfile) | The docker image exposed the OPEA LLM microservice upon docsum docker image for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/doc-summarization/README.md) |
| [opea/llm-faqgen](https://hub.docker.com/r/opea/llm-faqgen) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/faq-generation/Dockerfile) | This docker image is designed to build a frequently asked questions microservice using the HuggingFace Text Generation Inference (TGI) framework. The microservice accepts document input and generates a FAQ. | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/faq-generation/README.md) |
| [opea/lvm](https://hub.docker.com/r/opea/lvm) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/Dockerfile) | The docker image exposed the OPEA large visual model (LVM) microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/README.md) |
| [opea/lvm-llava](https://hub.docker.com/r/opea/lvm-llava) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/llava/src/Dockerfile) | The docker image exposed the OPEA microservice running LLaVA as a large visual model (LVM) server for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/llava/src/README.md) |
| [opea/lvm-video-llama](https://hub.docker.com/r/opea/lvm-video-llama) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/video-llama/src/Dockerfile) | The docker image exposed the OPEA microservice running Video-Llama as a large visual model (LVM) for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/video-llama/src/README.md) |
| [opea/lvm-predictionguard](https://hub.docker.com/r/opea/lvm-predictionguard) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/predictionguard/src/Dockerfile) | The docker image exposed the OPEA microservice running predictionguard as a large visual model (LVM) server for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/predictionguard/src/README.md) |
| [opea/lvm-llava-gaudi](https://hub.docker.com/r/opea/lvm-llava-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/llava/src/Dockerfile.intel_hpu) | The docker image exposed the OPEA microservice running LLaVA as a large visual model (LVM) service for GenAI application use on the Gaudi2 | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/llava/src/README.md) |
| [opea/lvm-llama-vision](https://hub.docker.com/r/opea/lvm-llama-vision) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/llama-vision/src/Dockerfile) | The docker image exposed the OPEA microservice running Llama Vision as the base large visual model service for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/llama-vision/src/README.md) |
| [opea/lvm-llama-vision-tp](https://hub.docker.com/r/opea/lvm-llama-vision-tp) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/llama-vision/src/Dockerfile.tp) | The docker image exposed the OPEA microservice running Llama Vision with deepspeed as the base large visual model service for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/llama-vision/src/README.md) |
| [opea/lvm-llama-vision-guard](https://hub.docker.com/r/opea/lvm-llama-vision-guard) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/llama-vision/src/Dockerfile.guard) | The docker image exposed the OPEA microservice running Llama Vision Guard as the base large visual model service for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/llama-vision/src/README.md) |
| [opea/promptregistry-mongo](https://hub.docker.com/r/opea/promptregistry-mongo) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/prompt_registry/src/Dockerfile) | The docker image exposes the OPEA Prompt Registry microservice, which is based on a MongoDB database and designed to store and retrieve a user's preferred prompts | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/prompt_registry/src/README.md) |
| [opea/reranking](https://hub.docker.com/r/opea/reranking) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/rerankings/src/Dockerfile) | The docker image exposed the OPEA reranking microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/rerankings/src/README.md) |
| [opea/retriever](https://hub.docker.com/r/opea/retriever) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/src/Dockerfile) | The docker image exposed the OPEA retrieval microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/README.md) |
| [opea/text2image](https://hub.docker.com/r/opea/text2image) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/text2image/src/Dockerfile) | The docker image exposed the OPEA text-to-image microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/text2image/src/README.md) |
| [opea/text2image-gaudi](https://hub.docker.com/r/opea/text2image-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/text2image/src/Dockerfile.intel_hpu) | The docker image exposed the OPEA text-to-image microservice for GenAI application use on the Gaudi | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/text2image/src/README.md) |
| [opea/text2image-ui](https://hub.docker.com/r/opea/text2image-ui) | [Link](https://github.com/opea-project/GenAIExamples/blob/main/Text2Image/ui/docker/Dockerfile) | The docker image exposed the OPEA text-to-image microservice UI entry for GenAI application use | [Link](https://github.com/opea-project/GenAIExamples/blob/main/Text2Image/README.md) |
| [opea/text2sql](https://hub.docker.com/r/opea/text2sql) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/text2sql/src/Dockerfile) | The docker image exposed the OPEA text to Structured Query Language microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/text2sql/src/README.md) |
| [opea/text2sql-react-ui](https://hub.docker.com/r/opea/text2sql-react-ui) | [Link](https://github.com/opea-project/GenAIExamples/blob/main/DBQnA/ui/docker/Dockerfile.react) | The docker image exposed the OPEA text to Structured Query Language microservice react UI entry for GenAI application use | [Link](https://github.com/opea-project/GenAIExamples/blob/main/DBQnA/README.md) |
| [opea/tts](https://hub.docker.com/r/opea/tts) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/src/Dockerfile) | The docker image exposed the OPEA Text-To-Speech microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/src/README.md) |
| [opea/speecht5](https://hub.docker.com/r/opea/speecht5) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/speecht5/src/Dockerfile) | The docker image exposed the OPEA SpeechT5 service for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/speecht5/src/README.md) |
| [opea/speecht5-gaudi](https://hub.docker.com/r/opea/speecht5-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/speecht5/src/Dockerfile.intel_hpu) | The docker image exposed the OPEA SpeechT5 service on Gaudi2 for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/speecht5/src/README.md) |
| [opea/gpt-sovits](https://hub.docker.com/r/opea/gpt-sovits) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/gpt-sovits/src/Dockerfile) | The docker image exposed the OPEA GPT-SoVITS service for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/gpt-sovits/src/README.md) |
| [opea/nginx](https://hub.docker.com/r/opea/nginx) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/nginx/src/Dockerfile) | The docker image exposed the OPEA nginx microservice for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/nginx/deployment/kubernetes/README.md) |
| [opea/vectorstore-pathway](https://hub.docker.com/r/opea/vectorstore-pathway) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/pathway/src/Dockerfile) | The docker image exposed the OPEA Vectorstores microservice with Pathway for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/pathway/src/README.md) |
| [opea/wav2lip](https://hub.docker.com/r/opea/wav2lip) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/wav2lip/src/Dockerfile) | The docker image exposed the OPEA Wav2Lip microservice, which generates lip movements from audio files, for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/wav2lip/deployment/kubernetes/README.md) |
| [opea/wav2lip-gaudi](https://hub.docker.com/r/opea/wav2lip-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/wav2lip/src/Dockerfile.intel_hpu) | The docker image exposed the OPEA Wav2Lip microservice, which generates lip movements from audio files, for GenAI application use on the Gaudi2 | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/wav2lip/deployment/kubernetes/README.md) |
| [opea/vllm-arc](https://hub.docker.com/r/opea/vllm-arc) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/vllm/src/Dockerfile.intel_gpu) | The docker image powered by vllm-project for deploying and serving vllm Models on Arc | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/vllm/README.md) |
| [opea/vllm-openvino](https://hub.docker.com/r/opea/vllm-openvino) | [Link](https://github.com/vllm-project/vllm/blob/v0.6.1/Dockerfile.openvino) | The docker image powered by vllm-project for deploying and serving vllm Models of the Openvino Framework | [Link](https://github.com/vllm-project/vllm/blob/main/README.md) |
| [opea/vllm-gaudi](https://hub.docker.com/r/opea/vllm-gaudi) | [Link](https://github.com/HabanaAI/vllm-fork/blob/habana_main/Dockerfile.hpu) | The docker image powered by vllm-project for deploying and serving vllm Models on Gaudi2 | [Link](https://github.com/HabanaAI/vllm-fork/blob/habana_main/README.md) |
| [opea/vllm](https://hub.docker.com/r/opea/vllm) | [Link](https://github.com/HabanaAI/vllm-fork/blob/habana_main/Dockerfile.cpu) | The docker image powered by vllm-project for deploying and serving vllm Models | [Link](https://github.com/HabanaAI/vllm-fork/blob/habana_main/README.md) |
| [opea/whisper-gaudi](https://hub.docker.com/r/opea/whisper-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/whisper/src/Dockerfile.intel_hpu) | The docker image exposed the OPEA Whisper service on Gaudi2 for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/whisper/src/README.md) |
| [opea/whisper](https://hub.docker.com/r/opea/whisper) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/whisper/src/Dockerfile) | The docker image exposed the OPEA Whisper service for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/whisper/src/README.md) |
| [opea/web-retriever](https://hub.docker.com/r/opea/web-retriever) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/web_retrievers/src/Dockerfile) | The docker image exposed the OPEA retrieval microservice based on chroma vectordb for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/web_retrievers/src/README.md) |