Rename docker image name (#35)
Signed-off-by: lvliang-intel <liang1.lv@intel.com>
@@ -26,13 +26,13 @@ The other way is to start the ASR microservice with Docker.

```bash
cd ../../
-docker build -t intel/gen-ai-comps:asr --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/Dockerfile .
+docker build -t opea/gen-ai-comps:asr --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/Dockerfile .
```

## Run Docker with CLI

```bash
-docker run -p 9099:9099 --network=host --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy intel/gen-ai-comps:asr
+docker run -p 9099:9099 --network=host --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy opea/gen-ai-comps:asr
```

# Test
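Once the renamed image is up, a quick request confirms the rename did not break the service. A minimal smoke-test sketch, assuming the ASR service listens on port 9099 (per the `docker run` above) and exposes a `/v1/asr` route accepting base64-encoded audio; the route and payload shape are assumptions, not part of this diff:

```bash
# Hypothetical smoke test for the ASR container started above.
# The /v1/asr route and JSON payload are assumptions; check the
# service README for the actual contract. sample.wav is a placeholder.
AUDIO_B64=$(base64 -w 0 sample.wav)
curl http://localhost:9099/v1/asr \
  -X POST \
  -H "Content-Type: application/json" \
  -d "{\"audio\": \"${AUDIO_B64}\"}"
```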
@@ -70,13 +70,13 @@ python embedding_tei_gaudi.py

```bash
cd ../../
-docker build -t intel/gen-ai-comps:embedding-tei-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/docker/Dockerfile .
+docker build -t opea/gen-ai-comps:embedding-tei-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/docker/Dockerfile .
```

## Run Docker with CLI

```bash
-docker run -d --name="embedding-tei-server" -p 6000:6000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TEI_EMBEDDING_ENDPOINT=$TEI_EMBEDDING_ENDPOINT intel/gen-ai-comps:embedding-tei-server
+docker run -d --name="embedding-tei-server" -p 6000:6000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TEI_EMBEDDING_ENDPOINT=$TEI_EMBEDDING_ENDPOINT opea/gen-ai-comps:embedding-tei-server
```
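The renamed embedding image can be smoke-tested the same way. A minimal sketch, assuming the service listens on port 6000 (per the `docker run` above) and exposes a `/v1/embeddings` route taking a plain-text field; route and payload are assumptions, not shown in this diff:

```bash
# Hypothetical smoke test for the embedding container started above.
# The /v1/embeddings route and payload are assumptions; verify against
# the service README.
curl http://localhost:6000/v1/embeddings \
  -X POST \
  -H "Content-Type: application/json" \
  -d '{"text": "Hello, world!"}'
```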
## Run Docker with Docker Compose

@@ -16,7 +16,7 @@ version: "3.8"

services:
  embedding:
-    image: intel/gen-ai-comps:embedding-tei-server
+    image: opea/gen-ai-comps:embedding-tei-server
    container_name: embedding-tei-server
    ports:
      - "6000:6000"

@@ -74,13 +74,13 @@ export LLM_MODEL_ID=${your_hf_llm_model}

```bash
cd ../../
-docker build -t intel/gen-ai-comps:guardrails-tgi-gaudi-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/guardrails/langchain/docker/Dockerfile .
+docker build -t opea/gen-ai-comps:guardrails-tgi-gaudi-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/guardrails/langchain/docker/Dockerfile .
```

## Run Docker with CLI

```bash
-docker run -d --name="guardrails-tgi-server" -p 9090:9090 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e SAFETY_GUARD_ENDPOINT=$SAFETY_GUARD_ENDPOINT -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN intel/gen-ai-comps:guardrails-tgi-gaudi-server
+docker run -d --name="guardrails-tgi-server" -p 9090:9090 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e SAFETY_GUARD_ENDPOINT=$SAFETY_GUARD_ENDPOINT -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN opea/gen-ai-comps:guardrails-tgi-gaudi-server
```
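A quick check that the renamed guardrails image still answers. A minimal sketch, assuming the service listens on port 9090 (per the `docker run` above) and exposes a `/v1/guardrails` route taking a text field; both are assumptions, not part of this diff:

```bash
# Hypothetical smoke test for the guardrails container started above.
# The /v1/guardrails route and payload are assumptions; check the
# service README for the real contract.
curl http://localhost:9090/v1/guardrails \
  -X POST \
  -H "Content-Type: application/json" \
  -d '{"text": "How do I reset my password?"}'
```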
## Run Docker with Docker Compose

@@ -25,7 +25,7 @@ services:

    shm_size: 1g
    command: --model-id ${LLM_MODEL_ID}
  guardrails:
-    image: intel/gen-ai-comps:guardrails-tgi-gaudi-server
+    image: opea/gen-ai-comps:guardrails-tgi-gaudi-server
    container_name: guardrails-tgi-gaudi-server
    ports:
      - "9090:9090"

@@ -57,13 +57,13 @@ export LLM_MODEL_ID=${your_hf_llm_model}

```bash
cd ../../
-docker build -t intel/gen-ai-comps:llm-tgi-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/langchain/docker/Dockerfile .
+docker build -t opea/gen-ai-comps:llm-tgi-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/langchain/docker/Dockerfile .
```

## Run Docker with CLI

```bash
-docker run -d --name="llm-tgi-server" -p 9000:9000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TGI_LLM_ENDPOINT=$TGI_LLM_ENDPOINT -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN intel/gen-ai-comps:llm-tgi-server
+docker run -d --name="llm-tgi-server" -p 9000:9000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TGI_LLM_ENDPOINT=$TGI_LLM_ENDPOINT -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN opea/gen-ai-comps:llm-tgi-server
```
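The renamed LLM image can be exercised with a single prompt. A minimal sketch, assuming the service listens on port 9000 (per the `docker run` above) and exposes a `/v1/chat/completions` route taking a query field; route and payload are assumptions, not shown in this diff:

```bash
# Hypothetical smoke test for the llm-tgi container started above.
# The /v1/chat/completions route and payload are assumptions; verify
# against the service README.
curl http://localhost:9000/v1/chat/completions \
  -X POST \
  -H "Content-Type: application/json" \
  -d '{"query": "What is deep learning?"}'
```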
## Run Docker with Docker Compose

@@ -25,7 +25,7 @@ services:

    shm_size: 1g
    command: --model-id ${LLM_MODEL_ID}
  llm:
-    image: intel/gen-ai-comps:llm-tgi-server
+    image: opea/gen-ai-comps:llm-tgi-server
    container_name: llm-tgi-server
    ports:
      - "9000:9000"

@@ -48,13 +48,13 @@ If you start a Reranking microservice with docker, the `docker_compose_rerankin

```bash
cd ../../
-docker build -t intel/gen-ai-comps:reranking-tei-xeon-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/docker/Dockerfile .
+docker build -t opea/gen-ai-comps:reranking-tei-xeon-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/docker/Dockerfile .
```

## Run Docker with CLI

```bash
-docker run -d --name="reranking-tei-server" -p 8000:8000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TEI_RERANKING_ENDPOINT=$TEI_RERANKING_ENDPOINT -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN intel/gen-ai-comps:reranking-tei-xeon-server
+docker run -d --name="reranking-tei-server" -p 8000:8000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TEI_RERANKING_ENDPOINT=$TEI_RERANKING_ENDPOINT -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN opea/gen-ai-comps:reranking-tei-xeon-server
```
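A quick way to confirm the renamed reranking image still works is to rank one document against one query. A minimal sketch, assuming the service listens on port 8000 (per the `docker run` above) and exposes a `/v1/reranking` route; route and payload shape are assumptions, not part of this diff:

```bash
# Hypothetical smoke test for the reranking container started above.
# The /v1/reranking route and payload are assumptions; check the
# service README for the real contract.
curl http://localhost:8000/v1/reranking \
  -X POST \
  -H "Content-Type: application/json" \
  -d '{"initial_query": "What is OPEA?", "retrieved_docs": [{"text": "OPEA is an open platform for enterprise AI."}]}'
```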
## Run Docker with Docker Compose

@@ -25,7 +25,7 @@ services:

    shm_size: 1g
    command: --model-id ${RERANK_MODEL_ID}
  reranking:
-    image: intel/gen-ai-comps:reranking-tei-xeon-server
+    image: opea/gen-ai-comps:reranking-tei-xeon-server
    container_name: reranking-tei-xeon-server
    ports:
      - "8000:8000"

@@ -45,13 +45,13 @@ python langchain/retriever_redis.py

```bash
cd ../../
-docker build -t intel/gen-ai-comps:retriever-redis-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/retrievers/langchain/docker/Dockerfile .
+docker build -t opea/gen-ai-comps:retriever-redis-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/retrievers/langchain/docker/Dockerfile .
```

## Run Docker with CLI

```bash
-docker run -d --name="retriever-redis-server" -p 7000:7000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e REDIS_URL=$REDIS_URL -e INDEX_NAME=$INDEX_NAME intel/gen-ai-comps:retriever-redis-server
+docker run -d --name="retriever-redis-server" -p 7000:7000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e REDIS_URL=$REDIS_URL -e INDEX_NAME=$INDEX_NAME opea/gen-ai-comps:retriever-redis-server
```
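The renamed retriever image can be smoke-tested with a dummy query vector. A minimal sketch, assuming the service listens on port 7000 (per the `docker run` above) and exposes a `/v1/retrieval` route taking a text plus an embedding; the route, payload, and the 768-dim placeholder vector are all assumptions, not shown in this diff:

```bash
# Hypothetical smoke test for the retriever container started above.
# The /v1/retrieval route and payload are assumptions; the dummy
# embedding length (768) must match the index's embedding dimension.
EMBEDDING=$(python3 -c "print([0.1]*768)")
curl http://localhost:7000/v1/retrieval \
  -X POST \
  -H "Content-Type: application/json" \
  -d "{\"text\": \"What is OPEA?\", \"embedding\": ${EMBEDDING}}"
```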
## Run Docker with Docker Compose

@@ -16,7 +16,7 @@ version: "3.8"

services:
  retriever:
-    image: intel/gen-ai-comps:retriever-redis-server
+    image: opea/gen-ai-comps:retriever-redis-server
    container_name: retriever-redis-server
    ports:
      - "7000:7000"

@@ -26,13 +26,13 @@ The other way is to start the TTS microservice with Docker.

```bash
cd ../../
-docker build -t intel/gen-ai-comps:tts --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/Dockerfile .
+docker build -t opea/gen-ai-comps:tts --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/Dockerfile .
```

## Run Docker with CLI

```bash
-docker run -p 9999:9999 --network=host --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy intel/gen-ai-comps:tts
+docker run -p 9999:9999 --network=host --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy opea/gen-ai-comps:tts
```
# Test
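With the renamed TTS image running, a single synthesis request serves as a smoke test. A minimal sketch, assuming the service listens on port 9999 (per the `docker run` above) and exposes a `/v1/tts` route returning audio data; route, payload, and response format are assumptions, not part of this diff:

```bash
# Hypothetical smoke test for the TTS container started above.
# The /v1/tts route and payload are assumptions; the response is
# assumed to carry audio bytes, saved here to a local file.
curl http://localhost:9999/v1/tts \
  -X POST \
  -H "Content-Type: application/json" \
  -d '{"text": "Hello, this is a test."}' \
  -o tts_output.bin
```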