diff --git a/comps/asr/README.md b/comps/asr/README.md
index f2fda8511..0636c2d5b 100644
--- a/comps/asr/README.md
+++ b/comps/asr/README.md
@@ -26,13 +26,13 @@ The other way is to start the ASR microservice with Docker.

 ```bash
 cd ../../
-docker build -t intel/gen-ai-comps:asr --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/Dockerfile .
+docker build -t opea/gen-ai-comps:asr --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/Dockerfile .
 ```

 ## Run Docker with CLI

 ```bash
-docker run -p 9099:9099 --network=host --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy intel/gen-ai-comps:asr
+docker run -p 9099:9099 --network=host --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy opea/gen-ai-comps:asr
 ```

 # Test
diff --git a/comps/embeddings/README.md b/comps/embeddings/README.md
index a9ace434d..1db05d718 100644
--- a/comps/embeddings/README.md
+++ b/comps/embeddings/README.md
@@ -70,13 +70,13 @@ python embedding_tei_gaudi.py

 ```bash
 cd ../../
-docker build -t intel/gen-ai-comps:embedding-tei-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/docker/Dockerfile .
+docker build -t opea/gen-ai-comps:embedding-tei-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/docker/Dockerfile .
 ```

 ## Run Docker with CLI

 ```bash
-docker run -d --name="embedding-tei-server" -p 6000:6000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TEI_EMBEDDING_ENDPOINT=$TEI_EMBEDDING_ENDPOINT intel/gen-ai-comps:embedding-tei-server
+docker run -d --name="embedding-tei-server" -p 6000:6000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TEI_EMBEDDING_ENDPOINT=$TEI_EMBEDDING_ENDPOINT opea/gen-ai-comps:embedding-tei-server
 ```

 ## Run Docker with Docker Compose
diff --git a/comps/embeddings/langchain/docker/docker_compose_embedding.yaml b/comps/embeddings/langchain/docker/docker_compose_embedding.yaml
index 8d84d3b6e..26d6c0287 100644
--- a/comps/embeddings/langchain/docker/docker_compose_embedding.yaml
+++ b/comps/embeddings/langchain/docker/docker_compose_embedding.yaml
@@ -16,7 +16,7 @@ version: "3.8"

 services:
   embedding:
-    image: intel/gen-ai-comps:embedding-tei-server
+    image: opea/gen-ai-comps:embedding-tei-server
     container_name: embedding-tei-server
     ports:
       - "6000:6000"
diff --git a/comps/guardrails/README.md b/comps/guardrails/README.md
index 262971d76..1bc2855d4 100644
--- a/comps/guardrails/README.md
+++ b/comps/guardrails/README.md
@@ -74,13 +74,13 @@ export LLM_MODEL_ID=${your_hf_llm_model}

 ```bash
 cd ../../
-docker build -t intel/gen-ai-comps:guardrails-tgi-gaudi-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/guardrails/langchain/docker/Dockerfile .
+docker build -t opea/gen-ai-comps:guardrails-tgi-gaudi-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/guardrails/langchain/docker/Dockerfile .
 ```

 ## Run Docker with CLI

 ```bash
-docker run -d --name="guardrails-tgi-server" -p 9090:9090 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e SAFETY_GUARD_ENDPOINT=$SAFETY_GUARD_ENDPOINT -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN intel/gen-ai-comps:guardrails-tgi-gauid-server
+docker run -d --name="guardrails-tgi-server" -p 9090:9090 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e SAFETY_GUARD_ENDPOINT=$SAFETY_GUARD_ENDPOINT -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN opea/gen-ai-comps:guardrails-tgi-gaudi-server
 ```

 ## Run Docker with Docker Compose
diff --git a/comps/guardrails/langchain/docker/docker_compose_guardrails.yaml b/comps/guardrails/langchain/docker/docker_compose_guardrails.yaml
index f64e308ea..20eaae85c 100644
--- a/comps/guardrails/langchain/docker/docker_compose_guardrails.yaml
+++ b/comps/guardrails/langchain/docker/docker_compose_guardrails.yaml
@@ -25,7 +25,7 @@ services:
     shm_size: 1g
     command: --model-id ${LLM_MODEL_ID}
   guardrails:
-    image: intel/gen-ai-comps:guardrails-tgi-gaudi-server
+    image: opea/gen-ai-comps:guardrails-tgi-gaudi-server
     container_name: guardrails-tgi-gaudi-server
     ports:
       - "9090:9090"
diff --git a/comps/llms/README.md b/comps/llms/README.md
index 9314c93ca..363cb9f9a 100644
--- a/comps/llms/README.md
+++ b/comps/llms/README.md
@@ -57,13 +57,13 @@ export LLM_MODEL_ID=${your_hf_llm_model}

 ```bash
 cd ../../
-docker build -t intel/gen-ai-comps:llm-tgi-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/langchain/docker/Dockerfile .
+docker build -t opea/gen-ai-comps:llm-tgi-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/langchain/docker/Dockerfile .
 ```

 ## Run Docker with CLI

 ```bash
-docker run -d --name="llm-tgi-server" -p 9000:9000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TGI_LLM_ENDPOINT=$TGI_LLM_ENDPOINT -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN intel/gen-ai-comps:llm-tgi-server
+docker run -d --name="llm-tgi-server" -p 9000:9000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TGI_LLM_ENDPOINT=$TGI_LLM_ENDPOINT -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN opea/gen-ai-comps:llm-tgi-server
 ```

 ## Run Docker with Docker Compose
diff --git a/comps/llms/langchain/docker/docker_compose_llm.yaml b/comps/llms/langchain/docker/docker_compose_llm.yaml
index 6da9bfe83..117812e76 100644
--- a/comps/llms/langchain/docker/docker_compose_llm.yaml
+++ b/comps/llms/langchain/docker/docker_compose_llm.yaml
@@ -25,7 +25,7 @@ services:
     shm_size: 1g
     command: --model-id ${LLM_MODEL_ID}
   llm:
-    image: intel/gen-ai-comps:llm-tgi-server
+    image: opea/gen-ai-comps:llm-tgi-server
     container_name: llm-tgi-server
     ports:
       - "9000:9000"
diff --git a/comps/reranks/README.md b/comps/reranks/README.md
index 97ccaedfb..8caaea7b6 100644
--- a/comps/reranks/README.md
+++ b/comps/reranks/README.md
@@ -48,13 +48,13 @@ If you start an Reranking microservice with docker, the `docker_compose_rerankin

 ```bash
 cd ../../
-docker build -t intel/gen-ai-comps:reranking-tei-xeon-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/docker/Dockerfile .
+docker build -t opea/gen-ai-comps:reranking-tei-xeon-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/docker/Dockerfile .
 ```

 ## Run Docker with CLI

 ```bash
-docker run -d --name="reranking-tei-server" -p 8000:8000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TEI_RERANKING_ENDPOINT=$TEI_RERANKING_ENDPOINT -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN intel/gen-ai-comps:reranking-tei-xeon-server
+docker run -d --name="reranking-tei-server" -p 8000:8000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TEI_RERANKING_ENDPOINT=$TEI_RERANKING_ENDPOINT -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN opea/gen-ai-comps:reranking-tei-xeon-server
 ```

 ## Run Docker with Docker Compose
diff --git a/comps/reranks/docker/docker_compose_reranking.yaml b/comps/reranks/docker/docker_compose_reranking.yaml
index 35cda505b..f741f55cc 100644
--- a/comps/reranks/docker/docker_compose_reranking.yaml
+++ b/comps/reranks/docker/docker_compose_reranking.yaml
@@ -25,7 +25,7 @@ services:
     shm_size: 1g
     command: --model-id ${RERANK_MODEL_ID}
   reranking:
-    image: intel/gen-ai-comps:reranking-tei-xeon-server
+    image: opea/gen-ai-comps:reranking-tei-xeon-server
     container_name: reranking-tei-xeon-server
     ports:
       - "8000:8000"
diff --git a/comps/retrievers/README.md b/comps/retrievers/README.md
index 8a55cd030..7313c2311 100644
--- a/comps/retrievers/README.md
+++ b/comps/retrievers/README.md
@@ -45,13 +45,13 @@ python langchain/retriever_redis.py

 ```bash
 cd ../../
-docker build -t intel/gen-ai-comps:retriever-redis-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/retrievers/langchain/docker/Dockerfile .
+docker build -t opea/gen-ai-comps:retriever-redis-server --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/retrievers/langchain/docker/Dockerfile .
 ```

 ## Run Docker with CLI

 ```bash
-docker run -d --name="retriever-redis-server" -p 7000:7000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e REDIS_URL=$REDIS_URL -e INDEX_NAME=$INDEX_NAME intel/gen-ai-comps:retriever-redis-server
+docker run -d --name="retriever-redis-server" -p 7000:7000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e REDIS_URL=$REDIS_URL -e INDEX_NAME=$INDEX_NAME opea/gen-ai-comps:retriever-redis-server
 ```

 ## Run Docker with Docker Compose
diff --git a/comps/retrievers/langchain/docker/docker_compose_retriever.yaml b/comps/retrievers/langchain/docker/docker_compose_retriever.yaml
index 0897353b8..0a4e38fe5 100644
--- a/comps/retrievers/langchain/docker/docker_compose_retriever.yaml
+++ b/comps/retrievers/langchain/docker/docker_compose_retriever.yaml
@@ -16,7 +16,7 @@ version: "3.8"

 services:
   retriever:
-    image: intel/gen-ai-comps:retriever-redis-server
+    image: opea/gen-ai-comps:retriever-redis-server
     container_name: retriever-redis-server
     ports:
       - "7000:7000"
diff --git a/comps/tts/README.md b/comps/tts/README.md
index bc3c8fa4d..a4e122344 100644
--- a/comps/tts/README.md
+++ b/comps/tts/README.md
@@ -26,13 +26,13 @@ The other way is to start the ASR microservice with Docker.

 ```bash
 cd ../../
-docker build -t intel/gen-ai-comps:tts --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/Dockerfile .
+docker build -t opea/gen-ai-comps:tts --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/Dockerfile .
 ```

 ## Run Docker with CLI

 ```bash
-docker run -p 9999:9999 --network=host --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy intel/gen-ai-comps:tts
+docker run -p 9999:9999 --network=host --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy opea/gen-ai-comps:tts
 ```

 # Test
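
Every hunk above performs the same mechanical rename, `intel/gen-ai-comps:*` to `opea/gen-ai-comps:*`, across the component READMEs and their compose files. Below is a minimal sketch of how the rename could be sanity-checked once the patch is applied; running from the repository root, the choice of the llm component, and the grep heuristic are assumptions for illustration, not part of this change.

```bash
# Sketch of a post-patch sanity check (assumes the repo root as working dir).

# Heuristic: any surviving "intel/gen-ai-comps" reference points at a missed hunk.
grep -rn "intel/gen-ai-comps" comps/ || echo "no stale image references"

# Rebuild one renamed component under the new namespace (llm chosen arbitrarily).
docker build -t opea/gen-ai-comps:llm-tgi-server \
  --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy \
  -f comps/llms/langchain/docker/Dockerfile .

# Confirm the tag exists where the updated compose files now expect it.
docker images opea/gen-ai-comps
```

Once the images are rebuilt under the new tags, the updated compose files should start unchanged, e.g. `docker compose -f comps/llms/langchain/docker/docker_compose_llm.yaml up -d` (assuming a Compose v2 CLI).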