Check duplicated dockerfile (#1289)
Signed-off-by: ZePan110 <ze.pan@intel.com>
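Both image names were built from the same `comps/llms/src/text-generation/Dockerfile`, so this change drops the duplicated `opea/llm-tgi` name in favor of `opea/llm-textgen` across the example READMEs, compose files, Kubernetes manifests, and CI test scripts.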
@@ -10,7 +10,7 @@ git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
 
 ### Build Docker image
-docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
+docker build -t opea/llm-textgen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```
 
 ### Build the MegaService Docker Image
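The hunk above switches the build tag from `opea/llm-tgi` to `opea/llm-textgen`; the Dockerfile itself is unchanged. To rebuild under the new name and confirm the tag landed locally, a minimal check looks like this (a sketch, run from a GenAIComps checkout as in the README):

```bash
# Build the text-generation microservice under its new image name.
docker build -t opea/llm-textgen:latest \
  --build-arg https_proxy=$https_proxy \
  --build-arg http_proxy=$http_proxy \
  -f comps/llms/src/text-generation/Dockerfile .

# The new tag should now appear in the local image list.
docker images | grep llm-textgen
```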
@@ -34,7 +34,7 @@ services:
     ipc: host
     command: --model-id ${CODEGEN_LLM_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048
   codegen-llm-server:
-    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
+    image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest}
     container_name: codegen-llm-server
     depends_on:
       codegen-tgi-service:
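`${REGISTRY:-opea}` and `${TAG:-latest}` are Compose variable interpolations with shell-style defaults, so the service resolves to `opea/llm-textgen:latest` unless those variables are exported. You can preview the resolved image name without starting anything (a sketch; the private registry name is purely illustrative):

```bash
# With no overrides the image falls back to opea/llm-textgen:latest.
docker compose config | grep 'image:'

# Point the same file at a private registry and a pinned tag instead.
REGISTRY=registry.example.com/opea TAG=v1.2 docker compose config | grep 'image:'
```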
@@ -19,7 +19,7 @@ Should the Docker image you seek not yet be available on Docker Hub, you can bui
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
+docker build -t opea/llm-textgen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```
 
 ### 2. Build the MegaService Docker Image
@@ -43,7 +43,7 @@ docker build -t opea/codegen-ui:latest --build-arg https_proxy=$https_proxy --bu
 
 Then run the command `docker images`, you will have the following 3 Docker Images:
 
-- `opea/llm-tgi:latest`
+- `opea/llm-textgen:latest`
 - `opea/codegen:latest`
 - `opea/codegen-ui:latest`
 
@@ -60,7 +60,7 @@ docker build --no-cache -t opea/codegen-react-ui:latest --build-arg https_proxy=
 
 Then run the command `docker images`, you will have the following 3 Docker Images:
 
-- `opea/llm-tgi:latest`
+- `opea/llm-textgen:latest`
 - `opea/codegen:latest`
 - `opea/codegen-ui:latest`
 - `opea/codegen-react-ui:latest` (optional)
@@ -23,8 +23,8 @@ services:
       retries: 100
     command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
   llm:
-    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
-    container_name: llm-tgi-server
+    image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest}
+    container_name: llm-textgen-server
     depends_on:
       tgi-service:
         condition: service_healthy
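Since `depends_on` carries `condition: service_healthy`, the renamed `llm-textgen-server` container is held back until `tgi-service` reports healthy. A quick way to observe that ordering after `docker compose up` (a sketch using the container names from the hunk above):

```bash
# The STATUS column for tgi-service should read (healthy) first.
docker compose ps

# Only then does the dependent LLM microservice produce startup logs.
docker logs llm-textgen-server --tail 50
```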
@@ -11,7 +11,7 @@ First of all, you need to build the Docker images locally. This step can be igno
 ```bash
 git clone https://github.com/opea-project/GenAIComps.git
 cd GenAIComps
-docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
+docker build -t opea/llm-textgen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
 ```
 
 ### 2. Build the MegaService Docker Image
@@ -46,7 +46,7 @@ docker build --no-cache -t opea/codegen-react-ui:latest --build-arg https_proxy=
 
 Then run the command `docker images`, you will have the following 3 Docker images:
 
-- `opea/llm-tgi:latest`
+- `opea/llm-textgen:latest`
 - `opea/codegen:latest`
 - `opea/codegen-ui:latest`
 - `opea/codegen-react-ui:latest`
@@ -31,8 +31,8 @@ services:
     ipc: host
     command: --model-id ${LLM_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048
   llm:
-    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
-    container_name: llm-tgi-gaudi-server
+    image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest}
+    container_name: llm-textgen-gaudi-server
     depends_on:
       tgi-service:
         condition: service_healthy
@@ -23,9 +23,9 @@ services:
       dockerfile: ./docker/Dockerfile.react
     extends: codegen
     image: ${REGISTRY:-opea}/codegen-react-ui:${TAG:-latest}
-  llm-tgi:
+  llm-textgen:
     build:
       context: GenAIComps
       dockerfile: comps/llms/src/text-generation/Dockerfile
     extends: codegen
-    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
+    image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest}
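With the service key renamed to `llm-textgen` in `build.yaml`, a targeted rebuild of just this image mirrors what the CI scripts below do for the full service list (grounded in the same `docker compose -f build.yaml build` invocation):

```bash
# Rebuild only the renamed LLM image defined in build.yaml.
docker compose -f build.yaml build llm-textgen --no-cache
```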
@@ -325,7 +325,7 @@ spec:
             runAsUser: 1000
             seccompProfile:
               type: RuntimeDefault
-          image: "opea/llm-tgi:latest"
+          image: "opea/llm-textgen:latest"
           imagePullPolicy: IfNotPresent
           ports:
             - name: llm-uservice
@@ -179,7 +179,7 @@ spec:
             - name: no_proxy
               value:
           securityContext: {}
-          image: "opea/llm-tgi:latest"
+          image: "opea/llm-textgen:latest"
           imagePullPolicy: IfNotPresent
           ports:
             - name: llm-uservice
@@ -326,7 +326,7 @@ spec:
             runAsUser: 1000
             seccompProfile:
               type: RuntimeDefault
-          image: "opea/llm-tgi:latest"
+          image: "opea/llm-textgen:latest"
           imagePullPolicy: IfNotPresent
           ports:
             - name: llm-uservice
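Because the manifests use `imagePullPolicy: IfNotPresent` with a `:latest` tag, a node that already holds a locally built `opea/llm-textgen:latest` will keep using it rather than pulling. Checking which image each pod actually runs is a quick sanity test (a sketch; it lists every pod in the namespace rather than assuming a particular label):

```bash
# Print pod name and container image(s), one pod per line.
kubectl get pods -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[*].image}{"\n"}{end}'
```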
@@ -19,7 +19,7 @@ function build_docker_images() {
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="codegen codegen-ui llm-tgi"
+    service_list="codegen codegen-ui llm-textgen"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
 
     docker pull ghcr.io/huggingface/tgi-gaudi:2.0.6
@@ -94,7 +94,7 @@ function validate_microservices() {
         "${ip_address}:9000/v1/chat/completions" \
         "data: " \
         "llm" \
-        "llm-tgi-gaudi-server" \
+        "llm-textgen-gaudi-server" \
         '{"query":"def print_hello_world():"}'
 
 }
@@ -19,7 +19,7 @@ function build_docker_images() {
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="codegen codegen-ui llm-tgi"
+    service_list="codegen codegen-ui llm-textgen"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
 
     docker pull ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu
@@ -19,7 +19,7 @@ function build_docker_images() {
    git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="codegen codegen-ui llm-tgi"
+    service_list="codegen codegen-ui llm-textgen"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
 
     docker pull ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu
@@ -95,7 +95,7 @@ function validate_microservices() {
         "${ip_address}:9000/v1/chat/completions" \
         "data: " \
         "llm" \
-        "llm-tgi-server" \
+        "llm-textgen-server" \
         '{"query":"def print_hello_world():"}'
 
 }
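The `validate_microservices` calls above amount to posting a code-completion query to the renamed container's endpoint and expecting a stream whose chunks start with `data: `. The equivalent manual check, using the same endpoint and payload as the test (with `ip_address` set to the host running the service):

```bash
# -N disables buffering so the "data: " stream prints as it arrives.
curl -N -X POST "http://${ip_address}:9000/v1/chat/completions" \
  -H 'Content-Type: application/json' \
  -d '{"query":"def print_hello_world():"}'
```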