Fix the Docker image name for the release image build (#1152)

Signed-off-by: chensuyue <suyue.chen@intel.com>
This commit is contained in:
chen, suyue
2024-11-18 23:48:01 +08:00
committed by GitHub
parent c3e6f43ece
commit 9ba034b22d
8 changed files with 18 additions and 22 deletions

View File

@@ -8,7 +8,7 @@ on:
branches: [ 'main' ]
paths:
- "**.py"
- "**Dockerfile"
- "**Dockerfile*"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-on-push

View File

@@ -2,7 +2,7 @@
# SPDX-License-Identifier: Apache-2.0
services:
texttosql-service:
texttosql:
build:
context: GenAIComps
dockerfile: comps/texttosql/langchain/Dockerfile
@@ -11,11 +11,10 @@ services:
https_proxy: ${https_proxy}
no_proxy: ${no_proxy}
image: ${REGISTRY:-opea}/texttosql:${TAG:-latest}
dbqna-xeon-react-ui:
texttosql-react-ui:
build:
context: GenAIExamples/DBQnA/ui/docker
dockerfile: Dockerfile.react
context: ../ui
dockerfile: ./docker/Dockerfile.react
args:
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}

View File

@@ -2,7 +2,7 @@
# SPDX-License-Identifier: Apache-2.0
services:
server:
edgecraftrag-server:
build:
context: ..
args:
@@ -10,7 +10,7 @@ services:
https_proxy: ${https_proxy}
dockerfile: ./Dockerfile.server
image: ${REGISTRY:-opea}/edgecraftrag-server:${TAG:-latest}
ui:
edgecraftrag-ui:
build:
context: ..
args:
@@ -18,7 +18,7 @@ services:
https_proxy: ${https_proxy}
dockerfile: ./ui/docker/Dockerfile.ui
image: ${REGISTRY:-opea}/edgecraftrag-ui:${TAG:-latest}
ecrag:
edgecraftrag:
build:
context: ..
args:

View File

@@ -28,8 +28,7 @@ HF_ENDPOINT=https://hf-mirror.com
function build_docker_images() {
cd $WORKPATH/docker_image_build
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="server ui ecrag"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker compose -f build.yaml build --no-cache > ${LOG_PATH}/docker_image_build.log
docker images && sleep 1s
}

View File

@@ -145,9 +145,9 @@ services:
- LOGFLAG=${LOGFLAG}
ipc: host
restart: always
chatqna-gaudi-ui-server:
graphrag-ui-server:
image: ${REGISTRY:-opea}/graphrag-ui:${TAG:-latest}
container_name: chatqna-gaudi-ui-server
container_name: graphrag-ui-server
depends_on:
- graphrag-gaudi-backend-server
ports:
@@ -163,14 +163,14 @@ services:
container_name: chatqna-gaudi-nginx-server
depends_on:
- graphrag-gaudi-backend-server
- chatqna-gaudi-ui-server
- graphrag-ui-server
ports:
- "${NGINX_PORT:-80}:80"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- FRONTEND_SERVICE_IP=chatqna-gaudi-ui-server
- FRONTEND_SERVICE_IP=graphrag-ui-server
- FRONTEND_SERVICE_PORT=5173
- BACKEND_SERVICE_NAME=graphrag
- BACKEND_SERVICE_IP=graphrag-gaudi-backend-server

View File

@@ -29,7 +29,7 @@ services:
context: GenAIComps
dockerfile: comps/dataprep/neo4j/llama_index/Dockerfile
image: ${REGISTRY:-opea}/dataprep-neo4j-llamaindex:${TAG:-latest}
chatqna-gaudi-nginx-server:
nginx:
build:
args:
http_proxy: ${http_proxy}
@@ -38,7 +38,7 @@ services:
context: GenAIComps
dockerfile: comps/nginx/Dockerfile
image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
chatqna-gaudi-ui-server:
graphrag-ui:
build:
args:
http_proxy: ${http_proxy}

View File

@@ -19,12 +19,10 @@ function build_docker_images() {
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="graphrag dataprep-neo4j-llamaindex retriever-neo4j-llamaindex chatqna-gaudi-ui-server chatqna-gaudi-nginx-server"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker compose -f build.yaml build --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/tgi-gaudi:2.0.6
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
docker pull neo4j:latest
docker images && sleep 1s
}

View File

@@ -53,7 +53,7 @@ function start_services() {
if grep -q Connected ${LOG_PATH}/tgi_service_start.log; then
break
fi
sleep 10s
sleep 5s
n=$((n+1))
done
}
@@ -94,7 +94,7 @@ function validate_microservices() {
"${ip_address}:8008/generate" \
"generated_text" \
"tgi-gaudi" \
"tgi-gaudi-service" \
"tgi-gaudi-server" \
'{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}'
# llm microservice