From 3d00a33e098b118466404f45222d14c19da3a469 Mon Sep 17 00:00:00 2001
From: "chen, suyue"
Date: Wed, 18 Sep 2024 11:11:59 +0800
Subject: [PATCH] update image build compose (#698)

Signed-off-by: chensuyue
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .../workflows/docker/compose/agent-compose-cd.yaml |  4 ++--
 .../docker/compose/dataprep-compose-cd.yaml        |  4 ----
 .../workflows/docker/compose/dataprep-compose.yaml |  4 ++++
 .../docker/compose/embeddings-compose-cd.yaml      | 12 ++----------
 .../docker/compose/embeddings-compose.yaml         |  8 ++++++++
 .../docker/compose/finetuning-compose-cd.yaml      |  4 ----
 .../docker/compose/finetuning-compose.yaml         |  9 +++++++++
 .../docker/compose/guardrails-compose.yaml         | 10 ----------
 .../workflows/docker/compose/lvms-compose-cd.yaml  | 10 ----------
 .github/workflows/docker/compose/lvms-compose.yaml |  8 ++++++++
 .../docker/compose/retrievers-compose-cd.yaml      |  4 ----
 .../docker/compose/retrievers-compose.yaml         |  4 ++++
 comps/agent/langchain/README.md                    |  8 ++++----
 tests/agent/test_agent_langchain_on_intel_hpu.sh   | 14 +++++++-------
 14 files changed, 48 insertions(+), 55 deletions(-)
 create mode 100644 .github/workflows/docker/compose/finetuning-compose.yaml

diff --git a/.github/workflows/docker/compose/agent-compose-cd.yaml b/.github/workflows/docker/compose/agent-compose-cd.yaml
index f8c3d7386..fcff2be27 100644
--- a/.github/workflows/docker/compose/agent-compose-cd.yaml
+++ b/.github/workflows/docker/compose/agent-compose-cd.yaml
@@ -3,7 +3,7 @@
 
 # this file should be run in the root of the repo
 services:
-  comps-agent-langchain:
+  agent-langchain:
     build:
       dockerfile: comps/agent/langchain/Dockerfile
-    image: ${REGISTRY:-opea}/comps-agent-langchain:${TAG:-latest}
+    image: ${REGISTRY:-opea}/agent-langchain:${TAG:-latest}
diff --git a/.github/workflows/docker/compose/dataprep-compose-cd.yaml b/.github/workflows/docker/compose/dataprep-compose-cd.yaml
index 6622a2921..b7589a12c 100644
--- a/.github/workflows/docker/compose/dataprep-compose-cd.yaml
+++ b/.github/workflows/docker/compose/dataprep-compose-cd.yaml
@@ -19,10 +19,6 @@ services:
     build:
       dockerfile: comps/dataprep/pinecone/langchain/Dockerfile
     image: ${REGISTRY:-opea}/dataprep-pinecone:${TAG:-latest}
-  dataprep-multimodal-redis:
-    build:
-      dockerfile: comps/dataprep/multimodal/redis/langchain/Dockerfile
-    image: ${REGISTRY:-opea}/dataprep-multimodal-redis:${TAG:-latest}
   dataprep-vdms:
     build:
       dockerfile: comps/dataprep/vdms/langchain/Dockerfile
diff --git a/.github/workflows/docker/compose/dataprep-compose.yaml b/.github/workflows/docker/compose/dataprep-compose.yaml
index 3c0346103..078fdb998 100644
--- a/.github/workflows/docker/compose/dataprep-compose.yaml
+++ b/.github/workflows/docker/compose/dataprep-compose.yaml
@@ -21,3 +21,7 @@ services:
     build:
       dockerfile: comps/dataprep/vdms/multimodal_langchain/Dockerfile
     image: ${REGISTRY:-opea}/dataprep-multimodal-vdms:${TAG:-latest}
+  dataprep-multimodal-redis:
+    build:
+      dockerfile: comps/dataprep/multimodal/redis/langchain/Dockerfile
+    image: ${REGISTRY:-opea}/dataprep-multimodal-redis:${TAG:-latest}
diff --git a/.github/workflows/docker/compose/embeddings-compose-cd.yaml b/.github/workflows/docker/compose/embeddings-compose-cd.yaml
index d9d0403dd..53243cfc5 100644
--- a/.github/workflows/docker/compose/embeddings-compose-cd.yaml
+++ b/.github/workflows/docker/compose/embeddings-compose-cd.yaml
@@ -14,18 +14,10 @@ services:
     build:
       dockerfile: comps/embeddings/tei/llama_index/Dockerfile
     image: ${REGISTRY:-opea}/embedding-tei-llama-index:${TAG:-latest}
-  bridgetower-embedder:
-    build:
-      dockerfile: comps/embeddings/multimodal/bridgetower/Dockerfile
-    image: ${REGISTRY:-opea}/bridgetower-embedder:${TAG:-latest}
-  bridgetower-embedder-gaudi:
+  embedding-multimodal-bridgetower-gaudi:
     build:
       dockerfile: comps/embeddings/multimodal/bridgetower/Dockerfile.intel_hpu
-    image: ${REGISTRY:-opea}/bridgetower-embedder-gaudi:${TAG:-latest}
-  embedding-multimodal:
-    build:
-      dockerfile: comps/embeddings/multimodal/multimodal_langchain/Dockerfile
-    image: ${REGISTRY:-opea}/embedding-multimodal:${TAG:-latest}
+    image: ${REGISTRY:-opea}/embedding-multimodal-bridgetower-gaudi:${TAG:-latest}
   embedding-predictionguard:
     build:
       dockerfile: comps/embeddings/predictionguard/Dockerfile
diff --git a/.github/workflows/docker/compose/embeddings-compose.yaml b/.github/workflows/docker/compose/embeddings-compose.yaml
index 5f701b4c6..5f486cfca 100644
--- a/.github/workflows/docker/compose/embeddings-compose.yaml
+++ b/.github/workflows/docker/compose/embeddings-compose.yaml
@@ -12,3 +12,11 @@ services:
     build:
       dockerfile: comps/embeddings/multimodal_clip/Dockerfile
     image: ${REGISTRY:-opea}/embedding-multimodal-clip:${TAG:-latest}
+  embedding-multimodal-bridgetower:
+    build:
+      dockerfile: comps/embeddings/multimodal/bridgetower/Dockerfile
+    image: ${REGISTRY:-opea}/embedding-multimodal-bridgetower:${TAG:-latest}
+  embedding-multimodal:
+    build:
+      dockerfile: comps/embeddings/multimodal/multimodal_langchain/Dockerfile
+    image: ${REGISTRY:-opea}/embedding-multimodal:${TAG:-latest}
diff --git a/.github/workflows/docker/compose/finetuning-compose-cd.yaml b/.github/workflows/docker/compose/finetuning-compose-cd.yaml
index 94e20941c..ece822a44 100644
--- a/.github/workflows/docker/compose/finetuning-compose-cd.yaml
+++ b/.github/workflows/docker/compose/finetuning-compose-cd.yaml
@@ -3,10 +3,6 @@
 
 # this file should be run in the root of the repo
 services:
-  finetuning:
-    build:
-      dockerfile: comps/finetuning/Dockerfile
-    image: ${REGISTRY:-opea}/finetuning:${TAG:-latest}
   finetuning-gaudi:
     build:
       dockerfile: comps/finetuning/Dockerfile.intel_hpu
diff --git a/.github/workflows/docker/compose/finetuning-compose.yaml b/.github/workflows/docker/compose/finetuning-compose.yaml
new file mode 100644
index 000000000..5a48dc905
--- /dev/null
+++ b/.github/workflows/docker/compose/finetuning-compose.yaml
@@ -0,0 +1,9 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# this file should be run in the root of the repo
+services:
+  finetuning:
+    build:
+      dockerfile: comps/finetuning/Dockerfile
+    image: ${REGISTRY:-opea}/finetuning:${TAG:-latest}
diff --git a/.github/workflows/docker/compose/guardrails-compose.yaml b/.github/workflows/docker/compose/guardrails-compose.yaml
index 349676167..81e516209 100644
--- a/.github/workflows/docker/compose/guardrails-compose.yaml
+++ b/.github/workflows/docker/compose/guardrails-compose.yaml
@@ -9,13 +9,3 @@ services:
     build:
       dockerfile: comps/guardrails/llama_guard/langchain/Dockerfile
     image: ${REGISTRY:-opea}/guardrails-tgi:${TAG:-latest}
-
-  guardrails-bias-detection:
-    build:
-      dockerfile: comps/guardrails/bias_detection/Dockerfile
-    image: ${REGISTRY:-opea}/guardrails-bias-detection:${TAG:-latest}
-
-  guardrails-toxicity-detection:
-    build:
-      dockerfile: comps/guardrails/toxicity_detection/Dockerfile
-    image: ${REGISTRY:-opea}/guardrails-toxicity-detection:${TAG:-latest}
diff --git a/.github/workflows/docker/compose/lvms-compose-cd.yaml b/.github/workflows/docker/compose/lvms-compose-cd.yaml
index 781c49cc8..31944d581 100644
--- a/.github/workflows/docker/compose/lvms-compose-cd.yaml
+++ b/.github/workflows/docker/compose/lvms-compose-cd.yaml
@@ -3,16 +3,6 @@
 
 # this file should be run in the root of the repo
 services:
-  lvm:
-    build:
-      dockerfile: comps/lvms/llava/Dockerfile
-    image: ${REGISTRY:-opea}/lvm:${TAG:-latest}
-  # Xeon CPU
-  llava:
-    build:
-      dockerfile: comps/lvms/llava/dependency/Dockerfile
-    image: ${REGISTRY:-opea}/llava:${TAG:-latest}
-  # Gaudi2 HPU
   llava-hpu:
     build:
       dockerfile: comps/lvms/llava/dependency/Dockerfile.intel_hpu
diff --git a/.github/workflows/docker/compose/lvms-compose.yaml b/.github/workflows/docker/compose/lvms-compose.yaml
index d49c3f368..f602a6155 100644
--- a/.github/workflows/docker/compose/lvms-compose.yaml
+++ b/.github/workflows/docker/compose/lvms-compose.yaml
@@ -15,3 +15,11 @@ services:
     build:
       dockerfile: comps/lvms/video-llama/dependency/Dockerfile
     image: ${REGISTRY:-opea}/video-llama-lvm-server:${TAG:-latest}
+  lvm-llava:
+    build:
+      dockerfile: comps/lvms/llava/dependency/Dockerfile
+    image: ${REGISTRY:-opea}/lvm-llava:${TAG:-latest}
+  lvm-llava-svc:
+    build:
+      dockerfile: comps/lvms/llava/Dockerfile
+    image: ${REGISTRY:-opea}/lvm-llava-svc:${TAG:-latest}
diff --git a/.github/workflows/docker/compose/retrievers-compose-cd.yaml b/.github/workflows/docker/compose/retrievers-compose-cd.yaml
index 67b44fd0f..6e5ee43c2 100644
--- a/.github/workflows/docker/compose/retrievers-compose-cd.yaml
+++ b/.github/workflows/docker/compose/retrievers-compose-cd.yaml
@@ -23,10 +23,6 @@ services:
     build:
       dockerfile: comps/retrievers/pathway/langchain/Dockerfile
     image: ${REGISTRY:-opea}/retriever-pathway:${TAG:-latest}
-  multimodal-retriever-redis:
-    build:
-      dockerfile: comps/retrievers/multimodal/redis/langchain/Dockerfile
-    image: ${REGISTRY:-opea}/multimodal-retriever-redis:${TAG:-latest}
   retriever-neo4j:
     build:
       dockerfile: comps/retrievers/neo4j/langchain/Dockerfile
diff --git a/.github/workflows/docker/compose/retrievers-compose.yaml b/.github/workflows/docker/compose/retrievers-compose.yaml
index 80680a61f..4e72d955f 100644
--- a/.github/workflows/docker/compose/retrievers-compose.yaml
+++ b/.github/workflows/docker/compose/retrievers-compose.yaml
@@ -15,3 +15,7 @@ services:
     build:
       dockerfile: comps/retrievers/vdms/langchain/Dockerfile
     image: ${REGISTRY:-opea}/retriever-vdms:${TAG:-latest}
+  retriever-multimodal-redis:
+    build:
+      dockerfile: comps/retrievers/multimodal/redis/langchain/Dockerfile
+    image: ${REGISTRY:-opea}/retriever-multimodal-redis:${TAG:-latest}
diff --git a/comps/agent/langchain/README.md b/comps/agent/langchain/README.md
index 9df97e994..106a1edca 100644
--- a/comps/agent/langchain/README.md
+++ b/comps/agent/langchain/README.md
@@ -57,7 +57,7 @@ python agent.py
 
 ```bash
 cd GenAIComps/ # back to GenAIComps/ folder
-docker build -t opea/comps-agent-langchain:latest -f comps/agent/langchain/Dockerfile .
+docker build -t opea/agent-langchain:latest -f comps/agent/langchain/Dockerfile .
 ```
 
 #### 2.2.2 Start microservices
@@ -75,7 +75,7 @@ docker run -d --runtime=habana --name "comps-tgi-gaudi-service" -p 8080:80 -v ./
 docker logs comps-tgi-gaudi-service
 
 # Agent
-docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursion_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:latest
+docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursion_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:latest
 
 # check status
 docker logs comps-langchain-agent-endpoint
@@ -84,7 +84,7 @@ docker logs comps-langchain-agent-endpoint
 > debug mode
 >
 > ```bash
-> docker run --rm --runtime=runc --name="comps-langchain-agent-endpoint" -v ./comps/agent/langchain/:/home/user/comps/agent/langchain/ -p 9090:9090 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursion_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:latest
+> docker run --rm --runtime=runc --name="comps-langchain-agent-endpoint" -v ./comps/agent/langchain/:/home/user/comps/agent/langchain/ -p 9090:9090 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursion_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:latest
 > ```
 
 ## 🚀 3. Validate Microservice
@@ -159,7 +159,7 @@ def opea_rag_query(query):
 
 ```bash
 # Agent
-docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v my_tools:/home/user/comps/agent/langchain/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursive_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:latest
+docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v my_tools:/home/user/comps/agent/langchain/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursion_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:latest
 ```
 
 - validate with my_tools
diff --git a/tests/agent/test_agent_langchain_on_intel_hpu.sh b/tests/agent/test_agent_langchain_on_intel_hpu.sh
index c75da9a01..6e0b0327c 100644
--- a/tests/agent/test_agent_langchain_on_intel_hpu.sh
+++ b/tests/agent/test_agent_langchain_on_intel_hpu.sh
@@ -17,12 +17,12 @@ function build_docker_images() {
     echo "Building the docker images"
     cd $WORKPATH
     echo $WORKPATH
-    docker build --no-cache -t opea/comps-agent-langchain:comps -f comps/agent/langchain/Dockerfile .
+    docker build --no-cache -t opea/agent-langchain:comps -f comps/agent/langchain/Dockerfile .
     if [ $? -ne 0 ]; then
-        echo "opea/comps-agent-langchain built fail"
+        echo "opea/agent-langchain build failed"
        exit 1
     else
-        echo "opea/comps-agent-langchain built successful"
+        echo "opea/agent-langchain built successfully"
     fi
 }
 
@@ -50,7 +50,7 @@ function start_tgi_service() {
 
 function start_react_langchain_agent_service() {
     echo "Starting react_langchain agent microservice"
-    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langchain -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
+    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langchain -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
     sleep 5s
     docker logs test-comps-agent-endpoint
 
@@ -60,7 +60,7 @@ function start_react_langchain_agent_service() {
 
 function start_react_langgraph_agent_service() {
     echo "Starting react_langgraph agent microservice"
-    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langgraph -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
+    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langgraph -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
     sleep 5s
     docker logs test-comps-agent-endpoint
     echo "Service started successfully"
@@ -68,7 +68,7 @@ function start_react_langgraph_agent_service() {
 
 function start_react_langgraph_agent_service_openai() {
     echo "Starting react_langgraph agent microservice"
-    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e model=gpt-4o-mini-2024-07-18 -e strategy=react_langgraph -e llm_engine=openai -e OPENAI_API_KEY=${OPENAI_API_KEY} -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
+    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e model=gpt-4o-mini-2024-07-18 -e strategy=react_langgraph -e llm_engine=openai -e OPENAI_API_KEY=${OPENAI_API_KEY} -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
     sleep 5s
     docker logs test-comps-agent-endpoint
     echo "Service started successfully"
@@ -77,7 +77,7 @@ function start_react_langgraph_agent_service_openai() {
 
 function start_ragagent_agent_service() {
     echo "Starting rag agent microservice"
-    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=rag_agent -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
+    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=rag_agent -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
     sleep 5s
     docker logs test-comps-agent-endpoint
     echo "Service started successfully"
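
To try the rename end to end, one option is to apply the mbox-format patch above with `git am` and rebuild the renamed agent image. A minimal sketch, assuming the patch is saved as `update-image-build-compose.patch` (an arbitrary file name) in the root of a GenAIComps checkout at the parent commit of 3d00a33e:

```bash
# Apply the mbox patch; git am preserves the author, date, and commit message.
git am update-image-build-compose.patch

# Rebuild the renamed agent image exactly as the updated README documents.
docker build -t opea/agent-langchain:latest -f comps/agent/langchain/Dockerfile .

# The old comps-agent-langchain name should no longer appear in the CI compose manifests.
grep -rn "comps-agent-langchain" .github/workflows/docker/compose/ \
  && echo "stale name still present" \
  || echo "rename complete"
```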