update image build compose (#698)

Signed-off-by: chensuyue <suyue.chen@intel.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
@@ -3,7 +3,7 @@

 # this file should be run in the root of the repo
 services:
-  comps-agent-langchain:
+  agent-langchain:
     build:
       dockerfile: comps/agent/langchain/Dockerfile
-    image: ${REGISTRY:-opea}/comps-agent-langchain:${TAG:-latest}
+    image: ${REGISTRY:-opea}/agent-langchain:${TAG:-latest}
@@ -19,10 +19,6 @@ services:
     build:
       dockerfile: comps/dataprep/pinecone/langchain/Dockerfile
     image: ${REGISTRY:-opea}/dataprep-pinecone:${TAG:-latest}
-  dataprep-multimodal-redis:
-    build:
-      dockerfile: comps/dataprep/multimodal/redis/langchain/Dockerfile
-    image: ${REGISTRY:-opea}/dataprep-multimodal-redis:${TAG:-latest}
   dataprep-vdms:
     build:
       dockerfile: comps/dataprep/vdms/langchain/Dockerfile
@@ -21,3 +21,7 @@ services:
     build:
       dockerfile: comps/dataprep/vdms/multimodal_langchain/Dockerfile
     image: ${REGISTRY:-opea}/dataprep-multimodal-vdms:${TAG:-latest}
+  dataprep-multimodal-redis:
+    build:
+      dockerfile: comps/dataprep/multimodal/redis/langchain/Dockerfile
+    image: ${REGISTRY:-opea}/dataprep-multimodal-redis:${TAG:-latest}
@@ -14,18 +14,10 @@ services:
     build:
       dockerfile: comps/embeddings/tei/llama_index/Dockerfile
     image: ${REGISTRY:-opea}/embedding-tei-llama-index:${TAG:-latest}
-  bridgetower-embedder:
-    build:
-      dockerfile: comps/embeddings/multimodal/bridgetower/Dockerfile
-    image: ${REGISTRY:-opea}/bridgetower-embedder:${TAG:-latest}
-  bridgetower-embedder-gaudi:
+  embedding-multimodal-bridgetower-gaudi:
     build:
       dockerfile: comps/embeddings/multimodal/bridgetower/Dockerfile.intel_hpu
-    image: ${REGISTRY:-opea}/bridgetower-embedder-gaudi:${TAG:-latest}
-  embedding-multimodal:
-    build:
-      dockerfile: comps/embeddings/multimodal/multimodal_langchain/Dockerfile
-    image: ${REGISTRY:-opea}/embedding-multimodal:${TAG:-latest}
+    image: ${REGISTRY:-opea}/embedding-multimodal-bridgetower-gaudi:${TAG:-latest}
   embedding-predictionguard:
     build:
       dockerfile: comps/embeddings/predictionguard/Dockerfile
@@ -12,3 +12,11 @@ services:
     build:
       dockerfile: comps/embeddings/multimodal_clip/Dockerfile
     image: ${REGISTRY:-opea}/embedding-multimodal-clip:${TAG:-latest}
+  embedding-multimodal-bridgetower:
+    build:
+      dockerfile: comps/embeddings/multimodal/bridgetower/Dockerfile
+    image: ${REGISTRY:-opea}/embedding-multimodal-bridgetower:${TAG:-latest}
+  embedding-multimodal:
+    build:
+      dockerfile: comps/embeddings/multimodal/multimodal_langchain/Dockerfile
+    image: ${REGISTRY:-opea}/embedding-multimodal:${TAG:-latest}
@@ -3,10 +3,6 @@

 # this file should be run in the root of the repo
 services:
-  finetuning:
-    build:
-      dockerfile: comps/finetuning/Dockerfile
-    image: ${REGISTRY:-opea}/finetuning:${TAG:-latest}
   finetuning-gaudi:
     build:
       dockerfile: comps/finetuning/Dockerfile.intel_hpu
.github/workflows/docker/compose/finetuning-compose.yaml (vendored, new file, 9 lines)
@@ -0,0 +1,9 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# this file should be run in the root of the repo
+services:
+  finetuning:
+    build:
+      dockerfile: comps/finetuning/Dockerfile
+    image: ${REGISTRY:-opea}/finetuning:${TAG:-latest}
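For reference, a compose file like the new finetuning-compose.yaml above is meant to be run from the repo root (per its own comment). A minimal usage sketch, not part of this commit; the REGISTRY/TAG overrides simply replace the `${REGISTRY:-opea}` and `${TAG:-latest}` fallbacks the file declares:

```bash
# Sketch: build the finetuning image from the repo root using the compose
# file added above. REGISTRY and TAG are optional; omitting them falls back
# to the opea/ prefix and latest tag baked into the file.
cd GenAIComps/
REGISTRY=myregistry TAG=dev \
  docker compose -f .github/workflows/docker/compose/finetuning-compose.yaml build finetuning
```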
@@ -9,13 +9,3 @@ services:
     build:
       dockerfile: comps/guardrails/llama_guard/langchain/Dockerfile
     image: ${REGISTRY:-opea}/guardrails-tgi:${TAG:-latest}
-
-  guardrails-bias-detection:
-    build:
-      dockerfile: comps/guardrails/bias_detection/Dockerfile
-    image: ${REGISTRY:-opea}/guardrails-bias-detection:${TAG:-latest}
-
-  guardrails-toxicity-detection:
-    build:
-      dockerfile: comps/guardrails/toxicity_detection/Dockerfile
-    image: ${REGISTRY:-opea}/guardrails-toxicity-detection:${TAG:-latest}
@@ -3,16 +3,6 @@

 # this file should be run in the root of the repo
 services:
-  lvm:
-    build:
-      dockerfile: comps/lvms/llava/Dockerfile
-    image: ${REGISTRY:-opea}/lvm:${TAG:-latest}
-  # Xeon CPU
-  llava:
-    build:
-      dockerfile: comps/lvms/llava/dependency/Dockerfile
-    image: ${REGISTRY:-opea}/llava:${TAG:-latest}
-  # Gaudi2 HPU
   llava-hpu:
     build:
       dockerfile: comps/lvms/llava/dependency/Dockerfile.intel_hpu
@@ -15,3 +15,11 @@ services:
     build:
       dockerfile: comps/lvms/video-llama/dependency/Dockerfile
     image: ${REGISTRY:-opea}/video-llama-lvm-server:${TAG:-latest}
+  lvm-llava:
+    build:
+      dockerfile: comps/lvms/llava/dependency/Dockerfile
+    image: ${REGISTRY:-opea}/lvm-llava:${TAG:-latest}
+  lvm-llava-svc:
+    build:
+      dockerfile: comps/lvms/llava/Dockerfile
+    image: ${REGISTRY:-opea}/lvm-llava-svc:${TAG:-latest}
||||
@@ -23,10 +23,6 @@ services:
|
||||
build:
|
||||
dockerfile: comps/retrievers/pathway/langchain/Dockerfile
|
||||
image: ${REGISTRY:-opea}/retriever-pathway:${TAG:-latest}
|
||||
multimodal-retriever-redis:
|
||||
build:
|
||||
dockerfile: comps/retrievers/multimodal/redis/langchain/Dockerfile
|
||||
image: ${REGISTRY:-opea}/multimodal-retriever-redis:${TAG:-latest}
|
||||
retriever-neo4j:
|
||||
build:
|
||||
dockerfile: comps/retrievers/neo4j/langchain/Dockerfile
|
||||
|
||||
@@ -15,3 +15,7 @@ services:
     build:
       dockerfile: comps/retrievers/vdms/langchain/Dockerfile
     image: ${REGISTRY:-opea}/retriever-vdms:${TAG:-latest}
+  retriever-multimodal-redis:
+    build:
+      dockerfile: comps/retrievers/multimodal/redis/langchain/Dockerfile
+    image: ${REGISTRY:-opea}/retriever-multimodal-redis:${TAG:-latest}
@@ -57,7 +57,7 @@ python agent.py

 ```bash
 cd GenAIComps/ # back to GenAIComps/ folder
-docker build -t opea/comps-agent-langchain:latest -f comps/agent/langchain/Dockerfile .
+docker build -t opea/agent-langchain:latest -f comps/agent/langchain/Dockerfile .
 ```

 #### 2.2.2 Start microservices
@@ -75,7 +75,7 @@ docker run -d --runtime=habana --name "comps-tgi-gaudi-service" -p 8080:80 -v ./
 docker logs comps-tgi-gaudi-service

 # Agent
-docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursion_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:latest
+docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursion_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:latest

 # check status
 docker logs comps-langchain-agent-endpoint
@@ -84,7 +84,7 @@ docker logs comps-langchain-agent-endpoint
 > debug mode
 >
 > ```bash
-> docker run --rm --runtime=runc --name="comps-langchain-agent-endpoint" -v ./comps/agent/langchain/:/home/user/comps/agent/langchain/ -p 9090:9090 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursion_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:latest
+> docker run --rm --runtime=runc --name="comps-langchain-agent-endpoint" -v ./comps/agent/langchain/:/home/user/comps/agent/langchain/ -p 9090:9090 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursion_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:latest
 > ```

 ## 🚀 3. Validate Microservice
@@ -159,7 +159,7 @@ def opea_rag_query(query):

 ```bash
 # Agent
-docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v my_tools:/home/user/comps/agent/langchain/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursive_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:latest
+docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v my_tools:/home/user/comps/agent/langchain/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursive_limit=5 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:latest
 ```

 - validate with my_tools
@@ -17,12 +17,12 @@ function build_docker_images() {
     echo "Building the docker images"
     cd $WORKPATH
     echo $WORKPATH
-    docker build --no-cache -t opea/comps-agent-langchain:comps -f comps/agent/langchain/Dockerfile .
+    docker build --no-cache -t opea/agent-langchain:comps -f comps/agent/langchain/Dockerfile .
     if [ $? -ne 0 ]; then
-        echo "opea/comps-agent-langchain built fail"
+        echo "opea/agent-langchain built fail"
         exit 1
     else
-        echo "opea/comps-agent-langchain built successful"
+        echo "opea/agent-langchain built successful"
     fi
 }
@@ -50,7 +50,7 @@ function start_tgi_service() {

 function start_react_langchain_agent_service() {
     echo "Starting react_langchain agent microservice"
-    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langchain -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
+    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langchain -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
     sleep 5s

     docker logs test-comps-agent-endpoint
@@ -60,7 +60,7 @@ function start_react_langchain_agent_service() {

 function start_react_langgraph_agent_service() {
     echo "Starting react_langgraph agent microservice"
-    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langgraph -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
+    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langgraph -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
     sleep 5s
     docker logs test-comps-agent-endpoint
     echo "Service started successfully"
@@ -68,7 +68,7 @@ function start_react_langgraph_agent_service() {

 function start_react_langgraph_agent_service_openai() {
     echo "Starting react_langgraph agent microservice"
-    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e model=gpt-4o-mini-2024-07-18 -e strategy=react_langgraph -e llm_engine=openai -e OPENAI_API_KEY=${OPENAI_API_KEY} -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
+    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e model=gpt-4o-mini-2024-07-18 -e strategy=react_langgraph -e llm_engine=openai -e OPENAI_API_KEY=${OPENAI_API_KEY} -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
     sleep 5s
     docker logs test-comps-agent-endpoint
     echo "Service started successfully"
@@ -77,7 +77,7 @@ function start_react_langgraph_agent_service_openai() {

 function start_ragagent_agent_service() {
     echo "Starting rag agent microservice"
-    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=rag_agent -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
+    docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=rag_agent -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
     sleep 5s
     docker logs test-comps-agent-endpoint
     echo "Service started successfully"