# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
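
# NOTE (assumption): the ${...} values below (HOST_IP, LLM_MODEL_ID, HUGGINGFACEHUB_API_TOKEN,
# TOOLSET_PATH, the AGENTQNA_* port/device variables, agent tuning variables, and proxy
# settings) are expected to be provided by the deployment environment, e.g. exported in
# the shell or defined in an .env file, before running `docker compose up`.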

services:
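  # TGI model-serving endpoint for the agents. Maps host port
  # ${AGENTQNA_TGI_SERVICE_PORT} (default 8085) to port 80 in the container and
  # passes the host GPU devices (/dev/kfd, /dev/dri/*) through to the container.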
  agent-tgi-server:
    image: ${AGENTQNA_TGI_IMAGE}
    container_name: agent-tgi-server
    ports:
      - "${AGENTQNA_TGI_SERVICE_PORT-8085}:80"
    volumes:
      - ${HF_CACHE_DIR:-/var/opea/agent-service/}:/data
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      TGI_LLM_ENDPOINT: "http://${HOST_IP}:${AGENTQNA_TGI_SERVICE_PORT}"
      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    shm_size: 1g
    devices:
      - /dev/kfd:/dev/kfd
      - /dev/dri/${AGENTQNA_CARD_ID}:/dev/dri/${AGENTQNA_CARD_ID}
      - /dev/dri/${AGENTQNA_RENDER_ID}:/dev/dri/${AGENTQNA_RENDER_ID}
    cap_add:
      - SYS_PTRACE
    group_add:
      - video
    security_opt:
      - seccomp:unconfined
    ipc: host
    command: --model-id ${LLM_MODEL_ID} --max-input-length 4096 --max-total-tokens 8192
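
  # RAG worker agent (opea/agent image) using the rag_agent_llama strategy; it
  # talks to the TGI endpoint and the retrieval tool, and serves on port 9095.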
  worker-rag-agent:
    image: opea/agent:latest
    container_name: rag-agent-endpoint
    volumes:
      # - ${WORKDIR}/GenAIExamples/AgentQnA/docker_image_build/GenAIComps/comps/agent/langchain/:/home/user/comps/agent/langchain/
      - ${TOOLSET_PATH}:/home/user/tools/
    ports:
      - "9095:9095"
    ipc: host
    environment:
      ip_address: ${ip_address}
      strategy: rag_agent_llama
      recursion_limit: ${recursion_limit_worker}
      llm_engine: tgi
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      llm_endpoint_url: ${LLM_ENDPOINT_URL}
      model: ${LLM_MODEL_ID}
      temperature: ${temperature}
      max_new_tokens: ${max_new_tokens}
      stream: false
      tools: /home/user/tools/worker_agent_tools.yaml
      require_human_feedback: false
      RETRIEVAL_TOOL_URL: ${RETRIEVAL_TOOL_URL}
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
      LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
      LANGCHAIN_PROJECT: "opea-worker-agent-service"
      port: 9095
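
  # Supervisor ReAct agent; depends on the TGI server and the worker agent,
  # orchestrates them (plus the CRAG server) and exposes container port 9090
  # on host port ${AGENTQNA_FRONTEND_PORT}.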
  supervisor-react-agent:
    image: opea/agent:latest
    container_name: react-agent-endpoint
    depends_on:
      - agent-tgi-server
      - worker-rag-agent
    volumes:
      # - ${WORKDIR}/GenAIExamples/AgentQnA/docker_image_build/GenAIComps/comps/agent/langchain/:/home/user/comps/agent/langchain/
      - ${TOOLSET_PATH}:/home/user/tools/
    ports:
      - "${AGENTQNA_FRONTEND_PORT}:9090"
    ipc: host
    environment:
      ip_address: ${ip_address}
      strategy: react_langgraph
      recursion_limit: ${recursion_limit_supervisor}
      llm_engine: tgi
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      llm_endpoint_url: ${LLM_ENDPOINT_URL}
      model: ${LLM_MODEL_ID}
      temperature: ${temperature}
      max_new_tokens: ${max_new_tokens}
      stream: false
      tools: /home/user/tools/supervisor_agent_tools.yaml
      require_human_feedback: false
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
      LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
      LANGCHAIN_PROJECT: "opea-supervisor-agent-service"
      CRAG_SERVER: $CRAG_SERVER
      WORKER_AGENT_URL: $WORKER_AGENT_URL
      port: 9090