Add EdgeCraftRAG as a GenAIExample (#1072)
Signed-off-by: ZePan110 <ze.pan@intel.com>
Signed-off-by: chensuyue <suyue.chen@intel.com>
Signed-off-by: Zhu, Yongbo <yongbo.zhu@intel.com>
Signed-off-by: Wang, Xigui <xigui.wang@intel.com>
Co-authored-by: ZePan110 <ze.pan@intel.com>
Co-authored-by: chen, suyue <suyue.chen@intel.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: xiguiw <111278656+xiguiw@users.noreply.github.com>
Co-authored-by: lvliang-intel <liang1.lv@intel.com>
EdgeCraftRAG/docker_compose/intel/gpu/arc/compose.yaml (new file, 78 lines)

@@ -0,0 +1,78 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

services:
  server:
    image: ${REGISTRY:-opea}/edgecraftrag-server:${TAG:-latest}
    container_name: edgecraftrag-server
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_ENDPOINT: ${HF_ENDPOINT}
      vLLM_ENDPOINT: ${vLLM_ENDPOINT}
    volumes:
      - ${MODEL_PATH:-${PWD}}:/home/user/models
      - ${DOC_PATH:-${PWD}}:/home/user/docs
    ports:
      - ${PIPELINE_SERVICE_PORT:-16010}:${PIPELINE_SERVICE_PORT:-16010}
    devices:
      - /dev/dri:/dev/dri
    group_add:
      - video
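  # Note: mapping /dev/dri into the container together with the host "video"
  # group is the usual way to expose an Intel Arc GPU to a container; on
  # distributions where the render nodes belong to a "render" group instead,
  # that group may also need to be listed under group_add (an assumption about
  # the host setup, not something this file configures).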
  ecrag:
    image: ${REGISTRY:-opea}/edgecraftrag:${TAG:-latest}
    container_name: edgecraftrag
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      MEGA_SERVICE_PORT: ${MEGA_SERVICE_PORT:-16011}
      MEGA_SERVICE_HOST_IP: ${MEGA_SERVICE_HOST_IP:-${HOST_IP}}
      PIPELINE_SERVICE_PORT: ${PIPELINE_SERVICE_PORT:-16010}
      PIPELINE_SERVICE_HOST_IP: ${PIPELINE_SERVICE_HOST_IP:-${HOST_IP}}
    ports:
      - ${MEGA_SERVICE_PORT:-16011}:${MEGA_SERVICE_PORT:-16011}
    depends_on:
      - server
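  # Note: MEGA_SERVICE_HOST_IP and PIPELINE_SERVICE_HOST_IP fall back to
  # ${HOST_IP}, which Docker Compose does not define on its own; if HOST_IP is
  # not exported in the invoking shell, these values expand to empty strings.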
  ui:
    image: ${REGISTRY:-opea}/edgecraftrag-ui:${TAG:-latest}
    container_name: edgecraftrag-ui
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      MEGA_SERVICE_PORT: ${MEGA_SERVICE_PORT:-16011}
      MEGA_SERVICE_HOST_IP: ${MEGA_SERVICE_HOST_IP:-${HOST_IP}}
      PIPELINE_SERVICE_PORT: ${PIPELINE_SERVICE_PORT:-16010}
      PIPELINE_SERVICE_HOST_IP: ${PIPELINE_SERVICE_HOST_IP:-${HOST_IP}}
      UI_SERVICE_PORT: ${UI_SERVICE_PORT:-8082}
      UI_SERVICE_HOST_IP: ${UI_SERVICE_HOST_IP:-0.0.0.0}
    ports:
      - ${UI_SERVICE_PORT:-8082}:${UI_SERVICE_PORT:-8082}
    restart: always
    depends_on:
      - server
      - ecrag
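  # Note: with UI_SERVICE_HOST_IP defaulting to 0.0.0.0 and the port published,
  # the UI on port 8082 is reachable from other machines on the network; narrow
  # the bind address or the port mapping if that is not intended.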
  # vllm-service:
  #   image: vllm:openvino
  #   container_name: vllm-openvino-server
  #   ports:
  #     - "8008:80"
  #   environment:
  #     no_proxy: ${no_proxy}
  #     http_proxy: ${http_proxy}
  #     https_proxy: ${https_proxy}
  #     vLLM_ENDPOINT: ${vLLM_ENDPOINT}
  #     LLM_MODEL: ${LLM_MODEL}
  #   entrypoint: /bin/bash -c "\
  #     cd / && \
  #     export VLLM_CPU_KVCACHE_SPACE=50 && \
  #     python3 -m vllm.entrypoints.openai.api_server \
  #     --model '${LLM_MODEL}' \
  #     --host 0.0.0.0 \
  #     --port 80"
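  # Note: the vllm-service block above ships commented out; enabling it would
  # presumably pair with pointing vLLM_ENDPOINT at the published port, e.g.
  # http://${HOST_IP}:8008 (an illustrative value, not set anywhere in this
  # file). VLLM_CPU_KVCACHE_SPACE is vLLM's CPU KV-cache budget in GiB, so 50
  # reserves roughly 50 GiB of host memory for the KV cache.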

networks:
  default:
    driver: bridge
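A minimal bring-up sketch for this compose file (the exported values below are
illustrative assumptions, not part of this commit):

    export HOST_IP=$(hostname -I | awk '{print $1}')  # host LAN address
    export MODEL_PATH=/path/to/models                 # hypothetical model dir
    export DOC_PATH=/path/to/docs                     # hypothetical docs dir
    docker compose up -d
    docker compose ps  # expect edgecraftrag-server (16010), edgecraftrag (16011), edgecraftrag-ui (8082)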