Compare commits

..

39 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Zhenzhong1 | ebd2ab0222 | Update tuned_single_gaudi_with_rerank.yaml | 2024-11-06 16:00:41 +08:00 |
| Zhenzhong1 | 2f1f80bbae | fixed the issue of cm | 2024-10-29 03:03:21 -07:00 |
| Zhenzhong1 | 5158b5e822 | updateto vllm images | 2024-10-29 02:44:27 -07:00 |
| Zhenzhong1 | 1c3f55602a | added vllm | 2024-10-29 02:13:06 -07:00 |
| Zhenzhong1 | bb4c1dbc44 | Update configmap.yaml | 2024-10-28 19:36:32 +08:00 |
| Zhenzhong1 | 16018085b0 | added some envs | 2024-10-25 09:22:36 +03:00 |
| Zhenzhong1 | 93bbd5131f | updated oob manifests | 2024-10-24 05:11:23 +03:00 |
| chensuyue | 4f32f867ec | update cpu core into 80 (Signed-off-by: chensuyue <suyue.chen@intel.com>) | 2024-10-23 14:49:04 +08:00 |
| Zhenzhong1 | 4f183c2a0d | restore README | 2024-10-23 14:33:10 +08:00 |
| Zhenzhong1 | 1046aad26f | removed benchmark template | 2024-10-23 09:30:03 +03:00 |
| pre-commit-ci[bot] | 2876677214 | [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci) | 2024-10-22 09:11:46 +00:00 |
| Zhenzhong1 | a9536321a0 | added the tuned tgi params | 2024-10-22 12:11:22 +03:00 |
| Zhenzhong1 | 24de14e58a | fixed the audioqna benchmark path | 2024-10-22 11:29:30 +03:00 |
| pre-commit-ci[bot] | 065222f29b | [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci) | 2024-10-22 08:19:15 +00:00 |
| Zhenzhong1 | 3f596d9747 | update README | 2024-10-22 11:18:49 +03:00 |
| pre-commit-ci[bot] | 9da0c09b18 | [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci) | 2024-10-22 08:10:20 +00:00 |
| Zhenzhong1 | b9c646a2b8 | update README | 2024-10-22 11:09:50 +03:00 |
| Zhenzhong1 | 27e9832af4 | fixed visualqna issues | 2024-10-22 10:40:05 +03:00 |
| Zhenzhong1 | f3cbcadfa2 | fixed visualqna image issues & tgi params issues | 2024-10-22 10:26:44 +03:00 |
| Zhenzhong1 | e21ee76f24 | updated tgiparams | 2024-10-22 09:15:11 +03:00 |
| pre-commit-ci[bot] | 8effe7a4eb | [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci) | 2024-10-22 05:38:44 +00:00 |
| Zhenzhong1 | 0d3876d6fa | removed multiple yamls | 2024-10-22 08:38:15 +03:00 |
| Zhenzhong1 | bb46f5b355 | added visual qna & update deployment template | 2024-10-22 05:45:00 +03:00 |
| Zhenzhong1 | bcaffd7db4 | added more cases | 2024-10-21 12:21:02 +03:00 |
| Zhenzhong1 | 124143ea40 | removed values.yaml | 2024-10-21 12:10:59 +03:00 |
| Zhenzhong1 | 6dc4bb5c79 | refactoered image | 2024-10-21 11:54:18 +03:00 |
| pre-commit-ci[bot] | d290bd811f | [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci) | 2024-10-21 08:14:22 +00:00 |
| Zhenzhong Xu | d68ce801e4 | refactored AudioQNA | 2024-10-21 11:12:29 +03:00 |
| Zhenzhong Xu | 048b4e1df9 | refactored AudioQNA | 2024-10-21 11:06:37 +03:00 |
| Zhenzhong Xu | fdb8a33a6e | refactored GaqGen | 2024-10-21 10:48:16 +03:00 |
| Zhenzhong Xu | 4e1237d410 | refactored GaqGen | 2024-10-21 10:46:12 +03:00 |
| Zhenzhong Xu | 58ff7d9518 | moved HUGGINGFACEHUB_API_TOKEN | 2024-10-21 10:41:20 +03:00 |
| Zhenzhong Xu | 9ee1a7410b | rename | 2024-10-21 10:31:27 +03:00 |
| Zhenzhong Xu | 24166615d7 | removed spec | 2024-10-21 09:01:00 +03:00 |
| Zhenzhong Xu | a0b2263fd3 | updated customize deployment template | 2024-10-21 08:49:38 +03:00 |
| Zhenzhong Xu | 5c2f3f0301 | move image & replicas path | 2024-10-21 07:04:31 +03:00 |
| Zhenzhong Xu | a70775d3d6 | updated chatqna helmcharts image name | 2024-10-21 06:54:27 +03:00 |
| Zhenzhong Xu | 3dd5475773 | updated chatqna helmcharts | 2024-10-21 06:40:46 +03:00 |
| Zhenzhong1 | d6b04b3405 | benchmark helmcharts (#995) (Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>) | 2024-10-21 11:13:24 +08:00 |
450 changed files with 5801 additions and 15452 deletions

View File

@@ -1,2 +0,0 @@
ModelIn
modelin

View File

@@ -40,11 +40,6 @@ on:
default: "main"
required: false
type: string
inject_commit:
default: false
required: false
type: string
jobs:
####################################################################################################
# Image Build
@@ -77,10 +72,6 @@ jobs:
git clone https://github.com/vllm-project/vllm.git
cd vllm && git rev-parse HEAD && cd ../
fi
if [[ $(grep -c "vllm-hpu:" ${docker_compose_path}) != 0 ]]; then
git clone https://github.com/HabanaAI/vllm-fork.git
cd vllm-fork && git rev-parse HEAD && cd ../
fi
git clone https://github.com/opea-project/GenAIComps.git
cd GenAIComps && git checkout ${{ inputs.opea_branch }} && git rev-parse HEAD && cd ../
@@ -92,7 +83,6 @@ jobs:
docker_compose_path: ${{ github.workspace }}/${{ inputs.example }}/docker_image_build/build.yaml
service_list: ${{ inputs.services }}
registry: ${OPEA_IMAGE_REPO}opea
inject_commit: ${{ inputs.inject_commit }}
tag: ${{ inputs.tag }}
####################################################################################################

View File

@@ -90,16 +90,10 @@ jobs:
echo "Validate ${{ inputs.example }} successful!"
else
echo "Validate ${{ inputs.example }} failure!!!"
echo "Check the logs in 'Dump logs when e2e test failed' step!!!"
exit 1
.github/workflows/scripts/k8s-utils.sh dump_all_pod_logs $NAMESPACE
fi
fi
- name: Dump logs when e2e test failed
if: failure()
run: |
.github/workflows/scripts/k8s-utils.sh dump_all_pod_logs $NAMESPACE
- name: Kubectl uninstall
if: always()
run: |
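
The hunk above shows two placements for the pod-log dump on a failed e2e run: inline in the validation step, or as a dedicated `Dump logs when e2e test failed` step guarded by `if: failure()`. The helper script itself is not part of this diff; a rough, hypothetical stand-in for what `dump_all_pod_logs` does is sketched below (the loop is an assumption, not the actual script).

```bash
# Hypothetical stand-in for "k8s-utils.sh dump_all_pod_logs $NAMESPACE":
# print the logs of every pod in the test namespace so failures can be inspected.
NAMESPACE=${NAMESPACE:-default}
for pod in $(kubectl get pods -n "$NAMESPACE" -o name); do
  echo "===== logs for $pod ====="
  kubectl logs -n "$NAMESPACE" "$pod" --all-containers=true || true
done
```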

View File

@@ -119,8 +119,6 @@ jobs:
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
PINECONE_KEY: ${{ secrets.PINECONE_KEY }}
PINECONE_KEY_LANGCHAIN_TEST: ${{ secrets.PINECONE_KEY_LANGCHAIN_TEST }}
SDK_BASE_URL: ${{ secrets.SDK_BASE_URL }}
SERVING_TOKEN: ${{ secrets.SERVING_TOKEN }}
IMAGE_REPO: ${{ inputs.registry }}
IMAGE_TAG: ${{ inputs.tag }}
example: ${{ inputs.example }}
@@ -141,11 +139,7 @@ jobs:
flag=${flag#test_}
yaml_file=$(find . -type f -wholename "*${{ inputs.hardware }}/${flag}.yaml")
echo $yaml_file
container_list=$(cat $yaml_file | grep container_name | cut -d':' -f2)
for container_name in $container_list; do
cid=$(docker ps -aq --filter "name=$container_name")
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid && sleep 1s; fi
done
docker compose -f $yaml_file stop && docker compose -f $yaml_file rm -f || true
docker system prune -f
docker rmi $(docker images --filter reference="*:5000/*/*" -q) || true
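
The hunk above switches between two cleanup styles: grepping `container_name` out of the compose file and stopping each container individually, versus letting `docker compose stop`/`rm` handle the whole stack. A close single-command equivalent is `docker compose down`; a minimal sketch, assuming the same `$yaml_file` variable:

```bash
# Stop and remove every service defined in the compose file, then reclaim disk space
docker compose -f "$yaml_file" down --remove-orphans || true
docker system prune -f
```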

View File

@@ -1,35 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
name: Check Online Document Building
permissions: {}
on:
pull_request:
branches: [main]
paths:
- "**.md"
- "**.rst"
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
path: GenAIExamples
- name: Checkout docs
uses: actions/checkout@v4
with:
repository: opea-project/docs
path: docs
- name: Build Online Document
shell: bash
run: |
echo "build online doc"
cd docs
bash scripts/build.sh

View File

@@ -50,11 +50,6 @@ on:
description: 'OPEA branch for image build'
required: false
type: string
inject_commit:
default: true
description: "inject commit to docker images true or false"
required: false
type: string
permissions: read-all
jobs:
@@ -106,5 +101,4 @@ jobs:
test_k8s: ${{ fromJSON(inputs.test_k8s) }}
test_gmc: ${{ fromJSON(inputs.test_gmc) }}
opea_branch: ${{ inputs.opea_branch }}
inject_commit: ${{ inputs.inject_commit }}
secrets: inherit

View File

@@ -30,12 +30,6 @@ on:
description: 'OPEA branch for image build'
required: false
type: string
inject_commit:
default: true
description: "inject commit to docker images true or false"
required: false
type: string
jobs:
get-test-matrix:
runs-on: ubuntu-latest
@@ -62,5 +56,4 @@ jobs:
services: ${{ inputs.services }}
tag: ${{ inputs.tag }}
opea_branch: ${{ inputs.opea_branch }}
inject_commit: ${{ inputs.inject_commit }}
secrets: inherit

View File

@@ -1,70 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
name: Nightly build/publish latest docker images
on:
schedule:
- cron: "30 13 * * *" # UTC time
workflow_dispatch:
env:
EXAMPLES: "AgentQnA,AudioQnA,ChatQnA,CodeGen,CodeTrans,DocIndexRetriever,DocSum,FaqGen,InstructionTuning,MultimodalQnA,ProductivitySuite,RerankFinetuning,SearchQnA,Translation,VideoQnA,VisualQnA"
TAG: "latest"
PUBLISH_TAGS: "latest"
jobs:
get-build-matrix:
runs-on: ubuntu-latest
outputs:
examples_json: ${{ steps.get-matrix.outputs.examples_json }}
EXAMPLES: ${{ steps.get-matrix.outputs.EXAMPLES }}
TAG: ${{ steps.get-matrix.outputs.TAG }}
PUBLISH_TAGS: ${{ steps.get-matrix.outputs.PUBLISH_TAGS }}
steps:
- name: Create Matrix
id: get-matrix
run: |
examples=($(echo ${EXAMPLES} | tr ',' ' '))
examples_json=$(printf '%s\n' "${examples[@]}" | sort -u | jq -R '.' | jq -sc '.')
echo "examples_json=$examples_json" >> $GITHUB_OUTPUT
echo "EXAMPLES=$EXAMPLES" >> $GITHUB_OUTPUT
echo "TAG=$TAG" >> $GITHUB_OUTPUT
echo "PUBLISH_TAGS=$PUBLISH_TAGS" >> $GITHUB_OUTPUT
build:
needs: get-build-matrix
strategy:
matrix:
example: ${{ fromJSON(needs.get-build-matrix.outputs.examples_json) }}
fail-fast: false
uses: ./.github/workflows/_example-workflow.yml
with:
node: gaudi
example: ${{ matrix.example }}
secrets: inherit
get-image-list:
needs: get-build-matrix
uses: ./.github/workflows/_get-image-list.yml
with:
examples: ${{ needs.get-build-matrix.outputs.EXAMPLES }}
publish:
needs: [get-build-matrix, get-image-list, build]
strategy:
matrix:
image: ${{ fromJSON(needs.get-image-list.outputs.matrix) }}
runs-on: "docker-build-gaudi"
steps:
- uses: docker/login-action@v3.2.0
with:
username: ${{ secrets.DOCKERHUB_USER }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Image Publish
uses: opea-project/validation/actions/image-publish@main
with:
local_image_ref: ${OPEA_IMAGE_REPO}opea/${{ matrix.image }}:${{ needs.get-build-matrix.outputs.TAG }}
image_name: opea/${{ matrix.image }}
publish_tags: ${{ needs.get-build-matrix.outputs.PUBLISH_TAGS }}
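
For reference, the `Create Matrix` step of this nightly workflow turns the comma-separated `EXAMPLES` string into a JSON array that the `build` job consumes as its matrix. Run standalone, the same pipeline behaves as below (the input value is illustrative):

```bash
EXAMPLES="AgentQnA,AudioQnA,ChatQnA"
examples=($(echo ${EXAMPLES} | tr ',' ' '))
# jq -R reads each line as a raw JSON string, -s slurps them into one array, -c prints it compactly
printf '%s\n' "${examples[@]}" | sort -u | jq -R '.' | jq -sc '.'
# prints: ["AgentQnA","AudioQnA","ChatQnA"]
```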

View File

@@ -12,7 +12,7 @@ on:
- "**/tests/test_gmc**"
- "!**.md"
- "!**.txt"
- "!**/kubernetes/**/manifest/**"
- "!**/kubernetes/**/manifests/**"
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

View File

@@ -10,7 +10,7 @@ on:
paths:
- "**/Dockerfile**"
- "**.py"
- "**/kubernetes/**/manifest/**"
- "**/kubernetes/**/manifests/**"
- "**/tests/test_manifest**"
- "!**.md"
- "!**.txt"

View File

@@ -61,14 +61,14 @@ jobs:
changed_files="$(git diff --name-status --diff-filter=ARM ${{ github.event.pull_request.base.sha }} ${merged_commit} | awk '/\.md$/ {print $NF}')"
if [ -n "$changed_files" ]; then
for changed_file in $changed_files; do
# echo $changed_file
echo $changed_file
url_lines=$(grep -H -Eo '\]\(http[s]?://[^)]+\)' "$changed_file" | grep -Ev 'GenAIExamples/blob/main') || true
if [ -n "$url_lines" ]; then
for url_line in $url_lines; do
# echo $url_line
echo $url_line
url=$(echo "$url_line"|cut -d '(' -f2 | cut -d ')' -f1|sed 's/\.git$//')
path=$(echo "$url_line"|cut -d':' -f1 | cut -d'/' -f2-)
response=$(curl -L -s -o /dev/null -w "%{http_code}" "$url")|| true
response=$(curl -L -s -o /dev/null -w "%{http_code}" "$url")
if [ "$response" -ne 200 ]; then
echo "**********Validation failed, try again**********"
response_retry=$(curl -s -o /dev/null -w "%{http_code}" "$url")
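
The two variants above differ only in whether the debug echoes are commented out and whether the first curl is guarded with `|| true`; the underlying link check is the same. A compact sketch of the idea for a single markdown file (the file name is illustrative):

```bash
# Extract absolute links from a markdown file and report each one's HTTP status code
grep -Eo '\]\(http[s]?://[^)]+\)' README.md | while read -r url_line; do
  url=$(echo "$url_line" | cut -d '(' -f2 | cut -d ')' -f1 | sed 's/\.git$//')
  code=$(curl -L -s -o /dev/null -w "%{http_code}" "$url" || true)
  echo "$code $url"
done
```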

View File

@@ -9,15 +9,12 @@ set -e
changed_files=$changed_files
test_mode=$test_mode
run_matrix="{\"include\":["
hardware_list="xeon gaudi" # current support hardware list
examples=$(printf '%s\n' "${changed_files[@]}" | grep '/' | cut -d'/' -f1 | sort -u)
for example in ${examples}; do
cd $WORKSPACE/$example
if [[ ! $(find . -type f | grep ${test_mode}) ]]; then continue; fi
cd tests
ls -l
hardware_list=$(find . -type f -name "test_compose*_on_*.sh" | cut -d/ -f2 | cut -d. -f1 | awk -F'_on_' '{print $2}'| sort -u)
echo "Test supported hardware list = ${hardware_list}"
run_hardware=""
if [[ $(printf '%s\n' "${changed_files[@]}" | grep ${example} | cut -d'/' -f2 | grep -E '*.py|Dockerfile*|ui|docker_image_build' ) ]]; then
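
One variant above derives the supported hardware list from the test script filenames instead of the hard-coded `"xeon gaudi"` string. Run inside a typical `tests/` directory, the pipeline works as follows (the directory contents are illustrative):

```bash
# Given test_compose_on_xeon.sh and test_compose_on_gaudi.sh in the current directory:
find . -type f -name "test_compose*_on_*.sh" \
  | cut -d/ -f2 | cut -d. -f1 | awk -F'_on_' '{print $2}' | sort -u
# prints:
#   gaudi
#   xeon
```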

View File

@@ -81,13 +81,17 @@ flowchart LR
3. Hierarchical agent can further improve performance.
Expert worker agents, such as retrieval agent, knowledge graph agent, SQL agent, etc., can provide high-quality output for different aspects of a complex query, and the supervisor agent can aggregate the information together to provide a comprehensive answer.
## Deployment with docker
### Roadmap
1. Build agent docker image
- v0.9: Worker agent uses open-source websearch tool (duckduckgo), agents use OpenAI GPT-4o-mini as llm backend.
- v1.0: Worker agent uses OPEA retrieval megaservice as tool.
- v1.0 or later: agents use open-source llm backend.
- v1.1 or later: add safeguards
Note: this is optional. The docker images will be automatically pulled when running the docker compose commands. This step is only needed if pulling images failed.
## Getting started
First, clone the opea GenAIComps repo.
1. Build agent docker image </br>
First, clone the opea GenAIComps repo
```
export WORKDIR=<your-work-directory>
@@ -102,63 +106,35 @@ flowchart LR
docker build -t opea/agent-langchain:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/agent/langchain/Dockerfile .
```
2. Set up environment for this example </br>
First, clone this repo.
```
cd $WORKDIR
git clone https://github.com/opea-project/GenAIExamples.git
```
Second, set up env vars.
```
export TOOLSET_PATH=$WORKDIR/GenAIExamples/AgentQnA/tools/
# for using open-source llms
export HUGGINGFACEHUB_API_TOKEN=<your-HF-token>
export HF_CACHE_DIR=<directory-where-llms-are-downloaded> #so that no need to redownload every time
# optional: OPANAI_API_KEY if you want to use OpenAI models
export OPENAI_API_KEY=<your-openai-key>
```
3. Deploy the retrieval tool (i.e., DocIndexRetriever mega-service)
First, launch the mega-service.
```
cd $WORKDIR/GenAIExamples/AgentQnA/retrieval_tool
bash launch_retrieval_tool.sh
```
Then, ingest data into the vector database. Here we provide an example. You can ingest your own data.
```
bash run_ingest_data.sh
```
4. Launch other tools. </br>
2. Launch tool services </br>
In this example, we will use some of the mock APIs provided in the Meta CRAG KDD Challenge to demonstrate the benefits of gaining additional context from mock knowledge graphs.
```
docker run -d -p=8080:8000 docker.io/aicrowd/kdd-cup-24-crag-mock-api:v0
```
5. Launch agent services</br>
We provide two options for `llm_engine` of the agents: 1. open-source LLMs, 2. OpenAI models via API calls.
To use open-source LLMs on Gaudi2, run commands below.
3. Set up environment for this example </br>
First, clone this repo
```
cd $WORKDIR/GenAIExamples/AgentQnA/docker_compose/intel/hpu/gaudi
bash launch_tgi_gaudi.sh
bash launch_agent_service_tgi_gaudi.sh
cd $WORKDIR
git clone https://github.com/opea-project/GenAIExamples.git
```
To use OpenAI models, run commands below.
Second, set up env vars
```
cd $WORKDIR/GenAIExamples/AgentQnA/docker_compose/intel/cpu/xeon
export TOOLSET_PATH=$WORKDIR/GenAIExamples/AgentQnA/tools/
# optional: OPANAI_API_KEY
export OPENAI_API_KEY=<your-openai-key>
```
4. Launch agent services</br>
The configurations of the supervisor agent and the worker agent are defined in the docker-compose yaml file. We currently use openAI GPT-4o-mini as LLM, and we plan to add support for llama3.1-70B-instruct (served by TGI-Gaudi) in a subsequent release.
To use openai llm, run command below.
```
cd docker_compose/intel/cpu/xeon
bash launch_agent_service_openai.sh
```
@@ -167,12 +143,10 @@ flowchart LR
First look at logs of the agent docker containers:
```
# worker agent
docker logs rag-agent-endpoint
docker logs docgrader-agent-endpoint
```
```
# supervisor agent
docker logs react-agent-endpoint
```
@@ -196,4 +170,4 @@ curl http://${ip_address}:9090/v1/chat/completions -X POST -H "Content-Type: app
## How to register your own tools with agent
You can take a look at the tools yaml and python files in this example. For more details, please refer to the "Provide your own tools" section in the instructions [here](https://github.com/opea-project/GenAIComps/tree/main/comps/agent/langchain/README.md).
You can take a look at the tools yaml and python files in this example. For more details, please refer to the "Provide your own tools" section in the instructions [here](https://github.com/opea-project/GenAIComps/tree/main/comps/agent/langchain/README.md#5-customize-agent-strategy).
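
Both versions of this README end by exercising the agents over HTTP. As a quick smoke test of the worker agent, a curl sketch that matches the payload used by the test scripts elsewhere in this diff (port 9095 is the worker agent port from the compose files above):

```bash
export ip_address=$(hostname -I | awk '{print $1}')
# Query the worker agent endpoint directly
curl http://${ip_address}:9095/v1/chat/completions \
  -X POST \
  -H "Content-Type: application/json" \
  -d '{"query": "Tell me about Michael Jackson song Thriller"}'
```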

View File

@@ -1,3 +0,0 @@
# Deployment on Xeon
We deploy the retrieval tool on Xeon. For LLMs, we support OpenAI models via API calls. For instructions on using open-source LLMs, please refer to the deployment guide [here](../../../../README.md).

View File

@@ -2,10 +2,11 @@
# SPDX-License-Identifier: Apache-2.0
services:
worker-rag-agent:
worker-docgrader-agent:
image: opea/agent-langchain:latest
container_name: rag-agent-endpoint
container_name: docgrader-agent-endpoint
volumes:
- ${WORKDIR}/GenAIComps/comps/agent/langchain/:/home/user/comps/agent/langchain/
- ${TOOLSET_PATH}:/home/user/tools/
ports:
- "9095:9095"
@@ -35,9 +36,8 @@ services:
supervisor-react-agent:
image: opea/agent-langchain:latest
container_name: react-agent-endpoint
depends_on:
- worker-rag-agent
volumes:
- ${WORKDIR}/GenAIComps/comps/agent/langchain/:/home/user/comps/agent/langchain/
- ${TOOLSET_PATH}:/home/user/tools/
ports:
- "9090:9090"

View File

@@ -7,7 +7,7 @@ export recursion_limit_worker=12
export recursion_limit_supervisor=10
export model="gpt-4o-mini-2024-07-18"
export temperature=0
export max_new_tokens=4096
export max_new_tokens=512
export OPENAI_API_KEY=${OPENAI_API_KEY}
export WORKER_AGENT_URL="http://${ip_address}:9095/v1/chat/completions"
export RETRIEVAL_TOOL_URL="http://${ip_address}:8889/v1/retrievaltool"

View File

@@ -2,9 +2,37 @@
# SPDX-License-Identifier: Apache-2.0
services:
worker-rag-agent:
tgi-server:
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
container_name: tgi-server
ports:
- "8085:80"
volumes:
- ${HF_CACHE_DIR}:/data
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
HF_HUB_DISABLE_PROGRESS_BARS: 1
HF_HUB_ENABLE_HF_TRANSFER: 0
HABANA_VISIBLE_DEVICES: all
OMPI_MCA_btl_vader_single_copy_mechanism: none
PT_HPU_ENABLE_LAZY_COLLECTIVES: true
ENABLE_HPU_GRAPH: true
LIMIT_HPU_GRAPH: true
USE_FLASH_ATTENTION: true
FLASH_ATTENTION_RECOMPUTE: true
runtime: habana
cap_add:
- SYS_NICE
ipc: host
command: --model-id ${LLM_MODEL_ID} --max-input-length 4096 --max-total-tokens 8192 --sharded true --num-shard ${NUM_SHARDS}
worker-docgrader-agent:
image: opea/agent-langchain:latest
container_name: rag-agent-endpoint
container_name: docgrader-agent-endpoint
depends_on:
- tgi-server
volumes:
# - ${WORKDIR}/GenAIExamples/AgentQnA/docker_image_build/GenAIComps/comps/agent/langchain/:/home/user/comps/agent/langchain/
- ${TOOLSET_PATH}:/home/user/tools/
@@ -13,7 +41,7 @@ services:
ipc: host
environment:
ip_address: ${ip_address}
strategy: rag_agent_llama
strategy: rag_agent
recursion_limit: ${recursion_limit_worker}
llm_engine: tgi
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
@@ -38,7 +66,8 @@ services:
image: opea/agent-langchain:latest
container_name: react-agent-endpoint
depends_on:
- worker-rag-agent
- tgi-server
- worker-docgrader-agent
volumes:
# - ${WORKDIR}/GenAIExamples/AgentQnA/docker_image_build/GenAIComps/comps/agent/langchain/:/home/user/comps/agent/langchain/
- ${TOOLSET_PATH}:/home/user/tools/
@@ -47,7 +76,7 @@ services:
ipc: host
environment:
ip_address: ${ip_address}
strategy: react_llama
strategy: react_langgraph
recursion_limit: ${recursion_limit_supervisor}
llm_engine: tgi
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}

View File

@@ -15,7 +15,7 @@ export LLM_MODEL_ID="meta-llama/Meta-Llama-3.1-70B-Instruct"
export NUM_SHARDS=4
export LLM_ENDPOINT_URL="http://${ip_address}:8085"
export temperature=0.01
export max_new_tokens=4096
export max_new_tokens=512
# agent related environment variables
export TOOLSET_PATH=$WORKDIR/GenAIExamples/AgentQnA/tools/
@@ -27,3 +27,17 @@ export RETRIEVAL_TOOL_URL="http://${ip_address}:8889/v1/retrievaltool"
export CRAG_SERVER=http://${ip_address}:8080
docker compose -f compose.yaml up -d
sleep 5s
echo "Waiting tgi gaudi ready"
n=0
until [[ "$n" -ge 100 ]] || [[ $ready == true ]]; do
docker logs tgi-server &> tgi-gaudi-service.log
n=$((n+1))
if grep -q Connected tgi-gaudi-service.log; then
break
fi
sleep 5s
done
sleep 5s
echo "Service started successfully"

View File

@@ -1,25 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# LLM related environment variables
export HF_CACHE_DIR=${HF_CACHE_DIR}
ls $HF_CACHE_DIR
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export LLM_MODEL_ID="meta-llama/Meta-Llama-3.1-70B-Instruct"
export NUM_SHARDS=4
docker compose -f tgi_gaudi.yaml up -d
sleep 5s
echo "Waiting tgi gaudi ready"
n=0
until [[ "$n" -ge 100 ]] || [[ $ready == true ]]; do
docker logs tgi-server &> tgi-gaudi-service.log
n=$((n+1))
if grep -q Connected tgi-gaudi-service.log; then
break
fi
sleep 5s
done
sleep 5s
echo "Service started successfully"

View File

@@ -1,30 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
services:
tgi-server:
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
container_name: tgi-server
ports:
- "8085:80"
volumes:
- ${HF_CACHE_DIR}:/data
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
HF_HUB_DISABLE_PROGRESS_BARS: 1
HF_HUB_ENABLE_HF_TRANSFER: 0
HABANA_VISIBLE_DEVICES: all
OMPI_MCA_btl_vader_single_copy_mechanism: none
PT_HPU_ENABLE_LAZY_COLLECTIVES: true
ENABLE_HPU_GRAPH: true
LIMIT_HPU_GRAPH: true
USE_FLASH_ATTENTION: true
FLASH_ATTENTION_RECOMPUTE: true
runtime: habana
cap_add:
- SYS_NICE
ipc: host
command: --model-id ${LLM_MODEL_ID} --max-input-length 4096 --max-total-tokens 8192 --sharded true --num-shard ${NUM_SHARDS}

View File

@@ -17,12 +17,6 @@ if [ ! -d "$HF_CACHE_DIR" ]; then
fi
ls $HF_CACHE_DIR
function start_tgi(){
echo "Starting tgi-gaudi server"
cd $WORKDIR/GenAIExamples/AgentQnA/docker_compose/intel/hpu/gaudi
bash launch_tgi_gaudi.sh
}
function start_agent_and_api_server() {
echo "Starting CRAG server"
@@ -31,7 +25,6 @@ function start_agent_and_api_server() {
echo "Starting Agent services"
cd $WORKDIR/GenAIExamples/AgentQnA/docker_compose/intel/hpu/gaudi
bash launch_agent_service_tgi_gaudi.sh
sleep 10
}
function validate() {
@@ -50,22 +43,18 @@ function validate() {
function validate_agent_service() {
echo "----------------Test agent ----------------"
# local CONTENT=$(http_proxy="" curl http://${ip_address}:9095/v1/chat/completions -X POST -H "Content-Type: application/json" -d '{
# "query": "Tell me about Michael Jackson song thriller"
# }')
export agent_port="9095"
local CONTENT=$(python3 $WORKDIR/GenAIExamples/AgentQnA/tests/test.py)
local EXIT_CODE=$(validate "$CONTENT" "Thriller" "rag-agent-endpoint")
docker logs rag-agent-endpoint
local CONTENT=$(http_proxy="" curl http://${ip_address}:9095/v1/chat/completions -X POST -H "Content-Type: application/json" -d '{
"query": "Tell me about Michael Jackson song thriller"
}')
local EXIT_CODE=$(validate "$CONTENT" "Thriller" "react-agent-endpoint")
docker logs docgrader-agent-endpoint
if [ "$EXIT_CODE" == "1" ]; then
exit 1
fi
# local CONTENT=$(http_proxy="" curl http://${ip_address}:9090/v1/chat/completions -X POST -H "Content-Type: application/json" -d '{
# "query": "Tell me about Michael Jackson song thriller"
# }')
export agent_port="9090"
local CONTENT=$(python3 $WORKDIR/GenAIExamples/AgentQnA/tests/test.py)
local CONTENT=$(http_proxy="" curl http://${ip_address}:9090/v1/chat/completions -X POST -H "Content-Type: application/json" -d '{
"query": "Tell me about Michael Jackson song thriller"
}')
local EXIT_CODE=$(validate "$CONTENT" "Thriller" "react-agent-endpoint")
docker logs react-agent-endpoint
if [ "$EXIT_CODE" == "1" ]; then
@@ -75,10 +64,6 @@ function validate_agent_service() {
}
function main() {
echo "==================== Start TGI ===================="
start_tgi
echo "==================== TGI started ===================="
echo "==================== Start agent ===================="
start_agent_and_api_server
echo "==================== Agent started ===================="

View File

@@ -1,25 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import requests
def generate_answer_agent_api(url, prompt):
proxies = {"http": ""}
payload = {
"query": prompt,
}
response = requests.post(url, json=payload, proxies=proxies)
answer = response.json()["text"]
return answer
if __name__ == "__main__":
ip_address = os.getenv("ip_address", "localhost")
agent_port = os.getenv("agent_port", "9095")
url = f"http://{ip_address}:{agent_port}/v1/chat/completions"
prompt = "Tell me about Michael Jackson song thriller"
answer = generate_answer_agent_api(url, prompt)
print(answer)

View File

@@ -19,6 +19,7 @@ function stop_crag() {
function stop_agent_docker() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi/
# docker compose -f compose.yaml down
container_list=$(cat compose.yaml | grep container_name | cut -d':' -f2)
for container_name in $container_list; do
cid=$(docker ps -aq --filter "name=$container_name")
@@ -27,21 +28,11 @@ function stop_agent_docker() {
done
}
function stop_tgi(){
cd $WORKPATH/docker_compose/intel/hpu/gaudi/
container_list=$(cat tgi_gaudi.yaml | grep container_name | cut -d':' -f2)
for container_name in $container_list; do
cid=$(docker ps -aq --filter "name=$container_name")
echo "Stopping container $container_name"
if [[ ! -z "$cid" ]]; then docker rm $cid -f && sleep 1s; fi
done
}
function stop_retrieval_tool() {
echo "Stopping Retrieval tool"
local RETRIEVAL_TOOL_PATH=$WORKPATH/../DocIndexRetriever
cd $RETRIEVAL_TOOL_PATH/docker_compose/intel/cpu/xeon/
# docker compose -f compose.yaml down
container_list=$(cat compose.yaml | grep container_name | cut -d':' -f2)
for container_name in $container_list; do
cid=$(docker ps -aq --filter "name=$container_name")
@@ -52,26 +43,25 @@ function stop_retrieval_tool() {
echo "workpath: $WORKPATH"
echo "=================== Stop containers ===================="
stop_crag
stop_tgi
stop_agent_docker
stop_retrieval_tool
cd $WORKPATH/tests
echo "=================== #1 Building docker images===================="
bash step1_build_images.sh
bash 1_build_images.sh
echo "=================== #1 Building docker images completed===================="
echo "=================== #2 Start retrieval tool===================="
bash step2_start_retrieval_tool.sh
bash 2_start_retrieval_tool.sh
echo "=================== #2 Retrieval tool started===================="
echo "=================== #3 Ingest data and validate retrieval===================="
bash step3_ingest_data_and_validate_retrieval.sh
bash 3_ingest_data_and_validate_retrieval.sh
echo "=================== #3 Data ingestion and validation completed===================="
echo "=================== #4 Start agent and API server===================="
bash step4_launch_and_validate_agent_tgi.sh
bash 4_launch_and_validate_agent_tgi.sh
echo "=================== #4 Agent test passed ===================="
echo "=================== #5 Stop agent and API server===================="
@@ -80,6 +70,4 @@ stop_agent_docker
stop_retrieval_tool
echo "=================== #5 Agent and API server stopped===================="
echo y | docker system prune
echo "ALL DONE!"

View File

@@ -25,7 +25,7 @@ get_billboard_rank_date:
args_schema:
rank:
type: int
description: the rank of interest, for example 1 for top 1
description: song name
date:
type: str
description: date

View File

@@ -12,31 +12,16 @@ def search_knowledge_base(query: str) -> str:
print(url)
proxies = {"http": ""}
payload = {
"messages": query,
"text": query,
}
response = requests.post(url, json=payload, proxies=proxies)
print(response)
if "documents" in response.json():
docs = response.json()["documents"]
context = ""
for i, doc in enumerate(docs):
if i == 0:
context = doc
else:
context += "\n" + doc
# print(context)
return context
elif "text" in response.json():
return response.json()["text"]
elif "reranked_docs" in response.json():
docs = response.json()["reranked_docs"]
context = ""
for i, doc in enumerate(docs):
if i == 0:
context = doc["text"]
else:
context += "\n" + doc["text"]
# print(context)
return context
else:
return "Error parsing response from the knowledge base."
docs = response.json()["documents"]
context = ""
for i, doc in enumerate(docs):
if i == 0:
context = doc
else:
context += "\n" + doc
print(context)
return context
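
The two versions of `search_knowledge_base` above post to the DocIndexRetriever endpoint with different payload keys (`messages` versus `text`) and handle a different set of response shapes. For a quick manual check of that endpoint, a curl sketch; the payload key should match whichever version of the tool is deployed:

```bash
# Hit the retrieval tool directly (RETRIEVAL_TOOL_URL is http://${ip_address}:8889/v1/retrievaltool above)
curl http://${ip_address}:8889/v1/retrievaltool \
  -X POST \
  -H "Content-Type: application/json" \
  -d '{"text": "Tell me about Michael Jackson song Thriller"}'
```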

View File

@@ -36,9 +36,9 @@ Evaluate the performance with the LLM:
```py
# validate the offline model
# python offline_eval.py
# python offline_evaluate.py
# validate the online asr microservice accuracy
python online_eval.py
python online_evaluate.py
```
### Performance Result

View File

@@ -2,4 +2,4 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
python online_eval.py
python online_evaluate.py

View File

@@ -41,7 +41,7 @@ services:
environment:
TTS_ENDPOINT: ${TTS_ENDPOINT}
tgi-service:
image: ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu
image: ghcr.io/huggingface/text-generation-inference:sha-e4201f4-intel-cpu
container_name: tgi-service
ports:
- "3006:80"

View File

@@ -26,7 +26,7 @@ services:
https_proxy: ${https_proxy}
restart: unless-stopped
tgi-service:
image: ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu
image: ghcr.io/huggingface/text-generation-inference:sha-e4201f4-intel-cpu
container_name: tgi-service
ports:
- "3006:80"

View File

@@ -7,14 +7,14 @@
## Deploy On Xeon
```
cd GenAIExamples/AudioQnA/kubernetes/intel/cpu/xeon/manifest
cd GenAIExamples/AudioQnA/kubernetes/intel/cpu/xeon/manifests
export HUGGINGFACEHUB_API_TOKEN="YourOwnToken"
sed -i "s/insert-your-huggingface-token-here/${HUGGINGFACEHUB_API_TOKEN}/g" audioqna.yaml
kubectl apply -f audioqna.yaml
```
## Deploy On Gaudi
```
cd GenAIExamples/AudioQnA/kubernetes/intel/hpu/gaudi/manifest
cd GenAIExamples/AudioQnA/kubernetes/intel/hpu/gaudi/manifests
export HUGGINGFACEHUB_API_TOKEN="YourOwnToken"
sed -i "s/insert-your-huggingface-token-here/${HUGGINGFACEHUB_API_TOKEN}/g" audioqna.yaml
kubectl apply -f audioqna.yaml

View File

@@ -247,7 +247,7 @@ spec:
- envFrom:
- configMapRef:
name: audio-qna-config
image: "ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu"
image: "ghcr.io/huggingface/text-generation-inference:sha-e4201f4-intel-cpu"
name: llm-dependency-deploy-demo
securityContext:
capabilities:

View File

@@ -1,8 +0,0 @@
*.safetensors
*.bin
*.model
*.log
docker_compose/intel/cpu/xeon/data
docker_compose/intel/hpu/gaudi/data
inputs/
outputs/

View File

@@ -1,33 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
FROM python:3.11-slim
RUN apt-get update -y && apt-get install -y --no-install-recommends --fix-missing \
libgl1-mesa-glx \
libjemalloc-dev \
vim \
git
RUN useradd -m -s /bin/bash user && \
mkdir -p /home/user && \
chown -R user /home/user/
WORKDIR /home/user/
RUN git clone https://github.com/opea-project/GenAIComps.git
WORKDIR /home/user/GenAIComps
RUN pip install --no-cache-dir --upgrade pip && \
pip install --no-cache-dir -r /home/user/GenAIComps/requirements.txt
COPY ./avatarchatbot.py /home/user/avatarchatbot.py
ENV PYTHONPATH=$PYTHONPATH:/home/user/GenAIComps
USER user
WORKDIR /home/user
ENTRYPOINT ["python", "avatarchatbot.py"]

View File

@@ -1,105 +0,0 @@
# AvatarChatbot Application
The AvatarChatbot service can be effortlessly deployed on either Intel Gaudi2 or Intel XEON Scalable Processors.
## AI Avatar Workflow
The AI Avatar example is implemented using both megaservices and the component-level microservices defined in [GenAIComps](https://github.com/opea-project/GenAIComps). The flow chart below shows the information flow between different megaservices and microservices for this example.
```mermaid
---
config:
flowchart:
nodeSpacing: 100
rankSpacing: 100
curve: linear
themeVariables:
fontSize: 42px
---
flowchart LR
classDef blue fill:#ADD8E6,stroke:#ADD8E6,stroke-width:2px,fill-opacity:0.5
classDef thistle fill:#D8BFD8,stroke:#ADD8E6,stroke-width:2px,fill-opacity:0.5
classDef orange fill:#FBAA60,stroke:#ADD8E6,stroke-width:2px,fill-opacity:0.5
classDef orchid fill:#C26DBC,stroke:#ADD8E6,stroke-width:2px,fill-opacity:0.5
classDef invisible fill:transparent,stroke:transparent;
style AvatarChatbot-Megaservice stroke:#000000
subgraph AvatarChatbot-Megaservice["AvatarChatbot Megaservice"]
direction LR
ASR([ASR Microservice]):::blue
LLM([LLM Microservice]):::blue
TTS([TTS Microservice]):::blue
animation([Animation Microservice]):::blue
end
subgraph UserInterface["User Interface"]
direction LR
invis1[ ]:::invisible
USER1([User Audio Query]):::orchid
USER2([User Image/Video Query]):::orchid
UI([UI server<br>]):::orchid
end
GW([AvatarChatbot GateWay<br>]):::orange
subgraph .
direction LR
X([OPEA Microservice]):::blue
Y{{Open Source Service}}:::thistle
Z([OPEA Gateway]):::orange
Z1([UI]):::orchid
end
WHISPER{{Whisper service}}:::thistle
TGI{{LLM service}}:::thistle
T5{{Speecht5 service}}:::thistle
WAV2LIP{{Wav2Lip service}}:::thistle
%% Connections %%
direction LR
USER1 -->|1| UI
UI -->|2| GW
GW <==>|3| AvatarChatbot-Megaservice
ASR ==>|4| LLM ==>|5| TTS ==>|6| animation
direction TB
ASR <-.->|3'| WHISPER
LLM <-.->|4'| TGI
TTS <-.->|5'| T5
animation <-.->|6'| WAV2LIP
USER2 -->|1| UI
UI <-.->|6'| WAV2LIP
```
## Deploy AvatarChatbot Service
The AvatarChatbot service can be deployed on either Intel Gaudi2 AI Accelerator or Intel Xeon Scalable Processor.
### Deploy AvatarChatbot on Gaudi
Refer to the [Gaudi Guide](./docker_compose/intel/hpu/gaudi/README.md) for instructions on deploying AvatarChatbot on Gaudi, and on setting up an UI for the application.
### Deploy AvatarChatbot on Xeon
Refer to the [Xeon Guide](./docker_compose/intel/cpu/xeon/README.md) for instructions on deploying AvatarChatbot on Xeon.
## Supported Models
### ASR
The default model is [openai/whisper-small](https://huggingface.co/openai/whisper-small). It also supports all models in the Whisper family, such as `openai/whisper-large-v3`, `openai/whisper-medium`, `openai/whisper-base`, `openai/whisper-tiny`, etc.
To replace the model, please edit the `compose.yaml` and add the `command` line to pass the name of the model you want to use:
```yaml
services:
whisper-service:
...
command: --model_name_or_path openai/whisper-tiny
```
### TTS
The default model is [microsoft/SpeechT5](https://huggingface.co/microsoft/speecht5_tts). We currently do not support replacing the model. More models under the commercial license will be added in the future.
### Animation
The default model is [Rudrabha/Wav2Lip](https://github.com/Rudrabha/Wav2Lip) and [TencentARC/GFPGAN](https://github.com/TencentARC/GFPGAN). We currently do not support replacing the model. More models under the commercial license such as [OpenTalker/SadTalker](https://github.com/OpenTalker/SadTalker) will be added in the future.

File diff suppressed because one or more lines are too long (3 files).

Binary files not shown: 13 image files (previous sizes ranging from 20 KiB to 2.5 MiB).

View File

@@ -1,93 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import asyncio
import os
import sys
from comps import AvatarChatbotGateway, MicroService, ServiceOrchestrator, ServiceType
MEGA_SERVICE_HOST_IP = os.getenv("MEGA_SERVICE_HOST_IP", "0.0.0.0")
MEGA_SERVICE_PORT = int(os.getenv("MEGA_SERVICE_PORT", 8888))
ASR_SERVICE_HOST_IP = os.getenv("ASR_SERVICE_HOST_IP", "0.0.0.0")
ASR_SERVICE_PORT = int(os.getenv("ASR_SERVICE_PORT", 9099))
LLM_SERVICE_HOST_IP = os.getenv("LLM_SERVICE_HOST_IP", "0.0.0.0")
LLM_SERVICE_PORT = int(os.getenv("LLM_SERVICE_PORT", 9000))
TTS_SERVICE_HOST_IP = os.getenv("TTS_SERVICE_HOST_IP", "0.0.0.0")
TTS_SERVICE_PORT = int(os.getenv("TTS_SERVICE_PORT", 9088))
ANIMATION_SERVICE_HOST_IP = os.getenv("ANIMATION_SERVICE_HOST_IP", "0.0.0.0")
ANIMATION_SERVICE_PORT = int(os.getenv("ANIMATION_SERVICE_PORT", 9066))
def check_env_vars(env_var_list):
for var in env_var_list:
if os.getenv(var) is None:
print(f"Error: The environment variable '{var}' is not set.")
sys.exit(1) # Exit the program with a non-zero status code
print("All environment variables are set.")
class AvatarChatbotService:
def __init__(self, host="0.0.0.0", port=8000):
self.host = host
self.port = port
self.megaservice = ServiceOrchestrator()
def add_remote_service(self):
asr = MicroService(
name="asr",
host=ASR_SERVICE_HOST_IP,
port=ASR_SERVICE_PORT,
endpoint="/v1/audio/transcriptions",
use_remote_service=True,
service_type=ServiceType.ASR,
)
llm = MicroService(
name="llm",
host=LLM_SERVICE_HOST_IP,
port=LLM_SERVICE_PORT,
endpoint="/v1/chat/completions",
use_remote_service=True,
service_type=ServiceType.LLM,
)
tts = MicroService(
name="tts",
host=TTS_SERVICE_HOST_IP,
port=TTS_SERVICE_PORT,
endpoint="/v1/audio/speech",
use_remote_service=True,
service_type=ServiceType.TTS,
)
animation = MicroService(
name="animation",
host=ANIMATION_SERVICE_HOST_IP,
port=ANIMATION_SERVICE_PORT,
endpoint="/v1/animation",
use_remote_service=True,
service_type=ServiceType.ANIMATION,
)
self.megaservice.add(asr).add(llm).add(tts).add(animation)
self.megaservice.flow_to(asr, llm)
self.megaservice.flow_to(llm, tts)
self.megaservice.flow_to(tts, animation)
self.gateway = AvatarChatbotGateway(megaservice=self.megaservice, host="0.0.0.0", port=self.port)
if __name__ == "__main__":
check_env_vars(
[
"MEGA_SERVICE_HOST_IP",
"MEGA_SERVICE_PORT",
"ASR_SERVICE_HOST_IP",
"ASR_SERVICE_PORT",
"LLM_SERVICE_HOST_IP",
"LLM_SERVICE_PORT",
"TTS_SERVICE_HOST_IP",
"TTS_SERVICE_PORT",
"ANIMATION_SERVICE_HOST_IP",
"ANIMATION_SERVICE_PORT",
]
)
avatarchatbot = AvatarChatbotService(host=MEGA_SERVICE_HOST_IP, port=MEGA_SERVICE_PORT)
avatarchatbot.add_remote_service()

View File

@@ -1,210 +0,0 @@
# Build Mega Service of AvatarChatbot on Xeon
This document outlines the deployment process for a AvatarChatbot application utilizing the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline on Intel Xeon server.
## 🚀 Build Docker images
### 1. Source Code install GenAIComps
```bash
git clone https://github.com/opea-project/GenAIComps.git
cd GenAIComps
```
### 2. Build ASR Image
```bash
docker build -t opea/whisper:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/whisper/dependency/Dockerfile .
docker build -t opea/asr:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/whisper/Dockerfile .
```
### 3. Build LLM Image
```bash
docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
```
### 4. Build TTS Image
```bash
docker build -t opea/speecht5:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/speecht5/dependency/Dockerfile .
docker build -t opea/tts:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/speecht5/Dockerfile .
```
### 5. Build Animation Image
```bash
docker build -t opea/wav2lip:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/animation/wav2lip/dependency/Dockerfile .
docker build -t opea/animation:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/animation/wav2lip/Dockerfile .
```
### 6. Build MegaService Docker Image
To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `audioqna.py` Python script. Build the MegaService Docker image using the command below:
```bash
git clone https://github.com/opea-project/GenAIExamples.git
cd GenAIExamples/AvatarChatbot/
docker build --no-cache -t opea/avatarchatbot:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
```
Then run the command `docker images`, you will have following images ready:
1. `opea/whisper:latest`
2. `opea/asr:latest`
3. `opea/llm-tgi:latest`
4. `opea/speecht5:latest`
5. `opea/tts:latest`
6. `opea/wav2lip:latest`
7. `opea/animation:latest`
8. `opea/avatarchatbot:latest`
## 🚀 Set the environment variables
Before starting the services with `docker compose`, you have to recheck the following environment variables.
```bash
export HUGGINGFACEHUB_API_TOKEN=<your_hf_token>
export host_ip=$(hostname -I | awk '{print $1}')
export TGI_LLM_ENDPOINT=http://$host_ip:3006
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
export ASR_ENDPOINT=http://$host_ip:7066
export TTS_ENDPOINT=http://$host_ip:7055
export WAV2LIP_ENDPOINT=http://$host_ip:7860
export MEGA_SERVICE_HOST_IP=${host_ip}
export ASR_SERVICE_HOST_IP=${host_ip}
export TTS_SERVICE_HOST_IP=${host_ip}
export LLM_SERVICE_HOST_IP=${host_ip}
export ANIMATION_SERVICE_HOST_IP=${host_ip}
export MEGA_SERVICE_PORT=8888
export ASR_SERVICE_PORT=3001
export TTS_SERVICE_PORT=3002
export LLM_SERVICE_PORT=3007
export ANIMATION_SERVICE_PORT=3008
```
- Xeon CPU
```bash
export DEVICE="cpu"
export WAV2LIP_PORT=7860
export INFERENCE_MODE='wav2lip_only'
export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
export FACE="assets/img/avatar1.jpg"
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
export AUDIO='None'
export FACESIZE=96
export OUTFILE="/outputs/result.mp4"
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
export UPSCALE_FACTOR=1
export FPS=10
```
## 🚀 Start the MegaService
```bash
cd GenAIExamples/AvatarChatbot/docker_compose/intel/cpu/xeon/
docker compose -f compose.yaml up -d
```
## 🚀 Test MicroServices
```bash
# whisper service
curl http://${host_ip}:7066/v1/asr \
-X POST \
-d '{"audio": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA"}' \
-H 'Content-Type: application/json'
# asr microservice
curl http://${host_ip}:3001/v1/audio/transcriptions \
-X POST \
-d '{"byte_str": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA"}' \
-H 'Content-Type: application/json'
# tgi service
curl http://${host_ip}:3006/generate \
-X POST \
-d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' \
-H 'Content-Type: application/json'
# llm microservice
curl http://${host_ip}:3007/v1/chat/completions\
-X POST \
-d '{"query":"What is Deep Learning?","max_tokens":17,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,"repetition_penalty":1.03,"streaming":false}' \
-H 'Content-Type: application/json'
# speecht5 service
curl http://${host_ip}:7055/v1/tts \
-X POST \
-d '{"text": "Who are you?"}' \
-H 'Content-Type: application/json'
# tts microservice
curl http://${host_ip}:3002/v1/audio/speech \
-X POST \
-d '{"text": "Who are you?"}' \
-H 'Content-Type: application/json'
# wav2lip service
cd ../../../..
curl http://${host_ip}:7860/v1/wav2lip \
-X POST \
-d @assets/audio/sample_minecraft.json \
-H 'Content-Type: application/json'
# animation microservice
curl http://${host_ip}:3008/v1/animation \
-X POST \
-d @assets/audio/sample_question.json \
-H "Content-Type: application/json"
```
## 🚀 Test MegaService
```bash
curl http://${host_ip}:3009/v1/avatarchatbot \
-X POST \
-d @assets/audio/sample_whoareyou.json \
-H 'Content-Type: application/json'
```
If the megaservice is running properly, you should see the following output:
```bash
"/outputs/result.mp4"
```
The output file will be saved in the current working directory, as `${PWD}` is mapped to `/outputs` inside the wav2lip-service Docker container.
## Gradio UI
```bash
cd $WORKPATH/GenAIExamples/AvatarChatbot
python3 ui/gradio/app_gradio_demo_avatarchatbot.py
```
The UI can be viewed at http://${host_ip}:7861
<img src="../../../../assets/img/UI.png" alt="UI Example" width="60%">
In the current version v1.0, you need to set the avatar figure image/video and the DL model choice in the environment variables before starting AvatarChatbot backend service and running the UI. Please just customize the audio question in the UI.
\*\* We will enable change of avatar figure between runs in v2.0
## Troubleshooting
```bash
cd GenAIExamples/AvatarChatbot/tests
export IMAGE_REPO="opea"
export IMAGE_TAG="latest"
export HUGGINGFACEHUB_API_TOKEN=<your_hf_token>
test_avatarchatbot_on_xeon.sh
```

View File

@@ -1,138 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
services:
whisper-service:
image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
container_name: whisper-service
ports:
- "7066:7066"
ipc: host
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
restart: unless-stopped
asr:
image: ${REGISTRY:-opea}/asr:${TAG:-latest}
container_name: asr-service
ports:
- "3001:9099"
ipc: host
environment:
ASR_ENDPOINT: ${ASR_ENDPOINT}
speecht5-service:
image: ${REGISTRY:-opea}/speecht5:${TAG:-latest}
container_name: speecht5-service
ports:
- "7055:7055"
ipc: host
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
restart: unless-stopped
tts:
image: ${REGISTRY:-opea}/tts:${TAG:-latest}
container_name: tts-service
ports:
- "3002:9088"
ipc: host
environment:
TTS_ENDPOINT: ${TTS_ENDPOINT}
tgi-service:
image: ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu
container_name: tgi-service
ports:
- "3006:80"
volumes:
- "./data:/data"
shm_size: 1g
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
llm:
image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
container_name: llm-tgi-server
depends_on:
- tgi-service
ports:
- "3007:9000"
ipc: host
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
restart: unless-stopped
wav2lip-service:
image: ${REGISTRY:-opea}/wav2lip:${TAG:-latest}
container_name: wav2lip-service
ports:
- "7860:7860"
ipc: host
volumes:
- ${PWD}:/outputs
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
DEVICE: ${DEVICE}
INFERENCE_MODE: ${INFERENCE_MODE}
CHECKPOINT_PATH: ${CHECKPOINT_PATH}
FACE: ${FACE}
AUDIO: ${AUDIO}
FACESIZE: ${FACESIZE}
OUTFILE: ${OUTFILE}
GFPGAN_MODEL_VERSION: ${GFPGAN_MODEL_VERSION}
UPSCALE_FACTOR: ${UPSCALE_FACTOR}
FPS: ${FPS}
WAV2LIP_PORT: ${WAV2LIP_PORT}
restart: unless-stopped
animation:
image: ${REGISTRY:-opea}/animation:${TAG:-latest}
container_name: animation-server
ports:
- "3008:9066"
ipc: host
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
WAV2LIP_ENDPOINT: ${WAV2LIP_ENDPOINT}
restart: unless-stopped
avatarchatbot-xeon-backend-server:
image: ${REGISTRY:-opea}/avatarchatbot:${TAG:-latest}
container_name: avatarchatbot-xeon-backend-server
depends_on:
- asr
- llm
- tts
- animation
ports:
- "3009:8888"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
- MEGA_SERVICE_PORT=${MEGA_SERVICE_PORT}
- ASR_SERVICE_HOST_IP=${ASR_SERVICE_HOST_IP}
- ASR_SERVICE_PORT=${ASR_SERVICE_PORT}
- LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
- LLM_SERVICE_PORT=${LLM_SERVICE_PORT}
- TTS_SERVICE_HOST_IP=${TTS_SERVICE_HOST_IP}
- TTS_SERVICE_PORT=${TTS_SERVICE_PORT}
- ANIMATION_SERVICE_HOST_IP=${ANIMATION_SERVICE_HOST_IP}
- ANIMATION_SERVICE_PORT=${ANIMATION_SERVICE_PORT}
ipc: host
restart: always
networks:
default:
driver: bridge

View File

@@ -1,220 +0,0 @@
# Build Mega Service of AvatarChatbot on Gaudi
This document outlines the deployment process for a AvatarChatbot application utilizing the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline on Intel Gaudi server.
## 🚀 Build Docker images
### 1. Source Code install GenAIComps
```bash
git clone https://github.com/opea-project/GenAIComps.git
cd GenAIComps
```
### 2. Build ASR Image
```bash
docker build -t opea/whisper-gaudi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/whisper/dependency/Dockerfile.intel_hpu .
docker build -t opea/asr:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/whisper/Dockerfile .
```
### 3. Build LLM Image
```bash
docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
```
### 4. Build TTS Image
```bash
docker build -t opea/speecht5-gaudi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/speecht5/dependency/Dockerfile.intel_hpu .
docker build -t opea/tts:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/tts/speecht5/Dockerfile .
```
### 5. Build Animation Image
```bash
docker build -t opea/wav2lip-gaudi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/animation/wav2lip/dependency/Dockerfile.intel_hpu .
docker build -t opea/animation:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/animation/wav2lip/Dockerfile .
```
### 6. Build MegaService Docker Image
To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `audioqna.py` Python script. Build the MegaService Docker image using the command below:
```bash
git clone https://github.com/opea-project/GenAIExamples.git
cd GenAIExamples/AvatarChatbot/
docker build --no-cache -t opea/avatarchatbot:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
```
Then run the command `docker images`, you will have following images ready:
1. `opea/whisper-gaudi:latest`
2. `opea/asr:latest`
3. `opea/llm-tgi:latest`
4. `opea/speecht5-gaudi:latest`
5. `opea/tts:latest`
6. `opea/wav2lip-gaudi:latest`
7. `opea/animation:latest`
8. `opea/avatarchatbot:latest`
## 🚀 Set the environment variables
Before starting the services with `docker compose`, you have to recheck the following environment variables.
```bash
export HUGGINGFACEHUB_API_TOKEN=<your_hf_token>
export host_ip=$(hostname -I | awk '{print $1}')
export TGI_LLM_ENDPOINT=http://$host_ip:3006
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
export ASR_ENDPOINT=http://$host_ip:7066
export TTS_ENDPOINT=http://$host_ip:7055
export WAV2LIP_ENDPOINT=http://$host_ip:7860
export MEGA_SERVICE_HOST_IP=${host_ip}
export ASR_SERVICE_HOST_IP=${host_ip}
export TTS_SERVICE_HOST_IP=${host_ip}
export LLM_SERVICE_HOST_IP=${host_ip}
export ANIMATION_SERVICE_HOST_IP=${host_ip}
export MEGA_SERVICE_PORT=8888
export ASR_SERVICE_PORT=3001
export TTS_SERVICE_PORT=3002
export LLM_SERVICE_PORT=3007
export ANIMATION_SERVICE_PORT=3008
```
- Gaudi2 HPU
```bash
export DEVICE="hpu"
export WAV2LIP_PORT=7860
export INFERENCE_MODE='wav2lip_only'
export CHECKPOINT_PATH='/usr/local/lib/python3.10/dist-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
export FACE="assets/img/avatar1.jpg"
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
export AUDIO='None'
export FACESIZE=96
export OUTFILE="/outputs/result.mp4"
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
export UPSCALE_FACTOR=1
export FPS=10
```
## 🚀 Start the MegaService
```bash
cd GenAIExamples/AvatarChatbot/docker_compose/intel/hpu/gaudi/
docker compose -f compose.yaml up -d
```
## 🚀 Test MicroServices
```bash
# whisper service
curl http://${host_ip}:7066/v1/asr \
-X POST \
-d '{"audio": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA"}' \
-H 'Content-Type: application/json'
# asr microservice
curl http://${host_ip}:3001/v1/audio/transcriptions \
-X POST \
-d '{"byte_str": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA"}' \
-H 'Content-Type: application/json'
# tgi service
curl http://${host_ip}:3006/generate \
-X POST \
-d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' \
-H 'Content-Type: application/json'
# llm microservice
curl http://${host_ip}:3007/v1/chat/completions\
-X POST \
-d '{"query":"What is Deep Learning?","max_tokens":17,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,"repetition_penalty":1.03,"streaming":false}' \
-H 'Content-Type: application/json'
# speecht5 service
curl http://${host_ip}:7055/v1/tts \
-X POST \
-d '{"text": "Who are you?"}' \
-H 'Content-Type: application/json'
# tts microservice
curl http://${host_ip}:3002/v1/audio/speech \
-X POST \
-d '{"text": "Who are you?"}' \
-H 'Content-Type: application/json'
# wav2lip service
cd ../../../..
curl http://${host_ip}:7860/v1/wav2lip \
-X POST \
-d @assets/audio/sample_minecraft.json \
-H 'Content-Type: application/json'
# animation microservice
curl http://${host_ip}:3008/v1/animation \
-X POST \
-d @assets/audio/sample_question.json \
-H "Content-Type: application/json"
```
## 🚀 Test MegaService
```bash
curl http://${host_ip}:3009/v1/avatarchatbot \
-X POST \
-d @assets/audio/sample_whoareyou.json \
-H 'Content-Type: application/json'
```
If the megaservice is running properly, you should see the following output:
```bash
"/outputs/result.mp4"
```
The output file will be saved in the current working directory, as `${PWD}` is mapped to `/outputs` inside the wav2lip-service Docker container.
## Gradio UI
```bash
sudo apt update
sudo apt install -y yasm pkg-config libx264-dev nasm
cd $WORKPATH
git clone https://github.com/FFmpeg/FFmpeg.git
cd FFmpeg
sudo ./configure --enable-gpl --enable-libx264 && sudo make -j$(nproc-1) && sudo make install && hash -r
pip install gradio==4.38.1 soundfile
```
```bash
cd $WORKPATH/GenAIExamples/AvatarChatbot
python3 ui/gradio/app_gradio_demo_avatarchatbot.py
```
The UI can be viewed at http://${host_ip}:7861
<img src="../../../../assets/img/UI.png" alt="UI Example" width="60%">
In the current version v1.0, you need to set the avatar figure image/video and the DL model choice in the environment variables before starting AvatarChatbot backend service and running the UI. Please just customize the audio question in the UI.
\*\* We will enable change of avatar figure between runs in v2.0
## Troubleshooting
```bash
cd GenAIExamples/AvatarChatbot/tests
export IMAGE_REPO="opea"
export IMAGE_TAG="latest"
export HUGGINGFACEHUB_API_TOKEN=<your_hf_token>
test_avatarchatbot_on_gaudi.sh
```

View File

@@ -1,171 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
version: "3.8"
services:
whisper-service:
image: ${REGISTRY:-opea}/whisper-gaudi:${TAG:-latest}
container_name: whisper-service
ports:
- "7066:7066"
ipc: host
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
HABANA_VISIBLE_MODULES: all
OMPI_MCA_btl_vader_single_copy_mechanism: none
runtime: habana
cap_add:
- SYS_NICE
restart: unless-stopped
asr:
image: ${REGISTRY:-opea}/asr:${TAG:-latest}
container_name: asr-service
ports:
- "3001:9099"
ipc: host
environment:
ASR_ENDPOINT: ${ASR_ENDPOINT}
speecht5-service:
image: ${REGISTRY:-opea}/speecht5-gaudi:${TAG:-latest}
container_name: speecht5-service
ports:
- "7055:7055"
ipc: host
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
HABANA_VISIBLE_MODULES: all
OMPI_MCA_btl_vader_single_copy_mechanism: none
runtime: habana
cap_add:
- SYS_NICE
restart: unless-stopped
tts:
image: ${REGISTRY:-opea}/tts:${TAG:-latest}
container_name: tts-service
ports:
- "3002:9088"
ipc: host
environment:
TTS_ENDPOINT: ${TTS_ENDPOINT}
tgi-service:
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
container_name: tgi-gaudi-server
ports:
- "3006:80"
volumes:
- "./data:/data"
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
HF_HUB_DISABLE_PROGRESS_BARS: 1
HF_HUB_ENABLE_HF_TRANSFER: 0
HABANA_VISIBLE_MODULES: all
OMPI_MCA_btl_vader_single_copy_mechanism: none
ENABLE_HPU_GRAPH: true
LIMIT_HPU_GRAPH: true
USE_FLASH_ATTENTION: true
FLASH_ATTENTION_RECOMPUTE: true
runtime: habana
cap_add:
- SYS_NICE
ipc: host
command: --model-id ${LLM_MODEL_ID} --max-input-length 128 --max-total-tokens 256
llm:
image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
container_name: llm-tgi-gaudi-server
depends_on:
- tgi-service
ports:
- "3007:9000"
ipc: host
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
restart: unless-stopped
wav2lip-service:
image: ${REGISTRY:-opea}/wav2lip-gaudi:${TAG:-latest}
container_name: wav2lip-service
ports:
- "7860:7860"
ipc: host
volumes:
- ${PWD}:/outputs
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
HABANA_VISIBLE_MODULES: all
OMPI_MCA_btl_vader_single_copy_mechanism: none
DEVICE: ${DEVICE}
INFERENCE_MODE: ${INFERENCE_MODE}
CHECKPOINT_PATH: ${CHECKPOINT_PATH}
FACE: ${FACE}
AUDIO: ${AUDIO}
FACESIZE: ${FACESIZE}
OUTFILE: ${OUTFILE}
GFPGAN_MODEL_VERSION: ${GFPGAN_MODEL_VERSION}
UPSCALE_FACTOR: ${UPSCALE_FACTOR}
FPS: ${FPS}
WAV2LIP_PORT: ${WAV2LIP_PORT}
runtime: habana
cap_add:
- SYS_NICE
restart: unless-stopped
animation:
image: ${REGISTRY:-opea}/animation:${TAG:-latest}
container_name: animation-gaudi-server
ports:
- "3008:9066"
ipc: host
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
HABANA_VISIBLE_MODULES: all
OMPI_MCA_btl_vader_single_copy_mechanism: none
WAV2LIP_ENDPOINT: ${WAV2LIP_ENDPOINT}
runtime: habana
cap_add:
- SYS_NICE
restart: unless-stopped
avatarchatbot-gaudi-backend-server:
image: ${REGISTRY:-opea}/avatarchatbot:${TAG:-latest}
container_name: avatarchatbot-gaudi-backend-server
depends_on:
- asr
- llm
- tts
- animation
ports:
- "3009:8888"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
- MEGA_SERVICE_PORT=${MEGA_SERVICE_PORT}
- ASR_SERVICE_HOST_IP=${ASR_SERVICE_HOST_IP}
- ASR_SERVICE_PORT=${ASR_SERVICE_PORT}
- LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
- LLM_SERVICE_PORT=${LLM_SERVICE_PORT}
- TTS_SERVICE_HOST_IP=${TTS_SERVICE_HOST_IP}
- TTS_SERVICE_PORT=${TTS_SERVICE_PORT}
- ANIMATION_SERVICE_HOST_IP=${ANIMATION_SERVICE_HOST_IP}
- ANIMATION_SERVICE_PORT=${ANIMATION_SERVICE_PORT}
ipc: host
restart: always
networks:
default:
driver: bridge

View File

@@ -1,73 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
services:
avatarchatbot:
build:
args:
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
no_proxy: ${no_proxy}
context: ../
dockerfile: ./Dockerfile
image: ${REGISTRY:-opea}/avatarchatbot:${TAG:-latest}
whisper-gaudi:
build:
context: GenAIComps
dockerfile: comps/asr/whisper/dependency/Dockerfile.intel_hpu
extends: avatarchatbot
image: ${REGISTRY:-opea}/whisper-gaudi:${TAG:-latest}
whisper:
build:
context: GenAIComps
dockerfile: comps/asr/whisper/dependency/Dockerfile
extends: avatarchatbot
image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
asr:
build:
context: GenAIComps
dockerfile: comps/asr/whisper/Dockerfile
extends: avatarchatbot
image: ${REGISTRY:-opea}/asr:${TAG:-latest}
llm-tgi:
build:
context: GenAIComps
dockerfile: comps/llms/text-generation/tgi/Dockerfile
extends: avatarchatbot
image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
speecht5-gaudi:
build:
context: GenAIComps
dockerfile: comps/tts/speecht5/dependency/Dockerfile.intel_hpu
extends: avatarchatbot
image: ${REGISTRY:-opea}/speecht5-gaudi:${TAG:-latest}
speecht5:
build:
context: GenAIComps
dockerfile: comps/tts/speecht5/dependency/Dockerfile
extends: avatarchatbot
image: ${REGISTRY:-opea}/speecht5:${TAG:-latest}
tts:
build:
context: GenAIComps
dockerfile: comps/tts/speecht5/Dockerfile
extends: avatarchatbot
image: ${REGISTRY:-opea}/tts:${TAG:-latest}
wav2lip-gaudi:
build:
context: GenAIComps
dockerfile: comps/animation/wav2lip/dependency/Dockerfile.intel_hpu
extends: avatarchatbot
image: ${REGISTRY:-opea}/wav2lip-gaudi:${TAG:-latest}
wav2lip:
build:
context: GenAIComps
dockerfile: comps/animation/wav2lip/dependency/Dockerfile
extends: avatarchatbot
image: ${REGISTRY:-opea}/wav2lip:${TAG:-latest}
animation:
build:
context: GenAIComps
dockerfile: comps/animation/wav2lip/Dockerfile
extends: avatarchatbot
image: ${REGISTRY:-opea}/animation:${TAG:-latest}

View File

@@ -1,147 +0,0 @@
#!/bin/bash
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
if ls $LOG_PATH/*.log 1> /dev/null 2>&1; then
rm $LOG_PATH/*.log
echo "Log files removed."
else
echo "No log files to remove."
fi
ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() {
cd $WORKPATH/docker_image_build
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="avatarchatbot whisper-gaudi asr llm-tgi speecht5-gaudi tts wav2lip-gaudi animation"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
docker images && sleep 1s
}
function start_services() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
export host_ip=$(hostname -I | awk '{print $1}')
export TGI_LLM_ENDPOINT=http://$host_ip:3006
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
export ASR_ENDPOINT=http://$host_ip:7066
export TTS_ENDPOINT=http://$host_ip:7055
export WAV2LIP_ENDPOINT=http://$host_ip:7860
export MEGA_SERVICE_HOST_IP=${host_ip}
export ASR_SERVICE_HOST_IP=${host_ip}
export TTS_SERVICE_HOST_IP=${host_ip}
export LLM_SERVICE_HOST_IP=${host_ip}
export ANIMATION_SERVICE_HOST_IP=${host_ip}
export MEGA_SERVICE_PORT=8888
export ASR_SERVICE_PORT=3001
export TTS_SERVICE_PORT=3002
export LLM_SERVICE_PORT=3007
export ANIMATION_SERVICE_PORT=3008
export DEVICE="hpu"
export WAV2LIP_PORT=7860
export INFERENCE_MODE='wav2lip+gfpgan'
export CHECKPOINT_PATH='/usr/local/lib/python3.10/dist-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
export FACE="assets/img/avatar1.jpg"
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional; if AUDIO is 'None', the base64 string in the POST request is used as input
export AUDIO='None'
export FACESIZE=96
export OUTFILE="/outputs/result.mp4"
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
export UPSCALE_FACTOR=1
export FPS=10
# Start Docker Containers
docker compose up -d
n=0
until [[ "$n" -ge 100 ]]; do
docker logs tgi-gaudi-server > $LOG_PATH/tgi_service_start.log
if grep -q Connected $LOG_PATH/tgi_service_start.log; then
break
fi
sleep 5s
n=$((n+1))
done
# sleep 5m
echo "All services are up and running"
sleep 5s
}
function validate_megaservice() {
cd $WORKPATH
result=$(http_proxy="" curl http://${ip_address}:3009/v1/avatarchatbot -X POST -d @assets/audio/sample_whoareyou.json -H 'Content-Type: application/json')
echo "result is === $result"
if [[ $result == *"mp4"* ]]; then
echo "Result correct."
else
docker logs whisper-service > $LOG_PATH/whisper-service.log
docker logs asr-service > $LOG_PATH/asr-service.log
docker logs speecht5-service > $LOG_PATH/speecht5-service.log
docker logs tts-service > $LOG_PATH/tts-service.log
docker logs tgi-gaudi-server > $LOG_PATH/tgi-gaudi-server.log
docker logs llm-tgi-gaudi-server > $LOG_PATH/llm-tgi-gaudi-server.log
docker logs wav2lip-service > $LOG_PATH/wav2lip-service.log
docker logs animation-gaudi-server > $LOG_PATH/animation-gaudi-server.log
echo "Result wrong."
exit 1
fi
}
#function validate_frontend() {
#}
function stop_docker() {
cd $WORKPATH/docker_compose/intel/hpu/gaudi
docker compose down
}
function main() {
stop_docker
echo y | docker builder prune --all
echo y | docker image prune
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
start_services
# validate_microservices
validate_megaservice
# validate_frontend
stop_docker
echo y | docker builder prune --all
echo y | docker image prune
}
main

View File

@@ -1,142 +0,0 @@
#!/bin/bash
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set -e
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
if ls $LOG_PATH/*.log 1> /dev/null 2>&1; then
rm $LOG_PATH/*.log
echo "Log files removed."
else
echo "No log files to remove."
fi
ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() {
cd $WORKPATH/docker_image_build
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="avatarchatbot whisper asr llm-tgi speecht5 tts wav2lip animation"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
docker images && sleep 1s
}
function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
export host_ip=$(hostname -I | awk '{print $1}')
export TGI_LLM_ENDPOINT=http://$host_ip:3006
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
export ASR_ENDPOINT=http://$host_ip:7066
export TTS_ENDPOINT=http://$host_ip:7055
export WAV2LIP_ENDPOINT=http://$host_ip:7860
export MEGA_SERVICE_HOST_IP=${host_ip}
export ASR_SERVICE_HOST_IP=${host_ip}
export TTS_SERVICE_HOST_IP=${host_ip}
export LLM_SERVICE_HOST_IP=${host_ip}
export ANIMATION_SERVICE_HOST_IP=${host_ip}
export MEGA_SERVICE_PORT=8888
export ASR_SERVICE_PORT=3001
export TTS_SERVICE_PORT=3002
export LLM_SERVICE_PORT=3007
export ANIMATION_SERVICE_PORT=3008
export DEVICE="cpu"
export WAV2LIP_PORT=7860
export INFERENCE_MODE='wav2lip+gfpgan'
export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
export FACE="assets/img/avatar5.png"
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional; if AUDIO is 'None', the base64 string in the POST request is used as input
export AUDIO='None'
export FACESIZE=96
export OUTFILE="/outputs/result.mp4"
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
export UPSCALE_FACTOR=1
export FPS=10
# Start Docker Containers
docker compose up -d
n=0
until [[ "$n" -ge 100 ]]; do
docker logs tgi-service > $LOG_PATH/tgi_service_start.log
if grep -q Connected $LOG_PATH/tgi_service_start.log; then
break
fi
sleep 5s
n=$((n+1))
done
echo "All services are up and running"
sleep 5s
}
function validate_megaservice() {
cd $WORKPATH
result=$(http_proxy="" curl http://${ip_address}:3009/v1/avatarchatbot -X POST -d @assets/audio/sample_whoareyou.json -H 'Content-Type: application/json')
echo "result is === $result"
if [[ $result == *"mp4"* ]]; then
echo "Result correct."
else
docker logs whisper-service > $LOG_PATH/whisper-service.log
docker logs asr-service > $LOG_PATH/asr-service.log
docker logs speecht5-service > $LOG_PATH/speecht5-service.log
docker logs tts-service > $LOG_PATH/tts-service.log
docker logs tgi-service > $LOG_PATH/tgi-service.log
docker logs llm-tgi-server > $LOG_PATH/llm-tgi-server.log
docker logs wav2lip-service > $LOG_PATH/wav2lip-service.log
docker logs animation-server > $LOG_PATH/animation-server.log
echo "Result wrong."
exit 1
fi
}
#function validate_frontend() {
#}
function stop_docker() {
cd $WORKPATH/docker_compose/intel/cpu/xeon
docker compose down
}
function main() {
stop_docker
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
start_services
# validate_microservices
validate_megaservice
# validate_frontend
stop_docker
echo y | docker builder prune --all
echo y | docker image prune
}
main

View File

@@ -1,349 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import asyncio
import base64
import io
import os
import shutil
import subprocess
import time
import aiohttp
import docker
import ffmpeg
import gradio as gr
import numpy as np
import soundfile as sf
from PIL import Image
# %% Docker Management
def update_env_var_in_container(container_name, env_var, new_value):
return
# %% AudioQnA functions
def preprocess_audio(audio):
"""The audio data is a 16-bit integer array with values ranging from -32768 to 32767 and the shape of the audio data array is (samples,)"""
sr, y = audio
# Convert to normalized float32 audio
y = y.astype(np.float32)
y /= np.max(np.abs(y))
# Save to memory
buf = io.BytesIO()
sf.write(buf, y, sr, format="WAV")
buf.seek(0) # Reset the buffer position to the beginning
# Encode the WAV file to base64 string
base64_bytes = base64.b64encode(buf.read())
base64_string = base64_bytes.decode("utf-8")
return base64_string
def base64_to_int16(base64_string):
wav_bytes = base64.b64decode(base64_string)
buf = io.BytesIO(wav_bytes)
y, sr = sf.read(buf, dtype="int16")
return sr, y
async def transcribe(audio_input, face_input, model_choice):
"""Input: mic audio; Output: ai audio, text, text"""
global ai_chatbot_url, chat_history, count
chat_history = ""
# Preprocess the audio
base64bytestr = preprocess_audio(audio_input)
# Send the audio to the AvatarChatbot backend server endpoint
initial_inputs = {"audio": base64bytestr, "max_tokens": 64}
# TO-DO: update wav2lip-service with the chosen face_input
# update_env_var_in_container("wav2lip-service", "DEVICE", "new_device_value")
async with aiohttp.ClientSession() as session:
async with session.post(ai_chatbot_url, json=initial_inputs) as response:
# Check the response status code
if response.status == 200:
# response_json = await response.json()
# # Decode the base64 string
# sampling_rate, audio_int16 = base64_to_int16(response_json["byte_str"])
# chat_history += f"User: {response_json['query']}\n\n"
# chat_ai = response_json["text"]
# hitted_ends = [",", ".", "?", "!", "。", ";"]
# last_punc_idx = max([chat_ai.rfind(punc) for punc in hitted_ends])
# if last_punc_idx != -1:
# chat_ai = chat_ai[: last_punc_idx + 1]
# chat_history += f"AI: {chat_ai}"
# chat_history = chat_history.replace("OPEX", "OPEA")
# return (sampling_rate, audio_int16) # handle the response
result = await response.text()
return "docker_compose/intel/hpu/gaudi/result.mp4"
else:
return {"error": "Failed to transcribe audio", "status_code": response.status_code}
def resize_image(image_pil, size=(720, 720)):
"""Resize the image to the specified size."""
return image_pil.resize(size, Image.LANCZOS)
def resize_video(video_path, save_path, size=(720, 1280)):
"""Resize the video to the specified size, and save to the save path."""
ffmpeg.input(video_path).output(save_path, vf=f"scale={size[0]}:{size[1]}").overwrite_output().run()
# %% AI Avatar demo function
async def aiavatar_demo(audio_input, face_input, model_choice):
"""Input: mic/preloaded audio, avatar file path;
Output: ai video"""
# Wait for response from AvatarChatbot backend
output_video = await transcribe(audio_input, face_input, model_choice) # output video path
if isinstance(output_video, dict): # in case of an error
return None, None
else:
return output_video
# %% Main
if __name__ == "__main__":
# HOST_IP = os.getenv("host_ip")
HOST_IP = subprocess.check_output("hostname -I | awk '{print $1}'", shell=True).decode("utf-8").strip()
# Fetch the AudioQnA backend server
ai_chatbot_url = f"http://{HOST_IP}:3009/v1/avatarchatbot"
# Collect chat history to print in the interface
chat_history = ""
# Prepare 3 image paths and 3 video paths
# image_pils = [
# Image.open(os.path.join("assets/img/woman1.png")),
# Image.open(os.path.join("assets/img/man1.png")),
# Image.open(os.path.join("assets/img/woman2.png")),
# ]
# video_paths = [
# os.path.join("assets/video/man1.mp4"),
# os.path.join("assets/video/woman2.mp4"),
# os.path.join("assets/video/man4.mp4"),
# ]
def image_to_base64(image_path):
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
# Convert your images to Base64
xeon_base64 = image_to_base64("assets/img/xeon.jpg")
gaudi_base64 = image_to_base64("assets/img/gaudi.png")
# List of prerecorded WAV files containing audio questions
# audio_filepaths = [
# "assets/audio/intel2.wav",
# "assets/audio/intel4.wav",
# ]
# audio_questions = [
# "1. What's the objective of the Open Platform for Enterprise AI? How is it helpful to enterprises building AI solutions?",
# "2. What kinds of Intel AI tools are available to accelerate AI workloads?",
# ]
# Demo frontend
demo = gr.Blocks()
with demo:
# Define processing functions
count = 0
# Make necessary folders:
if not os.path.exists("inputs"):
os.makedirs("inputs")
if not os.path.exists("outputs"):
os.makedirs("outputs")
def initial_process(audio_input, face_input, model_choice):
global count
start_time = time.time()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
video_file = loop.run_until_complete(aiavatar_demo(audio_input, face_input, model_choice))
count += 1
end_time = time.time()
return video_file, f"The entire application took {(end_time - start_time):.1f} seconds"
# def update_selected_image_state(image_index):
# image_index = int(image_index)
# selected_image_state.value = image_index
# # change image_input here
# if image_index < len(image_pils):
# return f"inputs/face_{image_index}.png"
# else:
# return f"inputs/video_{image_index - len(image_pils)}.mp4"
# def update_audio_input(audio_choice):
# if audio_choice:
# audio_index = int(audio_choice.split(".")[0]) - 1
# audio_filepath_gradio = f"inputs/audio_{audio_index:d}.wav"
# shutil.copyfile(audio_filepaths[audio_index], audio_filepath_gradio)
# return audio_filepath_gradio
# UI Components
# Title & Introduction
gr.Markdown("<h1 style='font-size: 36px;'>A PyTorch and OPEA based AI Avatar Audio Chatbot</h1>")
with gr.Row():
with gr.Column(scale=8):
gr.Markdown(
"""
<p style='font-size: 24px;'>Welcome to our AI Avatar Audio Chatbot! This application leverages PyTorch and <strong>OPEA (Open Platform for Enterprise AI) v0.8</strong> to provide you with a human-like conversational experience. It's run on Intel® Gaudi® AI Accelerator and Intel® Xeon® Processor, with hardware and software optimizations.<br>
Please feel free to interact with the AI avatar by choosing your own avatar and talking into the mic.</p>
"""
)
with gr.Column(scale=1):
# with gr.Row():
# gr.Markdown(f"""
# <img src='data:image/png;base64,{opea_qr_base64}' alt='OPEA QR Code' style='width: 150px; height: auto;'>
# """, label="OPEA QR Code")
# gr.Markdown(f"""
# <img src='data:image/png;base64,{opea_gh_qr_base64}' alt='OPEA GitHub QR Code' style='width: 150px; height: auto;'>
# """, label="OPEA GitHub QR Code")
with gr.Row():
gr.Markdown(
f"""
<img src='data:image/png;base64,{gaudi_base64}' alt='Intel®Gaudi' style='width: 120px; height: auto;'>""",
label="Intel®Gaudi",
)
gr.Markdown(
f"""
<img src='data:image/png;base64,{xeon_base64}' alt='Intel®Xeon' style='width: 120px; height: auto;'>""",
label="Intel®Xeon",
)
gr.Markdown("<hr>") # Divider
# Inputs
# Image gallery
selected_image_state = gr.State(value=-1)
image_clicks = []
image_click_buttons = []
video_clicks = []
video_click_buttons = []
with gr.Row():
with gr.Column(scale=1):
audio_input = gr.Audio(
sources=["upload", "microphone"], format="wav", label="🎤 or 📤 for your Input audio!"
)
# audio_choice = gr.Dropdown(
# choices=audio_questions,
# label="Choose an audio question",
# value=None, # default value
# )
# Update audio_input when a selection is made from the dropdown
# audio_choice.change(fn=update_audio_input, inputs=audio_choice, outputs=audio_input)
face_input = gr.File(
file_count="single",
file_types=["image", "video"],
label="Choose an avatar or 📤 an image or video!",
)
model_choice = gr.Dropdown(
choices=["wav2lip", "wav2lip+GAN", "wav2lip+GFPGAN"],
label="Choose a DL model",
)
# with gr.Column(scale=2):
# # Display 3 images and buttons
# with gr.Row():
# for i, image_pil in enumerate(image_pils):
# image_pil = resize_image(image_pil)
# save_path = f"inputs/face_{int(i)}.png"
# image_pil.save(save_path, "PNG")
# image_clicks.append(gr.Image(type="filepath", value=save_path, label=f"Avatar {int(i)+1}"))
# with gr.Row():
# for i in range(len(image_pils)):
# image_click_buttons.append(gr.Button(f"Use Image {i+1}"))
# # Display 3 videos and buttons
# with gr.Row():
# for i, video_path in enumerate(video_paths):
# save_path = f"inputs/video_{int(i)}.mp4"
# resize_video(video_path, save_path)
# video_clicks.append(gr.Video(value=save_path, label=f"Video {int(i)+1}"))
# with gr.Row():
# for i in range(len(video_paths)):
# video_click_buttons.append(gr.Button(f"Use Video {int(i)+1}"))
submit_button = gr.Button("Submit")
# Outputs
gr.Markdown("<hr>") # Divider
with gr.Row():
with gr.Column():
video_output = gr.Video(label="Your AI Avatar video: ", format="mp4", width=1280, height=720)
video_time_text = gr.Textbox(label="Video processing time", value="0.0 seconds")
# Technical details
gr.Markdown("<hr>") # Divider
with gr.Row():
gr.Markdown(
"""
<p style='font-size: 24px;'>OPEA megaservice deployed: <br>
<ul style='font-size: 24px;'>
<li><strong>AvatarChatbot</strong></li>
</ul></p>
<p style='font-size: 24px;'>OPEA microservices deployed:
<ul style='font-size: 24px;'>
<li><strong>ASR</strong> (service: opea/whisper-gaudi, model: openai/whisper-small)</li>
<li><strong>LLM 'text-generation'</strong> (service: opea/llm-tgi, model: Intel/neural-chat-7b-v3-3)</li>
<li><strong>TTS</strong> (service: opea/speecht5-gaudi, model: microsoft/speecht5_tts)</li>
<li><strong>Animation</strong> (service: opea/animation, model: wav2lip+gfpgan)</li>
</ul></p>
"""
)
with gr.Row():
gr.Image("assets/img/flowchart.png", label="Megaservice Flowchart")
with gr.Row():
gr.Markdown(
"""
<p style='font-size: 24px;'>The AI Avatar Audio Chatbot is powered by the following Intel® AI software:<br>
<ul style='font-size: 24px;'>
<li><strong>Intel Gaudi Software v1.17.0</strong></li>
<li><strong>PyTorch v2.3.1 (Eager mode + torch.compile) </strong></li>
<li><strong>HPU Graph</strong></li>
<li><strong>Intel Neural Compressor (INC)</strong></li>
</ul></p>
"""
)
# Disclaimer
gr.Markdown("<hr>") # Divider
gr.Markdown("<h2 style='font-size: 24px;'>Notices & Disclaimers</h1>")
gr.Markdown(
"""
<p style='font-size: 20px;'>Intel is committed to respecting human rights and avoiding complicity in human rights abuses. See Intel's Global Human Rights Principles. Intel's products and software are intended only to be used in applications that do not cause or contribute to a violation of an internationally recognized human right.<br></p>
<p style='font-size: 20px;'>© Intel Corporation. Intel, the Intel logo, and other Intel marks are trademarks of Intel Corporation or its subsidiaries. Other names and brands may be claimed as the property of others.<br></p>
<p style='font-size: 20px;'>You may not use or facilitate the use of this document in connection with any infringement or other legal analysis concerning Intel products described herein. You agree to grant Intel a non-exclusive, royalty-free license to any patent claim thereafter drafted which includes subject matter disclosed herein.<br></p>
"""
)
# State transitions
# for i in range(len(image_pils)):
# image_click_buttons[i].click(
# update_selected_image_state, inputs=[gr.Number(value=i, visible=False)], outputs=[face_input]
# )
# for i in range(len(video_paths)):
# video_click_buttons[i].click(
# update_selected_image_state,
# inputs=[gr.Number(value=i + len(image_pils), visible=False)],
# outputs=[face_input],
# )
submit_button.click(
initial_process,
inputs=[audio_input, face_input, model_choice],
outputs=[
video_output,
video_time_text,
],
)
demo.queue().launch(server_name="0.0.0.0", server_port=7861)

View File

@@ -206,6 +206,8 @@ cd GenAIExamples/ChatQnA/docker_compose/intel/hpu/gaudi/
docker compose up -d
```
> Notice: Currently only the **Habana Driver 1.16.x** is supported for Gaudi.
Refer to the [Gaudi Guide](./docker_compose/intel/hpu/gaudi/README.md) to build docker images from source.
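To confirm which driver is installed before bringing the stack up, a quick check (assuming the Habana tools and `hl-smi` are available on the host) is:

```bash
# Print the installed Habana driver version; it should report 1.16.x
hl-smi | grep -i "driver version"
```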
### Deploy ChatQnA on Xeon

View File

@@ -41,11 +41,11 @@ class MultiHop_Evaluator(Evaluator):
return []
def get_retrieved_documents(self, query, arguments):
data = {"inputs": query}
data = {"text": query}
headers = {"Content-Type": "application/json"}
response = requests.post(arguments.tei_embedding_endpoint + "/embed", data=json.dumps(data), headers=headers)
response = requests.post(arguments.embedding_endpoint, data=json.dumps(data), headers=headers)
if response.ok:
embedding = response.json()[0]
embedding = response.json()["embedding"]
else:
print(f"Request for embedding failed due to {response.text}.")
return []
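For reference, a minimal sketch of the updated embedding request shape: the payload is now `{"text": ...}` and the vector comes back under the `"embedding"` key. The endpoint URL and port below are assumptions; substitute your actual embedding endpoint:

```bash
# Sketch: query the embedding microservice with the new request format
curl http://${host_ip}:6000/v1/embeddings \
  -X POST \
  -d '{"text": "What is OPEA?"}' \
  -H 'Content-Type: application/json'
```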

View File

@@ -1,6 +1,6 @@
# ChatQnA Deployment
# Benchmarking Deployment
This document guides you through deploying ChatQnA pipelines using Helm charts. Helm charts simplify managing Kubernetes applications by packaging configuration and resources.
This document guides you through deploying this example's pipelines using Helm charts. Helm charts simplify managing Kubernetes applications by packaging configuration and resources.
## Getting Started
@@ -8,29 +8,19 @@ This document guides you through deploying ChatQnA pipelines using Helm charts.
```bash
# on k8s-master node
cd GenAIExamples/ChatQnA/benchmark/performance/helm_charts
cd GenAIExamples/{example_name}/benchmark/performance/helm_charts
# Replace the key of HUGGINGFACEHUB_API_TOKEN with your actual Hugging Face token:
# vim customize.yaml
# vim hpu_with_rerank.yaml or hpu_without_rerank.yaml
HUGGINGFACEHUB_API_TOKEN: hf_xxxxx
```
### Deploy your ChatQnA
### Deployment
```bash
# Deploy a ChatQnA pipeline using the specified YAML configuration.
# To deploy with different configurations, simply provide a different YAML file.
helm install chatqna helm_charts/ -f customize.yaml
# Options:
# --num_nodes choices=[1, 2, 4, 8]
# --mode choices=["tuned", "oob"]
# --workflow choices=["with_rerank", "without_rerank"]
python deployment.py --workflow=with_rerank --mode=tuned --num_nodes=1
```
Note: The provided [BKC manifests](https://github.com/opea-project/GenAIExamples/tree/main/ChatQnA/benchmark) for single, two, and four node Kubernetes clusters were generated using this tool.
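To preview the rendered Kubernetes manifests without installing anything, `deployment.py` also exposes a `--template` flag (it runs `helm template` instead of `helm install`); a sketch:

```bash
# Render the manifests only; nothing is deployed to the cluster
python deployment.py --workflow=with_rerank --mode=oob --num_nodes=2 --template
```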
## Customize your own ChatQnA pipelines. (Optional)
There are two yaml configs you can specify.
- customize.yaml
This file can specify image names, the number of replicas and CPU cores to manage your pods.
- values.yaml
This file contains the default microservice configurations for ChatQnA. Please review and understand each parameter before making any changes.

View File

@@ -1,71 +1,48 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
podSpecs:
- name: chatqna-backend-server-deploy
spec:
image_name: opea/chatqna
image_tag: latest
replicas: 2
resources:
limits:
cpu: "8"
memory: "8000Mi"
requests:
cpu: "8"
memory: "8000Mi"
replicas: 2
resources:
limits:
cpu: "8"
memory: "8000Mi"
requests:
cpu: "8"
memory: "8000Mi"
- name: embedding-dependency-deploy
spec:
image_name: ghcr.io/huggingface/text-embeddings-inference
image_tag: cpu-1.5
replicas: 1
resources:
limits:
cpu: "80"
memory: "20000Mi"
requests:
cpu: "80"
memory: "20000Mi"
replicas: 1
resources:
limits:
cpu: "80"
memory: "20000Mi"
requests:
cpu: "80"
memory: "20000Mi"
- name: reranking-dependency-deploy
spec:
image_name: opea/tei-gaudi
image_tag: latest
replicas: 1
resources:
limits:
habana.ai/gaudi: 1
replicas: 1
resources:
limits:
habana.ai/gaudi: 1
- name: llm-dependency-deploy
spec:
image_name: ghcr.io/huggingface/tgi-gaudi
image_tag: 2.0.4
replicas: 7
resources:
limits:
habana.ai/gaudi: 1
replicas: 7
resources:
limits:
habana.ai/gaudi: 1
- name: dataprep-deploy
spec:
image_name: opea/dataprep-redis
image_tag: latest
replicas: 1
replicas: 1
- name: vector-db
spec:
image_name: redis/redis-stack
image_tag: 7.2.0-v9
replicas: 1
replicas: 1
- name: retriever-deploy
spec:
image_name: opea/retriever-redis
image_tag: latest
replicas: 2
resources:
requests:
cpu: "4"
memory: "4000Mi"
replicas: 2
resources:
requests:
cpu: "4"
memory: "4000Mi"

View File

@@ -0,0 +1,168 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import subprocess
import yaml
def generate_yaml(num_nodes, mode="oob", with_rerank=True):
common_pods = [
"chatqna-backend-server-deploy",
"embedding-dependency-deploy",
"dataprep-deploy",
"vector-db",
"retriever-deploy",
]
if with_rerank:
pods_list = common_pods + ["reranking-dependency-deploy", "llm-dependency-deploy"]
else:
pods_list = common_pods + ["llm-dependency-deploy"]
if num_nodes == 1:
replicas = [
{"name": "chatqna-backend-server-deploy", "replicas": 2},
{"name": "embedding-dependency-deploy", "replicas": 1},
{"name": "reranking-dependency-deploy", "replicas": 1} if with_rerank else None,
{"name": "llm-dependency-deploy", "replicas": 7 if with_rerank else 8},
{"name": "dataprep-deploy", "replicas": 1},
{"name": "vector-db", "replicas": 1},
{"name": "retriever-deploy", "replicas": 2},
]
else:
replicas = [
{"name": "chatqna-backend-server-deploy", "replicas": 1 * num_nodes},
{"name": "embedding-dependency-deploy", "replicas": 1 * num_nodes},
{"name": "reranking-dependency-deploy", "replicas": 1} if with_rerank else None,
{"name": "llm-dependency-deploy", "replicas": (8 * num_nodes) - 1 if with_rerank else 8 * num_nodes},
{"name": "dataprep-deploy", "replicas": 1},
{"name": "vector-db", "replicas": 1},
{"name": "retriever-deploy", "replicas": 1 * num_nodes},
]
resources = [
{
"name": "chatqna-backend-server-deploy",
"resources": {"limits": {"cpu": "16", "memory": "8000Mi"}, "requests": {"cpu": "16", "memory": "8000Mi"}},
},
{
"name": "embedding-dependency-deploy",
"resources": {"limits": {"cpu": "80", "memory": "20000Mi"}, "requests": {"cpu": "80", "memory": "20000Mi"}},
},
(
{"name": "reranking-dependency-deploy", "resources": {"limits": {"habana.ai/gaudi": 1}}}
if with_rerank
else None
),
{"name": "llm-dependency-deploy", "resources": {"limits": {"habana.ai/gaudi": 1}}},
{"name": "retriever-deploy", "resources": {"requests": {"cpu": "8", "memory": "8000Mi"}}},
]
replicas = [replica for replica in replicas if replica]
resources = [resource for resource in resources if resource]
tgi_params = [
{
"name": "llm-dependency-deploy",
"args": [
{"name": "--model-id", "value": "$(LLM_MODEL_ID)"},
{"name": "--max-input-length", "value": 1280},
{"name": "--max-total-tokens", "value": 2048},
{"name": "--max-batch-total-tokens", "value": 65536},
{"name": "--max-batch-prefill-tokens", "value": 4096},
],
},
]
replicas_dict = {item["name"]: item["replicas"] for item in replicas}
resources_dict = {item["name"]: item["resources"] for item in resources}
tgi_params_dict = {item["name"]: item["args"] for item in tgi_params}
dicts_to_check = [
{"dict": replicas_dict, "key": "replicas"},
]
if mode == "tuned":
dicts_to_check.extend([{"dict": resources_dict, "key": "resources"}, {"dict": tgi_params_dict, "key": "args"}])
merged_specs = {"podSpecs": []}
for pod in pods_list:
pod_spec = {"name": pod}
for item in dicts_to_check:
if pod in item["dict"]:
pod_spec[item["key"]] = item["dict"][pod]
if len(pod_spec) > 1:
merged_specs["podSpecs"].append(pod_spec)
yaml_data = yaml.dump(merged_specs, default_flow_style=False)
print(yaml_data)
if with_rerank:
filename = f"{mode}_{num_nodes}_gaudi_with_rerank.yaml"
else:
filename = f"{mode}_{num_nodes}_gaudi_without_rerank.yaml"
with open(filename, "w") as file:
file.write(yaml_data)
current_dir = os.getcwd()
filepath = os.path.join(current_dir, filename)
print(f"YAML file {filepath} has been generated.")
return filepath
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--name", help="The name of example pipelines", default="chatqna")
parser.add_argument("--folder", help="The path of helmcharts folder", default=".")
parser.add_argument(
"--num_nodes", help="Number of nodes to deploy", type=int, choices=[1, 2, 4, 8], default=1, required=True
)
parser.add_argument(
"--mode", help="set up your chatqna in the specified mode", type=str, choices=["oob", "tuned"], default="oob"
)
parser.add_argument(
"--workflow",
help="with rerank in the pipeline",
type=str,
choices=["with_rerank", "without_rerank"],
default="with_rerank",
)
parser.add_argument("--template", help="helm template", action="store_true")
args = parser.parse_args()
if args.workflow == "with_rerank":
with_rerank = True
workflow_file = "./hpu_with_rerank.yaml"
else:
with_rerank = False
workflow_file = "./hpu_without_rerank.yaml"
customize_filepath = generate_yaml(args.num_nodes, mode=args.mode, with_rerank=with_rerank)
if args.template:
subprocess.run(
["helm", "template", args.folder, "-f", workflow_file, "-f", customize_filepath],
check=True,
text=True,
capture_output=False,
)
else:
subprocess.run(
["helm", "install", args.name, args.folder, "-f", workflow_file, "-f", customize_filepath],
check=True,
text=True,
capture_output=False,
)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,223 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
namespace: default
config:
CONFIG_MAP_NAME: chatqna-config
NODE_SELECTOR: opea
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
RERANK_MODEL_ID: BAAI/bge-reranker-base
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
RERANK_SERVER_HOST_IP: reranking-dependency-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
LLM_SERVER_HOST_IP: llm-dependency-svc
INDEX_NAME: rag-redis
EMBEDDING_SERVER_HOST_IP: embedding-dependency-svc
TEI_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
LLM_SERVER_PORT: 9009
RERANK_SERVER_PORT: 8808
EMBEDDING_SERVER_PORT: 6006
microservices:
- name: chatqna-backend-server-deploy
image: opea/chatqna:latest
replicas: 1
ports:
- containerPort: 8888
- name: dataprep-deploy
image: opea/dataprep-redis:latest
replicas: 1
ports:
- containerPort: 6007
- name: vector-db
image: redis/redis-stack:7.2.0-v9
replicas: 1
ports:
- containerPort: 6379
- containerPort: 8001
- name: retriever-deploy
image: opea/retriever-redis:latest
replicas: 1
ports:
- containerPort: 7000
- name: embedding-dependency-deploy
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
replicas: 1
ports:
- containerPort: 80
args:
- name: "--model-id"
value: $(EMBEDDING_MODEL_ID)
- name: "--auto-truncate"
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
- name: reranking-dependency-deploy
image: opea/tei-gaudi:latest
replicas: 1
resources:
limits:
habana.ai/gaudi: 1
args:
- name: "--model-id"
- value: $(RERANK_MODEL_ID)
- name: "--auto-truncate"
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: "true"
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: MAX_WARMUP_SEQUENCE_LENGTH
value: "512"
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
- name: llm-dependency-deploy
image: ghcr.io/huggingface/tgi-gaudi:2.0.4
replicas: 1
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
args:
- name: "--model-id"
value: $(LLM_MODEL_ID)
- name: "--max-input-length"
value: "2048"
- name: "--max-total-tokens"
value: "4096"
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: "true"
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
services:
- name: chatqna-backend-server-svc
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
- name: dataprep-svc
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
- name: embedding-dependency-svc
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
- name: llm-dependency-svc
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
- name: reranking-dependency-svc
spec:
ports:
- name: service
port: 8808
targetPort: 80
selector:
app: reranking-dependency-deploy
type: ClusterIP
- name: retriever-svc
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
- name: vector-db
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP

View File

@@ -0,0 +1,166 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
namespace: default
config:
CONFIG_MAP_NAME: chatqna-config
NODE_SELECTOR: opea
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
RERANK_MODEL_ID: BAAI/bge-reranker-base
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
microservices:
- name: chatqna-backend-server-deploy
image: opea/chatqna-without-rerank:latest
replicas: 1
ports:
- containerPort: 8888
- name: dataprep-deploy
image: opea/dataprep-redis:latest
replicas: 1
ports:
- containerPort: 6007
- name: vector-db
image: redis/redis-stack:7.2.0-v9
replicas: 1
ports:
- containerPort: 6379
- containerPort: 8001
- name: retriever-deploy
image: opea/retriever-redis:latest
replicas: 1
ports:
- containerPort: 7000
- name: embedding-dependency-deploy
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
replicas: 1
ports:
- containerPort: 80
args:
- name: "--model-id"
value: $(EMBEDDING_MODEL_ID)
- name: "--auto-truncate"
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
- name: llm-dependency-deploy
image: ghcr.io/huggingface/tgi-gaudi:2.0.4
replicas: 1
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
args:
- name: "--model-id"
value: $(LLM_MODEL_ID)
- name: "--max-input-length"
value: "2048"
- name: "--max-total-tokens"
value: "4096"
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: "true"
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
services:
- name: chatqna-backend-server-svc
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
- name: dataprep-svc
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
- name: embedding-dependency-svc
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
- name: llm-dependency-svc
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
- name: retriever-svc
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
- name: vector-db
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP

View File

@@ -4,22 +4,22 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: qna-config
name: {{ .Values.config.CONFIG_MAP_NAME }}
namespace: default
data:
EMBEDDING_MODEL_ID: {{ .Values.config.EMBEDDING_MODEL_ID }}
EMBEDDING_SERVER_HOST_IP: embedding-dependency-svc
HUGGINGFACEHUB_API_TOKEN: {{ .Values.HUGGINGFACEHUB_API_TOKEN }}
INDEX_NAME: rag-redis
LLM_MODEL_ID: {{ .Values.config.LLM_MODEL_ID }}
LLM_SERVER_HOST_IP: llm-dependency-svc
NODE_SELECTOR: chatqna-opea
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
HUGGINGFACEHUB_API_TOKEN: {{ .Values.config.HUGGINGFACEHUB_API_TOKEN }}
NODE_SELECTOR: {{ .Values.config.NODE_SELECTOR }}
RERANK_MODEL_ID: {{ .Values.config.RERANK_MODEL_ID }}
LLM_MODEL_ID: {{ .Values.config.LLM_MODEL_ID }}
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
RERANK_SERVER_HOST_IP: reranking-dependency-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
LLM_SERVER_HOST_IP: llm-dependency-svc
INDEX_NAME: rag-redis
EMBEDDING_SERVER_HOST_IP: embedding-dependency-svc
TEI_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
LLM_SERVER_PORT: "9009"
RERANK_SERVER_PORT: "8808"
EMBEDDING_SERVER_PORT: "6006"
---

View File

@@ -2,33 +2,46 @@
# SPDX-License-Identifier: Apache-2.0
{{- $global := .Values }}
{{- range $deployment := .Values.deployments }}
{{- range $podSpec := $global.podSpecs }}
{{- if eq $podSpec.name $deployment.name }}
{{- range $microservice := .Values.microservices }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ $deployment.name }}
name: {{ $microservice.name }}
namespace: default
spec:
replicas: {{ $podSpec.spec.replicas }}
{{- $replicas := $microservice.replicas }}
{{- range $podSpec := $global.podSpecs }}
{{- if eq $podSpec.name $microservice.name }}
{{- $replicas = $podSpec.replicas | default $microservice.replicas }}
{{- end }}
{{- end }}
replicas: {{ $replicas }}
selector:
matchLabels:
app: {{ $deployment.name }}
app: {{ $microservice.name }}
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: {{ $deployment.name }}
app: {{ $microservice.name }}
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
{{- if $deployment.spec.args }}
name: {{ $global.config.CONFIG_MAP_NAME }}
{{- $args := $microservice.args }}
{{- range $podSpec := $global.podSpecs }}
{{- if eq $podSpec.name $microservice.name }}
{{- $args = $podSpec.args | default $microservice.args }}
{{- end }}
{{- end }}
{{- if $microservice.args }}
args:
{{- range $arg := $deployment.spec.args }}
{{- range $arg := $args }}
{{- if $arg.name }}
- {{ $arg.name }}
{{- end }}
@@ -38,31 +51,46 @@ spec:
{{- end }}
{{- end }}
{{- if $deployment.spec.env }}
{{- if $microservice.env }}
env:
{{- range $env := $deployment.spec.env }}
{{- range $env := $microservice.env }}
- name: {{ $env.name }}
value: "{{ $env.value }}"
{{- end }}
{{- end }}
image: {{ $podSpec.spec.image_name }}:{{ $podSpec.spec.image_tag }}
imagePullPolicy: IfNotPresent
name: {{ $podSpec.name }}
{{- $image := $microservice.image }}
{{- range $podSpec := $global.podSpecs }}
{{- if eq $podSpec.name $microservice.name }}
{{- $image = $podSpec.image | default $microservice.image }}
{{- end }}
{{- end }}
image: {{ $image }}
{{- if $deployment.spec.ports }}
imagePullPolicy: IfNotPresent
name: {{ $microservice.name }}
{{- if $microservice.ports }}
ports:
{{- range $port := $deployment.spec.ports }}
{{- range $port := $microservice.ports }}
{{- range $port_name, $port_id := $port }}
- {{ $port_name }}: {{ $port_id }}
{{- end }}
{{- end }}
{{- end }}
{{- $resources := $microservice.resources }}
{{- range $podSpec := $global.podSpecs }}
{{- if eq $podSpec.name $microservice.name }}
{{- if $podSpec.resources }}
{{- $resources = $podSpec.resources }}
{{- end }}
{{- end }}
{{- end }}
{{- if $podSpec.spec.resources }}
{{- if $resources }}
resources:
{{- range $resourceType, $resource := $podSpec.spec.resources }}
{{- range $resourceType, $resource := $resources }}
{{ $resourceType }}:
{{- range $limitType, $limit := $resource }}
{{ $limitType }}: {{ $limit }}
@@ -70,9 +98,9 @@ spec:
{{- end }}
{{- end }}
{{- if $deployment.spec.volumeMounts }}
{{- if $microservice.volumeMounts }}
volumeMounts:
{{- range $volumeMount := $deployment.spec.volumeMounts }}
{{- range $volumeMount := $microservice.volumeMounts }}
- mountPath: {{ $volumeMount.mountPath }}
name: {{ $volumeMount.name }}
{{- end }}
@@ -80,20 +108,20 @@ spec:
hostIPC: true
nodeSelector:
node-type: chatqna-opea
node-type: {{ $global.config.NODE_SELECTOR }}
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: {{ $deployment.name }}
app: {{ $microservice.name }}
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
{{- if $deployment.spec.volumes }}
{{- if $microservice.volumes }}
volumes:
{{- range $index, $volume := $deployment.spec.volumes }}
{{- range $index, $volume := $microservice.volumes }}
- name: {{ $volume.name }}
{{- if $volume.hostPath }}
hostPath:
@@ -109,5 +137,3 @@ spec:
---
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,203 +0,0 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
namespace: default
config:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
RERANK_MODEL_ID: BAAI/bge-reranker-base
deployments:
- name: chatqna-backend-server-deploy
spec:
ports:
- containerPort: 8888
- name: dataprep-deploy
spec:
ports:
- containerPort: 6007
- name: vector-db
spec:
ports:
- containerPort: 6379
- containerPort: 8001
- name: retriever-deploy
spec:
ports:
- containerPort: 7000
- name: embedding-dependency-deploy
spec:
ports:
- containerPort: 80
args:
- name: "--model-id"
value: $(EMBEDDING_MODEL_ID)
- name: "--auto-truncate"
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
- name: reranking-dependency-deploy
spec:
args:
- name: "--model-id"
- value: $(RERANK_MODEL_ID)
- name: "--auto-truncate"
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: "true"
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
- name: MAX_WARMUP_SEQUENCE_LENGTH
value: "512"
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
- name: llm-dependency-deploy
spec:
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
args:
- name: "--model-id"
value: $(LLM_MODEL_ID)
- name: "--max-input-length"
value: "2048"
- name: "--max-total-tokens"
value: "4096"
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: "true"
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
services:
- name: chatqna-backend-server-svc
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
- name: dataprep-svc
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
- name: embedding-dependency-svc
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
- name: llm-dependency-svc
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
- name: reranking-dependency-svc
spec:
ports:
- name: service
port: 8808
targetPort: 80
selector:
app: reranking-dependency-deploy
type: ClusterIP
- name: retriever-svc
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
- name: vector-db
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP

View File

@@ -29,7 +29,7 @@ metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 1
replicas: 4
selector:
matchLabels:
app: chatqna-backend-server-deploy
@@ -381,7 +381,7 @@ metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 1
replicas: 4
selector:
matchLabels:
app: retriever-deploy

View File

@@ -29,7 +29,7 @@ metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 1
replicas: 4
selector:
matchLabels:
app: chatqna-backend-server-deploy
@@ -295,7 +295,7 @@ metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 1
replicas: 4
selector:
matchLabels:
app: retriever-deploy

View File

@@ -167,10 +167,10 @@ spec:
- containerPort: 80
resources:
limits:
cpu: 76
cpu: 80
memory: 20000Mi
requests:
cpu: 76
cpu: 80
memory: 20000Mi
volumeMounts:
- mountPath: /data

View File

@@ -167,10 +167,10 @@ spec:
- containerPort: 80
resources:
limits:
cpu: 76
cpu: 80
memory: 20000Mi
requests:
cpu: 76
cpu: 80
memory: 20000Mi
volumeMounts:
- mountPath: /data

View File

@@ -167,10 +167,10 @@ spec:
- containerPort: 80
resources:
limits:
cpu: 76
cpu: 80
memory: 20000Mi
requests:
cpu: 76
cpu: 80
memory: 20000Mi
volumeMounts:
- mountPath: /data

View File

@@ -167,10 +167,10 @@ spec:
- containerPort: 80
resources:
limits:
cpu: 76
cpu: 80
memory: 20000Mi
requests:
cpu: 76
cpu: 80
memory: 20000Mi
volumeMounts:
- mountPath: /data

View File

@@ -167,10 +167,10 @@ spec:
- containerPort: 80
resources:
limits:
cpu: 76
cpu: 80
memory: 20000Mi
requests:
cpu: 76
cpu: 80
memory: 20000Mi
volumeMounts:
- mountPath: /data

View File

@@ -167,10 +167,10 @@ spec:
- containerPort: 80
resources:
limits:
cpu: 76
cpu: 80
memory: 20000Mi
requests:
cpu: 76
cpu: 80
memory: 20000Mi
volumeMounts:
- mountPath: /data

View File

@@ -167,10 +167,10 @@ spec:
- containerPort: 80
resources:
limits:
cpu: 76
cpu: 80
memory: 20000Mi
requests:
cpu: 76
cpu: 80
memory: 20000Mi
volumeMounts:
- mountPath: /data

View File

@@ -167,10 +167,10 @@ spec:
- containerPort: 80
resources:
limits:
cpu: 76
cpu: 80
memory: 20000Mi
requests:
cpu: 76
cpu: 80
memory: 20000Mi
volumeMounts:
- mountPath: /data

View File

@@ -0,0 +1,507 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
EMBEDDING_SERVER_HOST_IP: embedding-dependency-svc
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
INDEX_NAME: rag-redis
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
LLM_SERVER_HOST_IP: llm-dependency-svc
NODE_SELECTOR: chatqna-opea
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
RERANK_MODEL_ID: BAAI/bge-reranker-base
RERANK_SERVER_HOST_IP: reranking-dependency-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
kind: ConfigMap
metadata:
name: qna-config
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 8
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
ports:
- containerPort: 8888
resources:
limits:
cpu: 8
memory: 8000Mi
requests:
cpu: 8
memory: 8000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
ports:
- containerPort: 6007
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: dataprep-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: dataprep-svc
namespace: default
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 8
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
imagePullPolicy: IfNotPresent
name: embedding-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
cpu: 80
memory: 20000Mi
requests:
cpu: 80
memory: 20000Mi
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: embedding-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 63
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '1280'
- --max-total-tokens
- '2048'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
imagePullPolicy: IfNotPresent
name: llm-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
securityContext:
capabilities:
add:
- SYS_NICE
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: llm-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: reranking-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(RERANK_MODEL_ID)
- --auto-truncate
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
- name: MAX_WARMUP_SEQUENCE_LENGTH
value: '512'
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tei-gaudi:latest
imagePullPolicy: IfNotPresent
name: reranking-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: reranking-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: reranking-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 8808
targetPort: 80
selector:
app: reranking-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 8
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
ports:
- containerPort: 7000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: retriever-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: retriever-svc
namespace: default
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: vector-db
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: redis/redis-stack:7.2.0-v9
imagePullPolicy: IfNotPresent
name: vector-db
ports:
- containerPort: 6379
- containerPort: 8001
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: vector-db
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP
---
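
These benchmark manifests expose the ChatQnA backend through a NodePort service on port 30888. As a hedged smoke test from outside the cluster (the node address below is hypothetical, and the /v1/chatqna route with a "messages" payload is assumed from the OPEA ChatQnA gateway rather than stated in the manifests):

import requests

NODE_IP = "10.0.0.100"  # hypothetical address of any cluster node
resp = requests.post(
    f"http://{NODE_IP}:30888/v1/chatqna",            # route assumed from the OPEA gateway
    json={"messages": "What is the OPEA project?"},  # payload shape is an assumption
    timeout=300,
)
resp.raise_for_status()
print(resp.text)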


@@ -0,0 +1,507 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
EMBEDDING_SERVER_HOST_IP: embedding-dependency-svc
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
INDEX_NAME: rag-redis
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
LLM_SERVER_HOST_IP: llm-dependency-svc
NODE_SELECTOR: chatqna-opea
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
RERANK_MODEL_ID: BAAI/bge-reranker-base
RERANK_SERVER_HOST_IP: reranking-dependency-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
kind: ConfigMap
metadata:
name: qna-config
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
ports:
- containerPort: 8888
resources:
limits:
cpu: 8
memory: 8000Mi
requests:
cpu: 8
memory: 8000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
ports:
- containerPort: 6007
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: dataprep-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: dataprep-svc
namespace: default
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
imagePullPolicy: IfNotPresent
name: embedding-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
cpu: 80
memory: 20000Mi
requests:
cpu: 80
memory: 20000Mi
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: embedding-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 31
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '1280'
- --max-total-tokens
- '2048'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
imagePullPolicy: IfNotPresent
name: llm-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
securityContext:
capabilities:
add:
- SYS_NICE
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: llm-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: reranking-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(RERANK_MODEL_ID)
- --auto-truncate
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
- name: MAX_WARMUP_SEQUENCE_LENGTH
value: '512'
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tei-gaudi:latest
imagePullPolicy: IfNotPresent
name: reranking-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: reranking-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: reranking-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 8808
targetPort: 80
selector:
app: reranking-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
ports:
- containerPort: 7000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: retriever-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: retriever-svc
namespace: default
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: vector-db
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: redis/redis-stack:7.2.0-v9
imagePullPolicy: IfNotPresent
name: vector-db
ports:
- containerPort: 6379
- containerPort: 8001
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: vector-db
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP
---
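
Inside the cluster, embedding-dependency-svc maps port 6006 to the TEI container's port 80; this is the same address that TEI_EMBEDDING_ENDPOINT in qna-config points at. A minimal sketch of a direct embedding request, assuming the standard text-embeddings-inference /embed route:

import requests

TEI_URL = "http://embedding-dependency-svc.default.svc.cluster.local:6006"

resp = requests.post(
    f"{TEI_URL}/embed",                        # /embed is the usual TEI route
    json={"inputs": "What is Deep Learning?"},
    timeout=60,
)
embedding = resp.json()[0]                     # one vector per input string
print(len(embedding))                          # 768 dimensions for BAAI/bge-base-en-v1.5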


@@ -0,0 +1,514 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
kind: ConfigMap
metadata:
name: chatqna-config
namespace: default
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
EMBEDDING_SERVER_HOST_IP: embedding-dependency-svc
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
INDEX_NAME: rag-redis
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
LLM_SERVER_HOST_IP: llm-dependency-svc
NODE_SELECTOR: opea
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
RERANK_MODEL_ID: BAAI/bge-reranker-base
RERANK_SERVER_HOST_IP: reranking-dependency-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
LLM_SERVER_PORT: '9009'
RERANK_SERVER_PORT: '8808'
EMBEDDING_SERVER_PORT: '6006'
---
# Source: chatqna-charts/templates/service.yaml
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
kind: Service
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
---
# Source: chatqna-charts/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: dataprep-svc
namespace: default
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
---
# Source: chatqna-charts/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: embedding-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
---
# Source: chatqna-charts/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: llm-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
---
# Source: chatqna-charts/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: reranking-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 8808
targetPort: 80
selector:
app: reranking-dependency-deploy
type: ClusterIP
---
# Source: chatqna-charts/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: retriever-svc
namespace: default
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
---
# Source: chatqna-charts/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP
---
# Source: chatqna-charts/templates/deployment.yaml
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: chatqna-config
image: opea/chatqna-model-fixed-root:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
ports:
- containerPort: 8888
hostIPC: true
nodeSelector:
node-type: opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
# Source: chatqna-charts/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: chatqna-config
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
ports:
- containerPort: 6007
hostIPC: true
nodeSelector:
node-type: opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: dataprep-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
# Source: chatqna-charts/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: vector-db
spec:
containers:
- envFrom:
- configMapRef:
name: chatqna-config
image: redis/redis-stack:7.2.0-v9
imagePullPolicy: IfNotPresent
name: vector-db
ports:
- containerPort: 6379
- containerPort: 8001
hostIPC: true
nodeSelector:
node-type: opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: vector-db
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
# Source: chatqna-charts/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: chatqna-config
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
ports:
- containerPort: 7000
hostIPC: true
nodeSelector:
node-type: opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: retriever-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
# Source: chatqna-charts/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: chatqna-config
args:
- --model-id
- "$(EMBEDDING_MODEL_ID)"
- --auto-truncate
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
imagePullPolicy: IfNotPresent
name: embedding-dependency-deploy
ports:
- containerPort: 80
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- name: model-volume
hostPath:
path: /mnt/models
type: Directory
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
---
# Source: chatqna-charts/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: reranking-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-dependency-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: chatqna-config
args:
- --model-id
- "$(RERANK_MODEL_ID)"
- --auto-truncate
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: "none"
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: "true"
- name: runtime
value: "habana"
- name: HABANA_VISIBLE_DEVICES
value: "all"
- name: MAX_WARMUP_SEQUENCE_LENGTH
value: "512"
image: opea/tei-gaudi:latest
imagePullPolicy: IfNotPresent
name: reranking-dependency-deploy
resources:
limits:
habana.ai/gaudi: 1
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: reranking-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- name: model-volume
hostPath:
path: /mnt/models
type: Directory
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
---
# Source: chatqna-charts/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: chatqna-config
command: ["/bin/bash", "-c"]
args: ["python3 -m vllm.entrypoints.openai.api_server --model $LLM_MODEL_ID --tensor-parallel-size 1 --host 0.0.0.0 --port 80 --block-size 128 --max-num-seqs 256 --max-seq_len-to-capture 2048"]
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: "none"
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: "true"
- name: runtime
value: "habana"
- name: HABANA_VISIBLE_DEVICES
value: "all"
image: opea/llm-vllm-hpu:latest
imagePullPolicy: IfNotPresent
name: llm-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- name: model-volume
hostPath:
path: /mnt/models
type: Directory
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
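
Unlike the other variants, this chart-rendered manifest swaps TGI for vLLM's OpenAI-compatible server (see the api_server command in llm-dependency-deploy), still fronted by llm-dependency-svc on port 9009. A minimal in-cluster completion request could therefore look like this sketch:

import requests

VLLM_URL = "http://llm-dependency-svc.default.svc.cluster.local:9009"

resp = requests.post(
    f"{VLLM_URL}/v1/completions",                # OpenAI-compatible route served by vLLM
    json={
        "model": "Intel/neural-chat-7b-v3-3",    # matches LLM_MODEL_ID in chatqna-config
        "prompt": "What is Kubernetes?",
        "max_tokens": 64,
    },
    timeout=120,
)
print(resp.json()["choices"][0]["text"])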


@@ -0,0 +1,507 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
EMBEDDING_SERVER_HOST_IP: embedding-dependency-svc
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
INDEX_NAME: rag-redis
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
LLM_SERVER_HOST_IP: llm-dependency-svc
NODE_SELECTOR: chatqna-opea
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
RERANK_MODEL_ID: BAAI/bge-reranker-base
RERANK_SERVER_HOST_IP: reranking-dependency-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
kind: ConfigMap
metadata:
name: qna-config
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
ports:
- containerPort: 8888
resources:
limits:
cpu: 8
memory: 8000Mi
requests:
cpu: 8
memory: 8000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
ports:
- containerPort: 6007
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: dataprep-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: dataprep-svc
namespace: default
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
imagePullPolicy: IfNotPresent
name: embedding-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
cpu: 80
memory: 20000Mi
requests:
cpu: 80
memory: 20000Mi
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: embedding-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 15
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '1280'
- --max-total-tokens
- '2048'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
imagePullPolicy: IfNotPresent
name: llm-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
securityContext:
capabilities:
add:
- SYS_NICE
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: llm-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: reranking-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(RERANK_MODEL_ID)
- --auto-truncate
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
- name: MAX_WARMUP_SEQUENCE_LENGTH
value: '512'
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tei-gaudi:latest
imagePullPolicy: IfNotPresent
name: reranking-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: reranking-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: reranking-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 8808
targetPort: 80
selector:
app: reranking-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
ports:
- containerPort: 7000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: retriever-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: retriever-svc
namespace: default
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: vector-db
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: redis/redis-stack:7.2.0-v9
imagePullPolicy: IfNotPresent
name: vector-db
ports:
- containerPort: 6379
- containerPort: 8001
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: vector-db
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP
---
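
The reranking path in this variant is served by tei-gaudi behind reranking-dependency-svc on port 8808 (the TEI_RERANKING_ENDPOINT value). A sketch of a direct rerank call, assuming the TEI /rerank route:

import requests

RERANK_URL = "http://reranking-dependency-svc.default.svc.cluster.local:8808"

resp = requests.post(
    f"{RERANK_URL}/rerank",
    json={
        "query": "What is OPEA?",
        "texts": [
            "OPEA is an open platform for enterprise AI.",
            "Redis is an in-memory key-value store.",
        ],
    },
    timeout=60,
)
print(resp.json())   # a list of {"index": ..., "score": ...} entries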


@@ -0,0 +1,421 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
EMBEDDING_SERVER_HOST_IP: embedding-dependency-svc
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
INDEX_NAME: rag-redis
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
LLM_SERVER_HOST_IP: llm-dependency-svc
NODE_SELECTOR: chatqna-opea
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
RERANK_MODEL_ID: BAAI/bge-reranker-base
RERANK_SERVER_HOST_IP: reranking-dependency-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
kind: ConfigMap
metadata:
name: qna-config
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 8
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna-without-rerank:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
ports:
- containerPort: 8888
resources:
limits:
cpu: 8
memory: 8000Mi
requests:
cpu: 8
memory: 8000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
ports:
- containerPort: 6007
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: dataprep-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: dataprep-svc
namespace: default
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 8
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
imagePullPolicy: IfNotPresent
name: embedding-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
cpu: 80
memory: 20000Mi
requests:
cpu: 80
memory: 20000Mi
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: embedding-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 64
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '1280'
- --max-total-tokens
- '2048'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
imagePullPolicy: IfNotPresent
name: llm-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
securityContext:
capabilities:
add:
- SYS_NICE
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: llm-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 8
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
ports:
- containerPort: 7000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: retriever-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: retriever-svc
namespace: default
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: vector-db
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: redis/redis-stack:7.2.0-v9
imagePullPolicy: IfNotPresent
name: vector-db
ports:
- containerPort: 6379
- containerPort: 8001
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: vector-db
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP
---
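
The without-rerank variants keep the TGI-based llm-dependency-svc on port 9009 (TGI_LLM_ENDPOINT). A direct generation request against it, using the standard TGI /generate route, might look like:

import requests

TGI_URL = "http://llm-dependency-svc.default.svc.cluster.local:9009"

resp = requests.post(
    f"{TGI_URL}/generate",
    json={
        "inputs": "What is retrieval-augmented generation?",
        "parameters": {"max_new_tokens": 128},   # stays well below --max-total-tokens (2048)
    },
    timeout=120,
)
print(resp.json()["generated_text"])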


@@ -0,0 +1,421 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
EMBEDDING_SERVER_HOST_IP: embedding-dependency-svc
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
INDEX_NAME: rag-redis
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
LLM_SERVER_HOST_IP: llm-dependency-svc
NODE_SELECTOR: chatqna-opea
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
RERANK_MODEL_ID: BAAI/bge-reranker-base
RERANK_SERVER_HOST_IP: reranking-dependency-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
kind: ConfigMap
metadata:
name: qna-config
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna-without-rerank:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
ports:
- containerPort: 8888
resources:
limits:
cpu: 8
memory: 8000Mi
requests:
cpu: 8
memory: 8000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
ports:
- containerPort: 6007
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: dataprep-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: dataprep-svc
namespace: default
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
imagePullPolicy: IfNotPresent
name: embedding-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
cpu: 80
memory: 20000Mi
requests:
cpu: 80
memory: 20000Mi
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: embedding-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 32
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '1280'
- --max-total-tokens
- '2048'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
imagePullPolicy: IfNotPresent
name: llm-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
securityContext:
capabilities:
add:
- SYS_NICE
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: llm-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
ports:
- containerPort: 7000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: retriever-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: retriever-svc
namespace: default
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: vector-db
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: redis/redis-stack:7.2.0-v9
imagePullPolicy: IfNotPresent
name: vector-db
ports:
- containerPort: 6379
- containerPort: 8001
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: vector-db
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP
---
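
Before any of these retrieval benchmarks can return useful answers, documents have to be ingested through dataprep-svc (port 6007). The ingestion route is not visible in the manifests themselves; the sketch below assumes the OPEA dataprep-redis convention of a multipart upload to /v1/dataprep:

import requests

DATAPREP_URL = "http://dataprep-svc.default.svc.cluster.local:6007"

with open("sample.pdf", "rb") as f:              # hypothetical local document
    resp = requests.post(
        f"{DATAPREP_URL}/v1/dataprep",           # route assumed, not stated in the manifest
        files={"files": ("sample.pdf", f, "application/pdf")},
        timeout=300,
    )
print(resp.status_code, resp.text)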


@@ -0,0 +1,421 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
EMBEDDING_SERVER_HOST_IP: embedding-dependency-svc
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
INDEX_NAME: rag-redis
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
LLM_SERVER_HOST_IP: llm-dependency-svc
NODE_SELECTOR: chatqna-opea
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
RERANK_MODEL_ID: BAAI/bge-reranker-base
RERANK_SERVER_HOST_IP: reranking-dependency-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
kind: ConfigMap
metadata:
name: qna-config
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna-without-rerank:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
ports:
- containerPort: 8888
resources:
limits:
cpu: 8
memory: 8000Mi
requests:
cpu: 8
memory: 8000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
ports:
- containerPort: 6007
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: dataprep-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: dataprep-svc
namespace: default
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
imagePullPolicy: IfNotPresent
name: embedding-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
cpu: 80
memory: 20000Mi
requests:
cpu: 80
memory: 20000Mi
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: embedding-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 8
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '1280'
- --max-total-tokens
- '2048'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
imagePullPolicy: IfNotPresent
name: llm-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
securityContext:
capabilities:
add:
- SYS_NICE
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: llm-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
ports:
- containerPort: 7000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: retriever-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: retriever-svc
namespace: default
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: vector-db
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: redis/redis-stack:7.2.0-v9
imagePullPolicy: IfNotPresent
name: vector-db
ports:
- containerPort: 6379
- containerPort: 8001
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: vector-db
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP
---
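
retriever-svc (port 7000) sits between the embedding output and the LLM in these pipelines. Its request shape is not shown here; as a rough sketch, the OPEA retriever-redis microservice is typically queried with the original text plus a precomputed embedding vector:

import requests

RETRIEVER_URL = "http://retriever-svc.default.svc.cluster.local:7000"

query = "What is OPEA?"
embedding = [0.0] * 768          # placeholder; in practice the TEI /embed output for `query`

resp = requests.post(
    f"{RETRIEVER_URL}/v1/retrieval",   # route and field names assumed from the OPEA retriever API
    json={"text": query, "embedding": embedding},
    timeout=60,
)
print(resp.json())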


@@ -0,0 +1,421 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
EMBEDDING_SERVER_HOST_IP: embedding-dependency-svc
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
INDEX_NAME: rag-redis
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
LLM_SERVER_HOST_IP: llm-dependency-svc
NODE_SELECTOR: chatqna-opea
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
RERANK_MODEL_ID: BAAI/bge-reranker-base
RERANK_SERVER_HOST_IP: reranking-dependency-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
kind: ConfigMap
metadata:
name: qna-config
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna-without-rerank:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
ports:
- containerPort: 8888
resources:
limits:
cpu: 8
memory: 8000Mi
requests:
cpu: 8
memory: 8000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
ports:
- containerPort: 6007
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: dataprep-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: dataprep-svc
namespace: default
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
imagePullPolicy: IfNotPresent
name: embedding-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
cpu: 80
memory: 20000Mi
requests:
cpu: 80
memory: 20000Mi
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: embedding-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 16
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '1280'
- --max-total-tokens
- '2048'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
imagePullPolicy: IfNotPresent
name: llm-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
securityContext:
capabilities:
add:
- SYS_NICE
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: llm-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
ports:
- containerPort: 7000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: retriever-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: retriever-svc
namespace: default
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: vector-db
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: redis/redis-stack:7.2.0-v9
imagePullPolicy: IfNotPresent
name: vector-db
ports:
- containerPort: 6379
- containerPort: 8001
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: vector-db
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP
---
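This manifest reads `${HF_TOKEN}` in the ConfigMap and pins every pod to nodes labeled `node-type=chatqna-opea`, so the token must be substituted and the label applied before deployment. A minimal sketch, assuming the manifest is saved locally as `chatqna_without_rerank.yaml` (an illustrative filename) and `envsubst` is available:

```bash
# Label the target node(s) so the nodeSelector can schedule the pods.
kubectl label node <your-node-name> node-type=chatqna-opea --overwrite

# Substitute the Hugging Face token referenced by the ConfigMap and apply the manifest.
export HF_TOKEN=<your_huggingface_token>
envsubst < chatqna_without_rerank.yaml | kubectl apply -f -

# Watch the backend pods come up; the service is exposed as a NodePort on 30888.
kubectl -n default get pods -l app=chatqna-backend-server-deploy
```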

View File

@@ -47,7 +47,6 @@ RERANK_SERVER_HOST_IP = os.getenv("RERANK_SERVER_HOST_IP", "0.0.0.0")
RERANK_SERVER_PORT = int(os.getenv("RERANK_SERVER_PORT", 80))
LLM_SERVER_HOST_IP = os.getenv("LLM_SERVER_HOST_IP", "0.0.0.0")
LLM_SERVER_PORT = int(os.getenv("LLM_SERVER_PORT", 80))
LLM_MODEL = os.getenv("LLM_MODEL", "Intel/neural-chat-7b-v3-3")
def align_inputs(self, inputs, cur_node, runtime_graph, llm_parameters_dict, **kwargs):
@@ -62,7 +61,7 @@ def align_inputs(self, inputs, cur_node, runtime_graph, llm_parameters_dict, **k
elif self.services[cur_node].service_type == ServiceType.LLM:
# convert TGI/vLLM to unified OpenAI /v1/chat/completions format
next_inputs = {}
next_inputs["model"] = LLM_MODEL
next_inputs["model"] = "tgi" # specifically clarify the fake model to make the format unified
next_inputs["messages"] = [{"role": "user", "content": inputs["inputs"]}]
next_inputs["max_tokens"] = llm_parameters_dict["max_tokens"]
next_inputs["top_p"] = llm_parameters_dict["top_p"]
@@ -166,10 +165,7 @@ def align_generator(self, gen, **kwargs):
try:
# sometimes yield empty chunk, do a fallback here
json_data = json.loads(json_str)
if (
json_data["choices"][0]["finish_reason"] != "eos_token"
and "content" in json_data["choices"][0]["delta"]
):
if json_data["choices"][0]["finish_reason"] != "eos_token":
yield f"data: {repr(json_data['choices'][0]['delta']['content'].encode('utf-8'))}\n\n"
except Exception as e:
yield f"data: {repr(json_str.encode('utf-8'))}\n\n"

View File

@@ -2,6 +2,104 @@
This document outlines the deployment process for a ChatQnA application utilizing the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline on AIPC. The steps include Docker image creation, container deployment via Docker Compose, and service execution to integrate microservices such as `embedding`, `retriever`, `rerank`, and `llm`.
## Prerequisites
We use [Ollama](https://ollama.com/) as our LLM service for AIPC.
Please follow the instructions below to set up Ollama on your PC. This sets the entrypoint that Ollama needs to work with the ChatQnA examples.
### Set Up Ollama LLM Service
#### Install Ollama Service
Install Ollama service with one command:
```
curl -fsSL https://ollama.com/install.sh | sh
```
#### Set Ollama Service Configuration
The Ollama service configuration file is /etc/systemd/system/ollama.service. Edit the file to set the OLLAMA_HOST environment variable.
Replace **<host_ip>** with your host IPv4 address (use the external public IP). For example, if the host_ip is 10.132.x.y, set `Environment="OLLAMA_HOST=10.132.x.y:11434"`.
```
Environment="OLLAMA_HOST=host_ip:11434"
```
#### Set https_proxy environment for Ollama
If your system accesses the network through a proxy, add https_proxy to the Ollama service configuration file:
```
Environment="https_proxy=Your_HTTPS_Proxy"
```
#### Restart Ollama services
```
$ sudo systemctl daemon-reload
$ sudo systemctl restart ollama.service
```
#### Check that the service has started
```
netstat -tuln | grep 11434
```
The output should look like:
```
tcp 0 0 10.132.x.y:11434 0.0.0.0:* LISTEN
```
#### Pull Ollama LLM model
Run the following commands to download the LLM model. The <host_ip> is the one set in [Ollama Service Configuration](#Set-Ollama-Service-Configuration).
```
export host_ip=<host_ip>
export OLLAMA_HOST=http://${host_ip}:11434
ollama pull llama3.2
```
After the model is downloaded, you can list the available models with `ollama list`.
The output should be similar to the following:
```
NAME ID SIZE MODIFIED
llama3.2:latest a80c4f17acd5 2.0 GB 2 minutes ago
```
### Consume Ollama LLM Service
Query the Ollama service to verify that it is functioning correctly.
```bash
curl http://${host_ip}:11434/api/generate -d '{"model": "llama3.2", "prompt":"What is Deep Learning?"}'
```
The output is similar to the following:
```
{"model":"llama3.2","created_at":"2024-10-12T12:55:28.098813868Z","response":"Deep","done":false}
{"model":"llama3.2","created_at":"2024-10-12T12:55:28.124514468Z","response":" learning","done":false}
{"model":"llama3.2","created_at":"2024-10-12T12:55:28.149754216Z","response":" is","done":false}
{"model":"llama3.2","created_at":"2024-10-12T12:55:28.180420784Z","response":" a","done":false}
{"model":"llama3.2","created_at":"2024-10-12T12:55:28.229185873Z","response":" subset","done":false}
{"model":"llama3.2","created_at":"2024-10-12T12:55:28.263956118Z","response":" of","done":false}
{"model":"llama3.2","created_at":"2024-10-12T12:55:28.289097354Z","response":" machine","done":false}
{"model":"llama3.2","created_at":"2024-10-12T12:55:28.316838918Z","response":" learning","done":false}
{"model":"llama3.2","created_at":"2024-10-12T12:55:28.342309506Z","response":" that","done":false}
{"model":"llama3.2","created_at":"2024-10-12T12:55:28.367221264Z","response":" involves","done":false}
{"model":"llama3.2","created_at":"2024-10-12T12:55:28.39205893Z","response":" the","done":false}
{"model":"llama3.2","created_at":"2024-10-12T12:55:28.417933974Z","response":" use","done":false}
{"model":"llama3.2","created_at":"2024-10-12T12:55:28.443110388Z","response":" of","done":false}
...
```
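Each line of the stream is a standalone JSON object whose `response` field carries one token fragment. To read the answer as plain text, the fragments can be joined; a small sketch, assuming `jq` is installed:

```bash
curl -s http://${host_ip}:11434/api/generate \
  -d '{"model": "llama3.2", "prompt": "What is Deep Learning?"}' | jq -j '.response'; echo
```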
## 🚀 Build Docker Images
First of all, you need to build the Docker images locally and install the required Python package.
@@ -24,14 +122,20 @@ export https_proxy="Your_HTTPs_Proxy"
docker build --no-cache -t opea/retriever-redis:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/retrievers/redis/langchain/Dockerfile .
```
### 2. Build Dataprep Image
### 2. Build LLM Image
```bash
docker build --no-cache -t opea/llm-ollama:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/ollama/langchain/Dockerfile .
```
### 3. Build Dataprep Image
```bash
docker build --no-cache -t opea/dataprep-redis:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/dataprep/redis/langchain/Dockerfile .
cd ..
```
### 3. Build MegaService Docker Image
### 4. Build MegaService Docker Image
To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `chatqna.py` Python script. Build the MegaService Docker image with the command below:
@@ -42,7 +146,7 @@ cd GenAIExamples/ChatQnA
docker build --no-cache -t opea/chatqna:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
```
### 4. Build UI Docker Image
### 5. Build UI Docker Image
Build the frontend Docker image with the command below:
@@ -51,7 +155,7 @@ cd ~/OPEA/GenAIExamples/ChatQnA/ui
docker build --no-cache -t opea/chatqna-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
```
### 5. Build Nginx Docker Image
### 6. Build Nginx Docker Image
```bash
cd GenAIComps
@@ -62,9 +166,10 @@ Then run the command `docker images`, you will have the following 6 Docker Image
1. `opea/dataprep-redis:latest`
2. `opea/retriever-redis:latest`
3. `opea/chatqna:latest`
4. `opea/chatqna-ui:latest`
5. `opea/nginx:latest`
3. `opea/llm-ollama:latest`
4. `opea/chatqna:latest`
5. `opea/chatqna-ui:latest`
6. `opea/nginx:latest`
## 🚀 Start Microservices
@@ -90,10 +195,10 @@ For Linux users, please run `hostname -I | awk '{print $1}'`. For Windows users,
export your_hf_api_token="Your_Huggingface_API_Token"
```
**Append the value of the public IP address to the no_proxy list if you are in a proxy environment**
**Append the value of the public IP address to the no_proxy list**
```
export your_no_proxy=${your_no_proxy},"External_Public_IP",chatqna-aipc-backend-server,tei-embedding-service,retriever,tei-reranking-service,redis-vector-db,dataprep-redis-service
export your_no_proxy=${your_no_proxy},"External_Public_IP"
```
- Linux PC
@@ -106,7 +211,7 @@ export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export INDEX_NAME="rag-redis"
export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
export OLLAMA_HOST=${host_ip}
export OLLAMA_ENDPOINT=http://${host_ip}:11434
export OLLAMA_MODEL="llama3.2"
```
@@ -117,7 +222,7 @@ set EMBEDDING_MODEL_ID=BAAI/bge-base-en-v1.5
set RERANK_MODEL_ID=BAAI/bge-reranker-base
set INDEX_NAME=rag-redis
set HUGGINGFACEHUB_API_TOKEN=%your_hf_api_token%
set OLLAMA_HOST=host.docker.internal
set OLLAMA_ENDPOINT=http://host.docker.internal:11434
set OLLAMA_MODEL="llama3.2"
```
@@ -172,7 +277,16 @@ For details on how to verify the correctness of the response, refer to [how-to-v
curl http://${host_ip}:11434/api/generate -d '{"model": "llama3.2", "prompt":"What is Deep Learning?"}'
```
5. MegaService
5. LLM Microservice
```bash
curl http://${host_ip}:9000/v1/chat/completions\
-X POST \
-d '{"query":"What is Deep Learning?","max_tokens":17,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,"repetition_penalty":1.03,"streaming":true}' \
-H 'Content-Type: application/json'
```
6. MegaService
```bash
curl http://${host_ip}:8888/v1/chatqna -H "Content-Type: application/json" -d '{
@@ -180,7 +294,7 @@ For details on how to verify the correctness of the response, refer to [how-to-v
}'
```
6. Upload RAG Files through Dataprep Microservice (Optional)
7. Upload RAG Files through Dataprep Microservice (Optional)
To chat with retrieved information, you need to upload a file using the Dataprep service, for example with the sketch below.
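A minimal sketch of such an upload, assuming the Dataprep service listens on port 6007 and `./your_document.pdf` stands in for a real file:

```bash
curl -X POST "http://${host_ip}:6007/v1/dataprep" \
  -H "Content-Type: multipart/form-data" \
  -F "files=@./your_document.pdf"
```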
@@ -220,4 +334,4 @@ the output is:
## 🚀 Launch the UI
To access the frontend, open the following URL in your browser: http://{host_ip}:80.
To access the frontend, open the following URL in your browser: http://{host_ip}:5173.

View File

@@ -72,21 +72,22 @@ services:
HF_HUB_DISABLE_PROGRESS_BARS: 1
HF_HUB_ENABLE_HF_TRANSFER: 0
command: --model-id ${RERANK_MODEL_ID} --auto-truncate
ollama-service:
image: ollama/ollama
container_name: ollama
llm:
image: ${REGISTRY:-opea}/llm-ollama
container_name: llm-ollama
ports:
- "11434:11434"
volumes:
- ollama:/root/.ollama
entrypoint: ["bash", "-c"]
command: ["ollama serve & sleep 10 && ollama run ${OLLAMA_MODEL} & wait"]
- "9000:9000"
ipc: host
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
HF_HUB_DISABLE_PROGRESS_BARS: 1
HF_HUB_ENABLE_HF_TRANSFER: 0
OLLAMA_ENDPOINT: ${OLLAMA_ENDPOINT}
OLLAMA_MODEL: ${OLLAMA_MODEL}
chatqna-aipc-backend-server:
chaqna-aipc-backend-server:
image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
container_name: chatqna-aipc-backend-server
depends_on:
@@ -95,29 +96,29 @@ services:
- tei-embedding-service
- retriever
- tei-reranking-service
- llm
ports:
- "8888:8888"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- MEGA_SERVICE_HOST_IP=chatqna-aipc-backend-server
- MEGA_SERVICE_HOST_IP=chaqna-aipc-backend-server
- EMBEDDING_SERVER_HOST_IP=tei-embedding-service
- EMBEDDING_SERVER_PORT=80
- RETRIEVER_SERVICE_HOST_IP=retriever
- RERANK_SERVER_HOST_IP=tei-reranking-service
- RERANK_SERVER_PORT=80
- LLM_SERVER_HOST_IP=${OLLAMA_HOST}
- LLM_SERVER_PORT=11434
- LLM_MODEL=${OLLAMA_MODEL}
- LLM_SERVER_HOST_IP=llm
- LLM_SERVER_PORT=9000
- LOGFLAG=${LOGFLAG}
ipc: host
restart: always
chatqna-aipc-ui-server:
chaqna-aipc-ui-server:
image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
container_name: chatqna-aipc-ui-server
depends_on:
- chatqna-aipc-backend-server
- chaqna-aipc-backend-server
ports:
- "5173:5173"
environment:
@@ -126,31 +127,28 @@ services:
- http_proxy=${http_proxy}
ipc: host
restart: always
chatqna-aipc-nginx-server:
chaqna-aipc-nginx-server:
image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
container_name: chatqna-aipc-nginx-server
container_name: chaqna-aipc-nginx-server
depends_on:
- chatqna-aipc-backend-server
- chatqna-aipc-ui-server
- chaqna-aipc-backend-server
- chaqna-aipc-ui-server
ports:
- "${NGINX_PORT:-80}:80"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- FRONTEND_SERVICE_IP=chatqna-aipc-ui-server
- FRONTEND_SERVICE_IP=chatqna-xeon-ui-server
- FRONTEND_SERVICE_PORT=5173
- BACKEND_SERVICE_NAME=chatqna
- BACKEND_SERVICE_IP=chatqna-aipc-backend-server
- BACKEND_SERVICE_IP=chatqna-xeon-backend-server
- BACKEND_SERVICE_PORT=8888
- DATAPREP_SERVICE_IP=dataprep-redis-service
- DATAPREP_SERVICE_PORT=6007
ipc: host
restart: always
volumes:
ollama:
networks:
default:
driver: bridge
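With the compose file reworked as above and the variables from `set_env.sh` (shown in the next file) exported, the AIPC stack starts the usual way; a short sketch, assuming the commands run from the directory containing `compose.yaml`:

```bash
source ./set_env.sh            # exports OLLAMA_ENDPOINT, OLLAMA_MODEL, the model IDs, etc.
docker compose -f compose.yaml up -d
docker compose ps              # all services should reach a running state
```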

View File

@@ -16,5 +16,5 @@ export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export INDEX_NAME="rag-redis"
export OLLAMA_HOST=${host_ip}
export OLLAMA_ENDPOINT=http://${host_ip}:11434
export OLLAMA_MODEL="llama3.2"

View File

@@ -17,6 +17,8 @@ To set up environment variables for deploying ChatQnA services, follow these ste
```bash
# Example: host_ip="192.168.1.1"
export host_ip="External_Public_IP"
# Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
export no_proxy="Your_No_Proxy"
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
```
@@ -25,9 +27,6 @@ To set up environment variables for deploying ChatQnA services, follow these ste
```bash
export http_proxy="Your_HTTP_Proxy"
export https_proxy="Your_HTTPs_Proxy"
# Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
# Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
export no_proxy="Your_No_Proxy",chatqna-xeon-ui-server,chatqna-xeon-backend-server,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm_service
```
3. Set up other environment variables:
@@ -48,13 +47,13 @@ docker pull opea/chatqna:latest
docker pull opea/chatqna-ui:latest
```
NB: You should build the Docker image from source yourself if:
In following cases, you could build docker image from source by yourself.
- You are developing off the git main branch (as the container's ports in the repo may be different from the published docker image).
- You can't download the docker image.
- You want to use a specific version of the Docker image.
- Failed to download the docker image.
Please refer to ['Build Docker Images'](#🚀-build-docker-images) below.
- If you want to use a specific version of Docker image.
Please refer to 'Build Docker Images' in below.
## QuickStart: 3.Consume the ChatQnA Service
@@ -98,11 +97,6 @@ After launching your instance, you can connect to it using SSH (for Linux instan
First of all, you need to build the Docker images locally and install the required Python package.
```bash
git clone https://github.com/opea-project/GenAIComps.git
cd GenAIComps
```
### 1. Build Retriever Image
```bash
@@ -195,7 +189,7 @@ For users in China who are unable to download models directly from Huggingface,
export HF_TOKEN=${your_hf_token}
export HF_ENDPOINT="https://hf-mirror.com"
model_name="Intel/neural-chat-7b-v3-3"
docker run -p 8008:80 -v ./data:/data --name tgi-service -e HF_ENDPOINT=$HF_ENDPOINT -e http_proxy=$http_proxy -e https_proxy=$https_proxy --shm-size 1g ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu --model-id $model_name
docker run -p 8008:80 -v ./data:/data --name tgi-service -e HF_ENDPOINT=$HF_ENDPOINT -e http_proxy=$http_proxy -e https_proxy=$https_proxy --shm-size 1g ghcr.io/huggingface/text-generation-inference:sha-e4201f4-intel-cpu --model-id $model_name
```
2. Offline
@@ -209,7 +203,7 @@ For users in China who are unable to download models directly from Huggingface,
```bash
export HF_TOKEN=${your_hf_token}
export model_path="/path/to/model"
docker run -p 8008:80 -v $model_path:/data --name tgi_service --shm-size 1g ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu --model-id /data
docker run -p 8008:80 -v $model_path:/data --name tgi_service --shm-size 1g ghcr.io/huggingface/text-generation-inference:sha-e4201f4-intel-cpu --model-id /data
```
### Setup Environment Variables
@@ -219,6 +213,8 @@ For users in China who are unable to download models directly from Huggingface,
```bash
# Example: host_ip="192.168.1.1"
export host_ip="External_Public_IP"
# Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
export no_proxy="Your_No_Proxy"
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
# Example: NGINX_PORT=80
export NGINX_PORT=${your_nginx_port}
@@ -229,8 +225,6 @@ For users in China who are unable to download models directly from Huggingface,
```bash
export http_proxy="Your_HTTP_Proxy"
export https_proxy="Your_HTTPs_Proxy"
# Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
export no_proxy="Your_No_Proxy",chatqna-xeon-ui-server,chatqna-xeon-backend-server,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm_service
```
3. Set up other environment variables:
@@ -311,7 +305,7 @@ For details on how to verify the correctness of the response, refer to [how-to-v
Try the command below to check whether the LLM service is ready.
```bash
docker logs tgi-service | grep Connected
docker logs ${CONTAINER_ID} | grep Connected
```
If the service is ready, you will get a response like the one below.

View File

@@ -111,7 +111,7 @@ Build frontend Docker image that enables Conversational experience with ChatQnA
**Export the value of the public IP address of your Xeon server to the `host_ip` environment variable**
```bash
cd GenAIExamples/ChatQnA/ui
cd GenAIExamples/ChatQnA//ui
export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8912/v1/chatqna"
export DATAPREP_SERVICE_ENDPOINT="http://${host_ip}:6043/v1/dataprep"
docker build --no-cache -t opea/chatqna-conversation-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy --build-arg BACKEND_SERVICE_ENDPOINT=$BACKEND_SERVICE_ENDPOINT --build-arg DATAPREP_SERVICE_ENDPOINT=$DATAPREP_SERVICE_ENDPOINT -f ./docker/Dockerfile.react .
@@ -167,10 +167,10 @@ export host_ip="External_Public_IP"
export your_hf_api_token="Your_Huggingface_API_Token"
```
**Append the value of the public IP address to the no_proxy list if you are in a proxy environment**
**Append the value of the public IP address to the no_proxy list**
```
export your_no_proxy=${your_no_proxy},"External_Public_IP",chatqna-xeon-ui-server,chatqna-xeon-backend-server,dataprep-qdrant-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service
export your_no_proxy=${your_no_proxy},"External_Public_IP"
```
```bash

Some files were not shown because too many files have changed in this diff.