Compare commits

20 Commits

| SHA1 |
| --- |
| 99b3338649 |
| b02db2ad40 |
| dd232736e5 |
| a82caef698 |
| 2dc2ba1d5c |
| f90a6d2a8e |
| 1fdab591d9 |
| 13ea13862a |
| 1787d1ee98 |
| 15c62bfb7a |
| aebb69cd75 |
| db4bf1a4c3 |
| f7002fcb70 |
| c39c875211 |
| 6287f7945a |
| d1b5113ce0 |
| c2e9a259fe |
| 48eaf9c1c9 |
| a39824f142 |
| e10e6dd002 |
@@ -7,7 +7,7 @@ source /GenAIExamples/.github/workflows/scripts/change_color
log_dir=/GenAIExamples/.github/workflows/scripts/codeScan
ERROR_WARN=false

find . -type f \( -name "Dockerfile*" \) -print -exec hadolint --ignore DL3006 --ignore DL3007 --ignore DL3008 --ignore DL3013 --ignore DL3018 --ignore DL3016 {} \; > ${log_dir}/hadolint.log
find . -type f \( -name "Dockerfile*" \) -print -exec hadolint --ignore DL3006 --ignore DL3007 --ignore DL3008 --ignore DL3013 {} \; > ${log_dir}/hadolint.log

if [[ $(grep -c "error" ${log_dir}/hadolint.log) != 0 ]]; then
$BOLD_RED && echo "Error!! Please Click on the artifact button to download and check error details." && $RESET
@@ -1,203 +1,49 @@
|
||||
# Copyright (C) 2025 Intel Corporation
|
||||
# Copyright (C) 2024 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# syntax=docker/dockerfile:1
|
||||
# Initialize device type args
|
||||
# use build args in the docker build command with --build-arg="BUILDARG=true"
|
||||
ARG USE_CUDA=false
|
||||
ARG USE_OLLAMA=false
|
||||
# Tested with cu117 for CUDA 11 and cu121 for CUDA 12 (default)
|
||||
ARG USE_CUDA_VER=cu121
|
||||
# any sentence transformer model; models to use can be found at https://huggingface.co/models?library=sentence-transformers
|
||||
# Leaderboard: https://huggingface.co/spaces/mteb/leaderboard
|
||||
# for better performance and multilanguage support use "intfloat/multilingual-e5-large" (~2.5GB) or "intfloat/multilingual-e5-base" (~1.5GB)
|
||||
# IMPORTANT: If you change the embedding model (sentence-transformers/all-MiniLM-L6-v2) and vice versa, you aren't able to use RAG Chat with your previous documents loaded in the WebUI! You need to re-embed them.
|
||||
ARG USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
|
||||
ARG USE_RERANKING_MODEL=""
|
||||
#FROM python:3.11-slim
|
||||
FROM node:22.9.0
|
||||
|
||||
# Tiktoken encoding name; models to use can be found at https://huggingface.co/models?library=tiktoken
|
||||
ARG USE_TIKTOKEN_ENCODING_NAME="cl100k_base"
|
||||
ENV LANG=C.UTF-8
|
||||
ARG ARCH=cpu
|
||||
|
||||
ARG BUILD_HASH=dev-build
|
||||
# Override at your own risk - non-root configurations are untested
|
||||
ARG UID=0
|
||||
ARG GID=0
|
||||
RUN apt-get update -y && apt-get install -y --no-install-recommends --fix-missing \
|
||||
build-essential \
|
||||
libgl1-mesa-glx \
|
||||
libjemalloc-dev \
|
||||
git \
|
||||
python3-venv
|
||||
|
||||
######## WebUI frontend ########
|
||||
FROM --platform=$BUILDPLATFORM node:22-alpine3.20 AS build
|
||||
ARG BUILD_HASH
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY open_webui_patches /app/patches
|
||||
ARG WEBUI_VERSION=v0.5.20
|
||||
RUN apk add --no-cache git
|
||||
|
||||
# Clone code and use patch
|
||||
RUN git config --global user.name "opea" && \
|
||||
git config --global user.email "" && \
|
||||
git clone https://github.com/open-webui/open-webui.git
|
||||
|
||||
WORKDIR /app/open-webui
|
||||
|
||||
RUN git checkout ${WEBUI_VERSION} && git am /app/patches/*.patch
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
RUN mv open-webui/* . && rm -fr open-webui && ls -lrth /app/backend/
|
||||
|
||||
RUN npm install onnxruntime-node --onnxruntime-node-install-cuda=skip
|
||||
RUN apk update && \
|
||||
apk add --no-cache wget && \
|
||||
wget https://github.com/microsoft/onnxruntime/releases/download/v1.20.1/onnxruntime-linux-x64-gpu-1.20.1.tgz
|
||||
|
||||
ENV APP_BUILD_HASH=${BUILD_HASH}
|
||||
RUN npm run build
|
||||
|
||||
######## WebUI backend ########
|
||||
FROM python:3.11-slim-bookworm AS base
|
||||
|
||||
# Use args
|
||||
ARG USE_CUDA
|
||||
ARG USE_OLLAMA
|
||||
ARG USE_CUDA_VER
|
||||
ARG USE_EMBEDDING_MODEL
|
||||
ARG USE_RERANKING_MODEL
|
||||
ARG UID
|
||||
ARG GID
|
||||
|
||||
## Basis ##
|
||||
ENV ENV=prod \
|
||||
PORT=8080 \
|
||||
# pass build args to the build
|
||||
USE_OLLAMA_DOCKER=${USE_OLLAMA} \
|
||||
USE_CUDA_DOCKER=${USE_CUDA} \
|
||||
USE_CUDA_DOCKER_VER=${USE_CUDA_VER} \
|
||||
USE_EMBEDDING_MODEL_DOCKER=${USE_EMBEDDING_MODEL} \
|
||||
USE_RERANKING_MODEL_DOCKER=${USE_RERANKING_MODEL}
|
||||
|
||||
## Basis URL Config ##
|
||||
ENV OLLAMA_BASE_URL="/ollama" \
|
||||
OPENAI_API_BASE_URL=""
|
||||
|
||||
## API Key and Security Config ##
|
||||
ENV OPENAI_API_KEY="" \
|
||||
WEBUI_SECRET_KEY="" \
|
||||
SCARF_NO_ANALYTICS=true \
|
||||
DO_NOT_TRACK=true \
|
||||
ANONYMIZED_TELEMETRY=false
|
||||
|
||||
#### Other models #########################################################
|
||||
## whisper TTS model settings ##
|
||||
ENV WHISPER_MODEL="base" \
|
||||
WHISPER_MODEL_DIR="/app/backend/data/cache/whisper/models"
|
||||
|
||||
## RAG Embedding model settings ##
|
||||
ENV RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL_DOCKER" \
|
||||
RAG_RERANKING_MODEL="$USE_RERANKING_MODEL_DOCKER" \
|
||||
SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models"
|
||||
|
||||
## Tiktoken model settings ##
|
||||
ENV TIKTOKEN_ENCODING_NAME="cl100k_base" \
|
||||
TIKTOKEN_CACHE_DIR="/app/backend/data/cache/tiktoken"
|
||||
|
||||
## Hugging Face download cache ##
|
||||
ENV HF_HOME="/app/backend/data/cache/embedding/models"
|
||||
|
||||
## Torch Extensions ##
|
||||
# ENV TORCH_EXTENSIONS_DIR="/.cache/torch_extensions"
|
||||
|
||||
#### Other models ##########################################################
|
||||
|
||||
COPY --from=build /app/backend /app/backend
|
||||
|
||||
WORKDIR /app/backend
|
||||
|
||||
WORKDIR /root/
|
||||
|
||||
ENV HOME=/root
|
||||
# Create user and group if not root
|
||||
RUN if [ $UID -ne 0 ]; then \
|
||||
if [ $GID -ne 0 ]; then \
|
||||
addgroup --gid $GID app; \
|
||||
fi; \
|
||||
adduser --uid $UID --gid $GID --home $HOME --disabled-password --no-create-home app; \
|
||||
fi
|
||||
ENV VIRTUAL_ENV=$HOME/.env/open-webui
|
||||
|
||||
RUN mkdir -p $HOME/.cache/chroma
|
||||
RUN printf 00000000-0000-0000-0000-000000000000 > $HOME/.cache/chroma/telemetry_user_id
|
||||
COPY open_webui_patches /root/patches
|
||||
|
||||
# Make sure the user has access to the app and root directory
|
||||
RUN chown -R $UID:$GID /app $HOME
|
||||
RUN git clone https://github.com/open-webui/open-webui.git && \
|
||||
git config --global user.name "opea" && git config --global user.email "" && \
|
||||
mkdir -p $HOME/.env && python3 -m venv $VIRTUAL_ENV && \
|
||||
$VIRTUAL_ENV/bin/python -m pip install --no-cache-dir --upgrade pip && \
|
||||
$VIRTUAL_ENV/bin/python -m pip install --no-cache-dir build
|
||||
|
||||
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
|
||||
WORKDIR /root/open-webui
|
||||
|
||||
RUN if [ "$USE_OLLAMA" = "true" ]; then \
|
||||
apt-get update && \
|
||||
# Install pandoc and netcat
|
||||
apt-get install -y --no-install-recommends git build-essential pandoc netcat-openbsd curl && \
|
||||
apt-get install -y --no-install-recommends gcc python3-dev && \
|
||||
# for RAG OCR
|
||||
apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
|
||||
# install helper tools
|
||||
apt-get install -y --no-install-recommends curl jq && \
|
||||
# install ollama
|
||||
curl -fsSL https://ollama.com/install.sh | sh && \
|
||||
# cleanup
|
||||
rm -rf /var/lib/apt/lists/*; \
|
||||
else \
|
||||
apt-get update && \
|
||||
# Install pandoc, netcat and gcc
|
||||
apt-get install -y --no-install-recommends git build-essential pandoc gcc netcat-openbsd curl jq && \
|
||||
apt-get install -y --no-install-recommends gcc python3-dev && \
|
||||
# for RAG OCR
|
||||
apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
|
||||
# cleanup
|
||||
rm -rf /var/lib/apt/lists/*; \
|
||||
fi
|
||||
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
|
||||
|
||||
# install python dependencies
|
||||
# COPY --chown=$UID:$GID ./backend/requirements.txt ./requirements.txt
|
||||
# RUN cp /app/backend/requirements.txt ./requirements.txt
|
||||
RUN git checkout v0.5.20 && \
|
||||
git am ../patches/*.patch && \
|
||||
python -m build && \
|
||||
pip install --no-cache-dir dist/open_webui-0.5.20-py3-none-any.whl
|
||||
|
||||
RUN pip3 install --no-cache-dir uv && \
|
||||
if [ "$USE_CUDA" = "true" ]; then \
|
||||
# If you use CUDA the whisper and embedding model will be downloaded on first use
|
||||
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir && \
|
||||
uv pip install --system -r requirements.txt --no-cache-dir && \
|
||||
python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
|
||||
python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
|
||||
python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
|
||||
else \
|
||||
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
|
||||
uv pip install --system -r requirements.txt --no-cache-dir && \
|
||||
python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
|
||||
python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
|
||||
python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
|
||||
fi; \
|
||||
chown -R $UID:$GID /app/backend/data/
|
||||
ENV LANG=en_US.UTF-8
|
||||
|
||||
WORKDIR /root/
|
||||
|
||||
RUN rm -fr /root/open-webui && rm -fr /root/patches
|
||||
|
||||
# CMD ["/bin/bash"]
|
||||
ENTRYPOINT ["open-webui", "serve"]
|
||||
|
||||
|
||||
|
||||
# copy embedding weight from build
|
||||
# RUN mkdir -p /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2
|
||||
# COPY --from=build /app/onnx /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2/onnx
|
||||
|
||||
# copy built frontend files
|
||||
COPY --chown=$UID:$GID --from=build /app/build /app/build
|
||||
COPY --chown=$UID:$GID --from=build /app/CHANGELOG.md /app/CHANGELOG.md
|
||||
COPY --chown=$UID:$GID --from=build /app/package.json /app/package.json
|
||||
|
||||
# copy backend files
|
||||
# COPY --chown=$UID:$GID ./backend .
|
||||
|
||||
EXPOSE 8080
|
||||
|
||||
HEALTHCHECK CMD curl --silent --fail http://localhost:${PORT:-8080}/health | jq -ne 'input.status == true' || exit 1
|
||||
|
||||
USER $UID:$GID
|
||||
|
||||
ARG BUILD_HASH
|
||||
ENV WEBUI_BUILD_VERSION=${BUILD_HASH}
|
||||
ENV DOCKER=true
|
||||
|
||||
CMD [ "bash", "start.sh"]
|
||||
|
||||
192 ChatQnA/benchmark/performance/kubernetes/intel/gaudi/README.md  Normal file
@@ -0,0 +1,192 @@
# ChatQnA Benchmarking

This folder contains a collection of Kubernetes manifest files for deploying the ChatQnA service across scalable nodes. It includes a comprehensive [benchmarking tool](https://github.com/opea-project/GenAIEval/blob/main/evals/benchmark/README.md) that enables throughput analysis to assess inference performance.

By following this guide, you can run benchmarks on your deployment and share the results with the OPEA community.

## Purpose

We aim to run these benchmarks and share them with the OPEA community for three primary reasons:

- To offer insights on inference throughput in real-world scenarios, helping you choose the best service or deployment for your needs.
- To establish a baseline for validating optimization solutions across different implementations, providing clear guidance on which methods are most effective for your use case.
- To inspire the community to build upon our benchmarks, allowing us to better quantify new solutions in conjunction with current leading LLMs, serving frameworks, etc.

## Metrics

The benchmark reports the following metrics:

- Number of Concurrent Requests
- End-to-End Latency: P50, P90, P99 (in milliseconds)
- End-to-End First Token Latency: P50, P90, P99 (in milliseconds)
- Average Next Token Latency (in milliseconds)
- Average Token Latency (in milliseconds)
- Requests Per Second (RPS)
- Output Tokens Per Second
- Input Tokens Per Second

Results will be displayed in the terminal and saved as a CSV file named `1_stats.csv` for easy export to spreadsheets.
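To take a quick look at the results from the terminal, here is a minimal sketch; it assumes `1_stats.csv` is plain comma-separated text, and the path depends on where your run writes it:

```bash
# Pretty-print the benchmark summary CSV as an aligned table.
column -s, -t 1_stats.csv | less -S
```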
## Table of Contents

- [Deployment](#deployment)
  - [Prerequisites](#prerequisites)
  - [Deployment Scenarios](#deployment-scenarios)
    - [Case 1: Baseline Deployment with Rerank](#case-1-baseline-deployment-with-rerank)
    - [Case 2: Baseline Deployment without Rerank](#case-2-baseline-deployment-without-rerank)
    - [Case 3: Tuned Deployment with Rerank](#case-3-tuned-deployment-with-rerank)
- [Benchmark](#benchmark)
  - [Test Configurations](#test-configurations)
  - [Test Steps](#test-steps)
    - [Upload Retrieval File](#upload-retrieval-file)
    - [Run Benchmark Test](#run-benchmark-test)
    - [Data collection](#data-collection)
- [Teardown](#teardown)

## Deployment

### Prerequisites

- Kubernetes installation: Use [kubespray](https://github.com/opea-project/docs/blob/main/guide/installation/k8s_install/k8s_install_kubespray.md) or other official Kubernetes installation guides:
  - (Optional) [Kubernetes set up guide on Intel Gaudi product](https://github.com/opea-project/GenAIInfra/blob/main/README.md#setup-kubernetes-cluster)
- Helm installation: Follow the [Helm documentation](https://helm.sh/docs/intro/install/#helm) to install Helm.
- Set up Hugging Face Token

  To access models and APIs from Hugging Face, set your token as an environment variable.

  ```bash
  export HF_TOKEN="insert-your-huggingface-token-here"
  ```

- Prepare Shared Models (Optional but Strongly Recommended)

  Downloading models simultaneously to multiple nodes in your cluster can overload resources such as network bandwidth, memory and storage. To prevent resource exhaustion, it's recommended to preload the models in advance.

  ```bash
  pip install -U "huggingface_hub[cli]"
  sudo mkdir -p /mnt/models
  sudo chmod 777 /mnt/models
  huggingface-cli download --cache-dir /mnt/models Intel/neural-chat-7b-v3-3
  export MODEL_DIR=/mnt/models
  ```

  Once the models are downloaded, you can consider the following methods for sharing them across nodes:

  - Persistent Volume Claim (PVC): This is the recommended approach for production setups. For more details on using PVC, refer to [PVC](https://github.com/opea-project/GenAIInfra/blob/main/helm-charts/README.md#using-persistent-volume).
  - Local Host Path: For simpler testing, ensure that each node involved in the deployment follows the steps above to locally prepare the models. After preparing the models, use `--set global.modelUseHostPath=${MODEL_DIR}` in the deployment command.

- Label Nodes

  ```bash
  python deploy.py --add-label --num-nodes 2
  ```

### Deployment Scenarios

The examples below are based on a two-node setup. You can adjust the number of nodes by using the `--num-nodes` option.

By default, these commands use the `default` namespace. To specify a different namespace, use the `--namespace` flag with the deploy, uninstall, and kubectl commands. Additionally, update the `namespace` field in `benchmark.yaml` before running the benchmark test.

For additional configuration options, run `python deploy.py --help`.
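As an illustration, a namespaced deployment could look like the following sketch; the flags come from `deploy.py --help`, and the namespace name `benchmark` is only an example:

```bash
# Deploy into a dedicated namespace (remember to update the namespace field in benchmark.yaml as well).
python deploy.py --hf-token $HF_TOKEN --model-dir $MODEL_DIR --num-nodes 2 --with-rerank --namespace benchmark

# Tear the release down from the same namespace when finished.
python deploy.py --uninstall --namespace benchmark
```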
#### Case 1: Baseline Deployment with Rerank

Deploy Command (with node number, Hugging Face token, model directory specified):

```bash
python deploy.py --hf-token $HF_TOKEN --model-dir $MODEL_DIR --num-nodes 2 --with-rerank
```

Uninstall Command:

```bash
python deploy.py --uninstall
```

#### Case 2: Baseline Deployment without Rerank

```bash
python deploy.py --hf-token $HF_TOKEN --model-dir $MODEL_DIR --num-nodes 2
```

#### Case 3: Tuned Deployment with Rerank

```bash
python deploy.py --hf-token $HF_TOKEN --model-dir $MODEL_DIR --num-nodes 2 --with-rerank --tuned
```

## Benchmark

### Test Configurations

| Key      | Value   |
| -------- | ------- |
| Workload | ChatQnA |
| Tag      | V1.1    |

Models configuration

| Key       | Value                     |
| --------- | ------------------------- |
| Embedding | BAAI/bge-base-en-v1.5     |
| Reranking | BAAI/bge-reranker-base    |
| Inference | Intel/neural-chat-7b-v3-3 |

Benchmark parameters

| Key               | Value |
| ----------------- | ----- |
| LLM input tokens  | 1024  |
| LLM output tokens | 128   |

Number of test requests for different scheduled node number:

| Node count | Concurrency | Query number |
| ---------- | ----------- | ------------ |
| 1          | 128         | 640          |
| 2          | 256         | 1280         |
| 4          | 512         | 2560         |

More detailed configuration can be found in the configuration file [benchmark.yaml](./benchmark.yaml).

### Test Steps

Use `kubectl get pods` to confirm that all pods are `READY` before starting the test.
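One optional way to wait for readiness in a single command is sketched below; adjust the namespace and timeout to your deployment:

```bash
# Block until every pod in the current namespace reports Ready (up to 15 minutes).
kubectl wait --for=condition=Ready pods --all --timeout=900s
```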
#### Upload Retrieval File

Before testing, upload the specified file to make sure the LLM input has a token length of 1k.

Get the file:

```bash
wget https://github.com/opea-project/GenAIEval/tree/main/evals/benchmark/data/upload_file.txt
```

Retrieve the `ClusterIP` of the `chatqna-data-prep` service.

```bash
kubectl get svc
```

Expected output:

```log
chatqna-data-prep   ClusterIP   xx.xx.xx.xx   <none>   6007/TCP   51m
```

Use the following `cURL` command to upload the file:

```bash
cd GenAIEval/evals/benchmark/data
curl -X POST "http://${cluster_ip}:6007/v1/dataprep/ingest" \
     -H "Content-Type: multipart/form-data" \
     -F "chunk_size=3800" \
     -F "files=@./upload_file.txt"
```

#### Run Benchmark Test

Run the benchmark test using:

```bash
bash benchmark.sh -n 2
```

The `-n` argument specifies the number of test nodes. Required dependencies will be automatically installed when running the benchmark for the first time.
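The script also reads a few optional environment variables in `run_benchmark()` (see `benchmark.sh` later in this page); the sketch below uses those variable names with illustrative values only:

```bash
# Optional overrides read by benchmark.sh; the values here are illustrative.
export load_shape="poisson"                  # default: constant
export arrival_rate=1.0                      # used only by the poisson load shape
export USER_QUERIES="[640, 640, 640, 640]"   # query count per concurrency level
bash benchmark.sh -n 2
```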
#### Data collection

All test results will be saved in the folder `GenAIEval/evals/benchmark/benchmark_output`.

## Teardown

After completing the benchmark, use the following command to clean up the environment:

Remove Node Labels:

```bash
python deploy.py --delete-label
```
102 ChatQnA/benchmark/performance/kubernetes/intel/gaudi/benchmark.sh  Executable file
@@ -0,0 +1,102 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright (C) 2024 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
deployment_type="k8s"
|
||||
node_number=1
|
||||
service_port=8888
|
||||
query_per_node=640
|
||||
|
||||
benchmark_tool_path="$(pwd)/GenAIEval"
|
||||
|
||||
usage() {
|
||||
echo "Usage: $0 [-d deployment_type] [-n node_number] [-i service_ip] [-p service_port]"
|
||||
echo " -d deployment_type ChatQnA deployment type, select between k8s and docker (default: k8s)"
|
||||
echo " -n node_number Test node number, required only for k8s deployment_type, (default: 1)"
|
||||
echo " -i service_ip chatqna service ip, required only for docker deployment_type"
|
||||
echo " -p service_port chatqna service port, required only for docker deployment_type, (default: 8888)"
|
||||
exit 1
|
||||
}
|
||||
|
||||
while getopts ":d:n:i:p:" opt; do
|
||||
case ${opt} in
|
||||
d )
|
||||
deployment_type=$OPTARG
|
||||
;;
|
||||
n )
|
||||
node_number=$OPTARG
|
||||
;;
|
||||
i )
|
||||
service_ip=$OPTARG
|
||||
;;
|
||||
p )
|
||||
service_port=$OPTARG
|
||||
;;
|
||||
\? )
|
||||
echo "Invalid option: -$OPTARG" 1>&2
|
||||
usage
|
||||
;;
|
||||
: )
|
||||
echo "Invalid option: -$OPTARG requires an argument" 1>&2
|
||||
usage
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ "$deployment_type" == "docker" && -z "$service_ip" ]]; then
|
||||
echo "Error: service_ip is required for docker deployment_type" 1>&2
|
||||
usage
|
||||
fi
|
||||
|
||||
if [[ "$deployment_type" == "k8s" && ( -n "$service_ip" || -n "$service_port" ) ]]; then
|
||||
echo "Warning: service_ip and service_port are ignored for k8s deployment_type" 1>&2
|
||||
fi
|
||||
|
||||
function main() {
|
||||
if [[ ! -d ${benchmark_tool_path} ]]; then
|
||||
echo "Benchmark tool not found, setting up..."
|
||||
setup_env
|
||||
fi
|
||||
run_benchmark
|
||||
}
|
||||
|
||||
function setup_env() {
|
||||
git clone https://github.com/opea-project/GenAIEval.git
|
||||
pushd ${benchmark_tool_path}
|
||||
python3 -m venv stress_venv
|
||||
source stress_venv/bin/activate
|
||||
pip install -r requirements.txt
|
||||
popd
|
||||
}
|
||||
|
||||
function run_benchmark() {
|
||||
source ${benchmark_tool_path}/stress_venv/bin/activate
|
||||
export DEPLOYMENT_TYPE=${deployment_type}
|
||||
export SERVICE_IP=${service_ip:-"None"}
|
||||
export SERVICE_PORT=${service_port:-"None"}
|
||||
export LOAD_SHAPE=${load_shape:-"constant"}
|
||||
export CONCURRENT_LEVEL=${concurrent_level:-5}
|
||||
export ARRIVAL_RATE=${arrival_rate:-1.0}
|
||||
if [[ -z $USER_QUERIES ]]; then
|
||||
user_query=$((query_per_node*node_number))
|
||||
export USER_QUERIES="[${user_query}, ${user_query}, ${user_query}, ${user_query}]"
|
||||
echo "USER_QUERIES not configured, setting to: ${USER_QUERIES}."
|
||||
fi
|
||||
export WARMUP=$(echo $USER_QUERIES | sed -e 's/[][]//g' -e 's/,.*//')
|
||||
if [[ -z $WARMUP ]]; then export WARMUP=0; fi
|
||||
if [[ -z $TEST_OUTPUT_DIR ]]; then
|
||||
if [[ $DEPLOYMENT_TYPE == "k8s" ]]; then
|
||||
export TEST_OUTPUT_DIR="${benchmark_tool_path}/evals/benchmark/benchmark_output/node_${node_number}"
|
||||
else
|
||||
export TEST_OUTPUT_DIR="${benchmark_tool_path}/evals/benchmark/benchmark_output/docker"
|
||||
fi
|
||||
echo "TEST_OUTPUT_DIR not configured, setting to: ${TEST_OUTPUT_DIR}."
|
||||
fi
|
||||
|
||||
envsubst < ./benchmark.yaml > ${benchmark_tool_path}/evals/benchmark/benchmark.yaml
|
||||
cd ${benchmark_tool_path}/evals/benchmark
|
||||
python benchmark.py
|
||||
}
|
||||
|
||||
main
|
||||
@@ -0,0 +1,68 @@
|
||||
# Copyright (C) 2024 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
test_suite_config: # Overall configuration settings for the test suite
|
||||
examples: ["chatqna"] # The specific test cases being tested, e.g., chatqna, codegen, codetrans, faqgen, audioqna, visualqna
|
||||
deployment_type: ${DEPLOYMENT_TYPE} # Default is "k8s", can also be "docker"
|
||||
service_ip: ${SERVICE_IP} # Leave as None for k8s, specify for Docker
|
||||
service_port: ${SERVICE_PORT} # Leave as None for k8s, specify for Docker
|
||||
warm_ups: ${WARMUP} # Number of test requests for warm-up
|
||||
run_time: 60m # The max total run time for the test suite
|
||||
seed: # The seed for all RNGs
|
||||
user_queries: ${USER_QUERIES} # Number of test requests at each concurrency level
|
||||
query_timeout: 120 # Number of seconds to wait for a simulated user to complete any executing task before exiting. 120 sec by default.
|
||||
random_prompt: false # Use random prompts if true, fixed prompts if false
|
||||
collect_service_metric: false # Collect service metrics if true, do not collect service metrics if false
|
||||
data_visualization: false # Generate data visualization if true, do not generate data visualization if false
|
||||
llm_model: "Intel/neural-chat-7b-v3-3" # The LLM model used for the test
|
||||
test_output_dir: "${TEST_OUTPUT_DIR}" # The directory to store the test output
|
||||
load_shape: # Tenant concurrency pattern
|
||||
name: ${LOAD_SHAPE} # poisson or constant(locust default load shape)
|
||||
params: # Loadshape-specific parameters
|
||||
constant: # Constant load shape specific parameters, activate only if load_shape.name is constant
|
||||
concurrent_level: ${CONCURRENT_LEVEL} # If user_queries is specified, concurrent_level is target number of requests per user. If not, it is the number of simulated users
|
||||
poisson: # Poisson load shape specific parameters, activate only if load_shape.name is poisson
|
||||
arrival_rate: ${ARRIVAL_RATE} # Request arrival rate
|
||||
|
||||
test_cases:
|
||||
chatqna:
|
||||
embedding:
|
||||
run_test: false
|
||||
service_name: "chatqna-embedding-usvc" # Replace with your service name
|
||||
embedserve:
|
||||
run_test: false
|
||||
service_name: "chatqna-tei" # Replace with your service name
|
||||
retriever:
|
||||
run_test: false
|
||||
service_name: "chatqna-retriever-usvc" # Replace with your service name
|
||||
parameters:
|
||||
search_type: "similarity"
|
||||
k: 1
|
||||
fetch_k: 20
|
||||
lambda_mult: 0.5
|
||||
score_threshold: 0.2
|
||||
reranking:
|
||||
run_test: false
|
||||
service_name: "chatqna-reranking-usvc" # Replace with your service name
|
||||
parameters:
|
||||
top_n: 1
|
||||
rerankserve:
|
||||
run_test: false
|
||||
service_name: "chatqna-teirerank" # Replace with your service name
|
||||
llm:
|
||||
run_test: false
|
||||
service_name: "chatqna-llm-uservice" # Replace with your service name
|
||||
parameters:
|
||||
max_tokens: 128
|
||||
temperature: 0.01
|
||||
top_k: 10
|
||||
top_p: 0.95
|
||||
repetition_penalty: 1.03
|
||||
stream: true
|
||||
llmserve:
|
||||
run_test: false
|
||||
service_name: "chatqna-tgi" # Replace with your service name
|
||||
e2e:
|
||||
run_test: true
|
||||
service_name: "chatqna" # Replace with your service name
|
||||
k: 1
|
||||
278 ChatQnA/benchmark/performance/kubernetes/intel/gaudi/deploy.py  Normal file
@@ -0,0 +1,278 @@
|
||||
# Copyright (C) 2024 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import argparse
|
||||
import glob
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from generate_helm_values import generate_helm_values
|
||||
|
||||
|
||||
def run_kubectl_command(command):
|
||||
"""Run a kubectl command and return the output."""
|
||||
try:
|
||||
result = subprocess.run(command, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
|
||||
return result.stdout
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"Error running command: {command}\n{e.stderr}")
|
||||
exit(1)
|
||||
|
||||
|
||||
def get_all_nodes():
|
||||
"""Get the list of all nodes in the Kubernetes cluster."""
|
||||
command = ["kubectl", "get", "nodes", "-o", "json"]
|
||||
output = run_kubectl_command(command)
|
||||
nodes = json.loads(output)
|
||||
return [node["metadata"]["name"] for node in nodes["items"]]
|
||||
|
||||
|
||||
def add_label_to_node(node_name, label):
|
||||
"""Add a label to the specified node."""
|
||||
command = ["kubectl", "label", "node", node_name, label, "--overwrite"]
|
||||
print(f"Labeling node {node_name} with {label}...")
|
||||
run_kubectl_command(command)
|
||||
print(f"Label {label} added to node {node_name} successfully.")
|
||||
|
||||
|
||||
def add_labels_to_nodes(node_count=None, label=None, node_names=None):
|
||||
"""Add a label to the specified number of nodes or to specified nodes."""
|
||||
|
||||
if node_names:
|
||||
# Add label to the specified nodes
|
||||
for node_name in node_names:
|
||||
add_label_to_node(node_name, label)
|
||||
else:
|
||||
# Fetch the node list and label the specified number of nodes
|
||||
all_nodes = get_all_nodes()
|
||||
if node_count is None or node_count > len(all_nodes):
|
||||
print(f"Error: Node count exceeds the number of available nodes ({len(all_nodes)} available).")
|
||||
sys.exit(1)
|
||||
|
||||
selected_nodes = all_nodes[:node_count]
|
||||
for node_name in selected_nodes:
|
||||
add_label_to_node(node_name, label)
|
||||
|
||||
|
||||
def clear_labels_from_nodes(label, node_names=None):
|
||||
"""Clear the specified label from specific nodes if provided, otherwise from all nodes."""
|
||||
label_key = label.split("=")[0] # Extract key from 'key=value' format
|
||||
|
||||
# If specific nodes are provided, use them; otherwise, get all nodes
|
||||
nodes_to_clear = node_names if node_names else get_all_nodes()
|
||||
|
||||
for node_name in nodes_to_clear:
|
||||
# Check if the node has the label by inspecting its metadata
|
||||
command = ["kubectl", "get", "node", node_name, "-o", "json"]
|
||||
node_info = run_kubectl_command(command)
|
||||
node_metadata = json.loads(node_info)
|
||||
|
||||
# Check if the label exists on this node
|
||||
labels = node_metadata["metadata"].get("labels", {})
|
||||
if label_key in labels:
|
||||
# Remove the label from the node
|
||||
command = ["kubectl", "label", "node", node_name, f"{label_key}-"]
|
||||
print(f"Removing label {label_key} from node {node_name}...")
|
||||
run_kubectl_command(command)
|
||||
print(f"Label {label_key} removed from node {node_name} successfully.")
|
||||
else:
|
||||
print(f"Label {label_key} not found on node {node_name}, skipping.")
|
||||
|
||||
|
||||
def install_helm_release(release_name, chart_name, namespace, values_file, device_type):
|
||||
"""Deploy a Helm release with a specified name and chart.
|
||||
|
||||
Parameters:
|
||||
- release_name: The name of the Helm release.
|
||||
- chart_name: The Helm chart name or path, e.g., "opea/chatqna".
|
||||
- namespace: The Kubernetes namespace for deployment.
|
||||
- values_file: The user values file for deployment.
|
||||
- device_type: The device type (e.g., "gaudi") for specific configurations (optional).
|
||||
"""
|
||||
|
||||
# Check if the namespace exists; if not, create it
|
||||
try:
|
||||
# Check if the namespace exists
|
||||
command = ["kubectl", "get", "namespace", namespace]
|
||||
subprocess.run(command, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
|
||||
except subprocess.CalledProcessError:
|
||||
# Namespace does not exist, create it
|
||||
print(f"Namespace '{namespace}' does not exist. Creating it...")
|
||||
command = ["kubectl", "create", "namespace", namespace]
|
||||
subprocess.run(command, check=True)
|
||||
print(f"Namespace '{namespace}' created successfully.")
|
||||
|
||||
# Handle gaudi-specific values file if device_type is "gaudi"
|
||||
hw_values_file = None
|
||||
untar_dir = None
|
||||
if device_type == "gaudi":
|
||||
print("Device type is gaudi. Pulling Helm chart to get gaudi-values.yaml...")
|
||||
|
||||
# Combine chart_name with fixed prefix
|
||||
chart_pull_url = f"oci://ghcr.io/opea-project/charts/{chart_name}"
|
||||
|
||||
# Pull and untar the chart
|
||||
subprocess.run(["helm", "pull", chart_pull_url, "--untar"], check=True)
|
||||
|
||||
# Find the untarred directory
|
||||
untar_dirs = glob.glob(f"{chart_name}*")
|
||||
if untar_dirs:
|
||||
untar_dir = untar_dirs[0]
|
||||
hw_values_file = os.path.join(untar_dir, "gaudi-values.yaml")
|
||||
print("gaudi-values.yaml pulled and ready for use.")
|
||||
else:
|
||||
print(f"Error: Could not find untarred directory for {chart_name}")
|
||||
return
|
||||
|
||||
# Prepare the Helm install command
|
||||
command = ["helm", "install", release_name, chart_name, "--namespace", namespace]
|
||||
|
||||
# Append additional values file for gaudi if it exists
|
||||
if hw_values_file:
|
||||
command.extend(["-f", hw_values_file])
|
||||
|
||||
# Append the main values file
|
||||
command.extend(["-f", values_file])
|
||||
|
||||
# Execute the Helm install command
|
||||
try:
|
||||
print(f"Running command: {' '.join(command)}") # Print full command for debugging
|
||||
subprocess.run(command, check=True)
|
||||
print("Deployment initiated successfully.")
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"Error occurred while deploying Helm release: {e}")
|
||||
|
||||
# Cleanup: Remove the untarred directory
|
||||
if untar_dir and os.path.isdir(untar_dir):
|
||||
print(f"Removing temporary directory: {untar_dir}")
|
||||
shutil.rmtree(untar_dir)
|
||||
print("Temporary directory removed successfully.")
|
||||
|
||||
|
||||
def uninstall_helm_release(release_name, namespace=None):
|
||||
"""Uninstall a Helm release and clean up resources, optionally delete the namespace if not 'default'."""
|
||||
# Default to 'default' namespace if none is specified
|
||||
if not namespace:
|
||||
namespace = "default"
|
||||
|
||||
try:
|
||||
# Uninstall the Helm release
|
||||
command = ["helm", "uninstall", release_name, "--namespace", namespace]
|
||||
print(f"Uninstalling Helm release {release_name} in namespace {namespace}...")
|
||||
run_kubectl_command(command)
|
||||
print(f"Helm release {release_name} uninstalled successfully.")
|
||||
|
||||
# If the namespace is specified and not 'default', delete it
|
||||
if namespace != "default":
|
||||
print(f"Deleting namespace {namespace}...")
|
||||
delete_namespace_command = ["kubectl", "delete", "namespace", namespace]
|
||||
run_kubectl_command(delete_namespace_command)
|
||||
print(f"Namespace {namespace} deleted successfully.")
|
||||
else:
|
||||
print("Namespace is 'default', skipping deletion.")
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"Error occurred while uninstalling Helm release or deleting namespace: {e}")
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Manage Helm Deployment.")
|
||||
parser.add_argument(
|
||||
"--release-name",
|
||||
type=str,
|
||||
default="chatqna",
|
||||
help="The Helm release name created during deployment (default: chatqna).",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--chart-name",
|
||||
type=str,
|
||||
default="chatqna",
|
||||
help="The chart name to deploy, composed of repo name and chart name (default: chatqna).",
|
||||
)
|
||||
parser.add_argument("--namespace", default="default", help="Kubernetes namespace (default: default).")
|
||||
parser.add_argument("--hf-token", help="Hugging Face API token.")
|
||||
parser.add_argument(
|
||||
"--model-dir", help="Model directory, mounted as volumes for service access to pre-downloaded models"
|
||||
)
|
||||
parser.add_argument("--user-values", help="Path to a user-specified values.yaml file.")
|
||||
parser.add_argument(
|
||||
"--create-values-only", action="store_true", help="Only create the values.yaml file without deploying."
|
||||
)
|
||||
parser.add_argument("--uninstall", action="store_true", help="Uninstall the Helm release.")
|
||||
parser.add_argument("--num-nodes", type=int, default=1, help="Number of nodes to use (default: 1).")
|
||||
parser.add_argument("--node-names", nargs="*", help="Optional specific node names to label.")
|
||||
parser.add_argument("--add-label", action="store_true", help="Add label to specified nodes if this flag is set.")
|
||||
parser.add_argument(
|
||||
"--delete-label", action="store_true", help="Delete label from specified nodes if this flag is set."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--label", default="node-type=opea-benchmark", help="Label to add/delete (default: node-type=opea-benchmark)."
|
||||
)
|
||||
parser.add_argument("--with-rerank", action="store_true", help="Include rerank service in the deployment.")
|
||||
parser.add_argument(
|
||||
"--tuned",
|
||||
action="store_true",
|
||||
help="Modify resources for services and change extraCmdArgs when creating values.yaml.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--device-type",
|
||||
type=str,
|
||||
choices=["cpu", "gaudi"],
|
||||
default="gaudi",
|
||||
help="Specify the device type for deployment (choices: 'cpu', 'gaudi'; default: gaudi).",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# Adjust num-nodes based on node-names if specified
|
||||
if args.node_names:
|
||||
num_node_names = len(args.node_names)
|
||||
if args.num_nodes != 1 and args.num_nodes != num_node_names:
|
||||
parser.error("--num-nodes must match the number of --node-names if both are specified.")
|
||||
else:
|
||||
args.num_nodes = num_node_names
|
||||
|
||||
# Node labeling management
|
||||
if args.add_label:
|
||||
add_labels_to_nodes(args.num_nodes, args.label, args.node_names)
|
||||
return
|
||||
elif args.delete_label:
|
||||
clear_labels_from_nodes(args.label, args.node_names)
|
||||
return
|
||||
|
||||
# Uninstall Helm release if specified
|
||||
if args.uninstall:
|
||||
uninstall_helm_release(args.release_name, args.namespace)
|
||||
return
|
||||
|
||||
# Prepare values.yaml if not uninstalling
|
||||
if args.user_values:
|
||||
values_file_path = args.user_values
|
||||
else:
|
||||
if not args.hf_token:
|
||||
parser.error("--hf-token is required")
|
||||
node_selector = {args.label.split("=")[0]: args.label.split("=")[1]}
|
||||
values_file_path = generate_helm_values(
|
||||
with_rerank=args.with_rerank,
|
||||
num_nodes=args.num_nodes,
|
||||
hf_token=args.hf_token,
|
||||
model_dir=args.model_dir,
|
||||
node_selector=node_selector,
|
||||
tune=args.tuned,
|
||||
)
|
||||
|
||||
# Read back the generated YAML file for verification
|
||||
with open(values_file_path, "r") as file:
|
||||
print("Generated YAML contents:")
|
||||
print(file.read())
|
||||
|
||||
# Deploy unless --create-values-only is specified
|
||||
if not args.create_values_only:
|
||||
install_helm_release(args.release_name, args.chart_name, args.namespace, values_file_path, args.device_type)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,164 @@
|
||||
# Copyright (C) 2024 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import os
|
||||
|
||||
import yaml
|
||||
|
||||
|
||||
def generate_helm_values(with_rerank, num_nodes, hf_token, model_dir, node_selector=None, tune=False):
|
||||
"""Create a values.yaml file based on the provided configuration."""
|
||||
|
||||
# Log the received parameters
|
||||
print("Received parameters:")
|
||||
print(f"with_rerank: {with_rerank}")
|
||||
print(f"num_nodes: {num_nodes}")
|
||||
print(f"node_selector: {node_selector}") # Log the node_selector
|
||||
print(f"tune: {tune}")
|
||||
|
||||
if node_selector is None:
|
||||
node_selector = {}
|
||||
|
||||
# Construct the base values dictionary
|
||||
values = {
|
||||
"tei": {"nodeSelector": {key: value for key, value in node_selector.items()}},
|
||||
"tgi": {"nodeSelector": {key: value for key, value in node_selector.items()}},
|
||||
"data-prep": {"nodeSelector": {key: value for key, value in node_selector.items()}},
|
||||
"redis-vector-db": {"nodeSelector": {key: value for key, value in node_selector.items()}},
|
||||
"retriever-usvc": {"nodeSelector": {key: value for key, value in node_selector.items()}},
|
||||
"chatqna-ui": {"nodeSelector": {key: value for key, value in node_selector.items()}},
|
||||
"global": {
|
||||
"HUGGINGFACEHUB_API_TOKEN": hf_token, # Use passed token
|
||||
"modelUseHostPath": model_dir, # Use passed model directory
|
||||
},
|
||||
"nodeSelector": {key: value for key, value in node_selector.items()},
|
||||
}
|
||||
|
||||
if with_rerank:
|
||||
values["teirerank"] = {"nodeSelector": {key: value for key, value in node_selector.items()}}
|
||||
else:
|
||||
values["image"] = {"repository": "opea/chatqna-without-rerank"}
|
||||
values["teirerank"] = {"enabled": False}
|
||||
|
||||
default_replicas = [
|
||||
{"name": "chatqna", "replicaCount": 2},
|
||||
{"name": "tei", "replicaCount": 1},
|
||||
{"name": "teirerank", "replicaCount": 1} if with_rerank else None,
|
||||
{"name": "tgi", "replicaCount": 7 if with_rerank else 8},
|
||||
{"name": "data-prep", "replicaCount": 1},
|
||||
{"name": "redis-vector-db", "replicaCount": 1},
|
||||
{"name": "retriever-usvc", "replicaCount": 2},
|
||||
]
|
||||
|
||||
if num_nodes > 1:
|
||||
# Scale replicas based on number of nodes
|
||||
replicas = [
|
||||
{"name": "chatqna", "replicaCount": 1 * num_nodes},
|
||||
{"name": "tei", "replicaCount": 1 * num_nodes},
|
||||
{"name": "teirerank", "replicaCount": 1} if with_rerank else None,
|
||||
{"name": "tgi", "replicaCount": (8 * num_nodes - 1) if with_rerank else 8 * num_nodes},
|
||||
{"name": "data-prep", "replicaCount": 1},
|
||||
{"name": "redis-vector-db", "replicaCount": 1},
|
||||
{"name": "retriever-usvc", "replicaCount": 1 * num_nodes},
|
||||
]
|
||||
else:
|
||||
replicas = default_replicas
|
||||
|
||||
# Remove None values for rerank disabled
|
||||
replicas = [r for r in replicas if r]
|
||||
|
||||
# Update values.yaml with replicas
|
||||
for replica in replicas:
|
||||
service_name = replica["name"]
|
||||
if service_name == "chatqna":
|
||||
values["replicaCount"] = replica["replicaCount"]
|
||||
print(replica["replicaCount"])
|
||||
elif service_name in values:
|
||||
values[service_name]["replicaCount"] = replica["replicaCount"]
|
||||
|
||||
# Prepare resource configurations based on tuning
|
||||
resources = []
|
||||
if tune:
|
||||
resources = [
|
||||
{
|
||||
"name": "chatqna",
|
||||
"resources": {
|
||||
"limits": {"cpu": "16", "memory": "8000Mi"},
|
||||
"requests": {"cpu": "16", "memory": "8000Mi"},
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "tei",
|
||||
"resources": {
|
||||
"limits": {"cpu": "80", "memory": "20000Mi"},
|
||||
"requests": {"cpu": "80", "memory": "20000Mi"},
|
||||
},
|
||||
},
|
||||
{"name": "teirerank", "resources": {"limits": {"habana.ai/gaudi": 1}}} if with_rerank else None,
|
||||
{"name": "tgi", "resources": {"limits": {"habana.ai/gaudi": 1}}},
|
||||
{"name": "retriever-usvc", "resources": {"requests": {"cpu": "8", "memory": "8000Mi"}}},
|
||||
]
|
||||
|
||||
# Filter out any None values directly as part of initialization
|
||||
resources = [r for r in resources if r is not None]
|
||||
|
||||
# Add resources for each service if tuning
|
||||
for resource in resources:
|
||||
service_name = resource["name"]
|
||||
if service_name == "chatqna":
|
||||
values["resources"] = resource["resources"]
|
||||
elif service_name in values:
|
||||
values[service_name]["resources"] = resource["resources"]
|
||||
|
||||
# Add extraCmdArgs for tgi service with default values
|
||||
if "tgi" in values:
|
||||
values["tgi"]["extraCmdArgs"] = [
|
||||
"--max-input-length",
|
||||
"1280",
|
||||
"--max-total-tokens",
|
||||
"2048",
|
||||
"--max-batch-total-tokens",
|
||||
"65536",
|
||||
"--max-batch-prefill-tokens",
|
||||
"4096",
|
||||
]
|
||||
|
||||
yaml_string = yaml.dump(values, default_flow_style=False)
|
||||
|
||||
# Determine the mode based on the 'tune' parameter
|
||||
mode = "tuned" if tune else "oob"
|
||||
|
||||
# Determine the filename based on 'with_rerank' and 'num_nodes'
|
||||
if with_rerank:
|
||||
filename = f"{mode}-{num_nodes}-gaudi-with-rerank-values.yaml"
|
||||
else:
|
||||
filename = f"{mode}-{num_nodes}-gaudi-without-rerank-values.yaml"
|
||||
|
||||
# Write the YAML data to the file
|
||||
with open(filename, "w") as file:
|
||||
file.write(yaml_string)
|
||||
|
||||
# Get the current working directory and construct the file path
|
||||
current_dir = os.getcwd()
|
||||
filepath = os.path.join(current_dir, filename)
|
||||
|
||||
print(f"YAML file {filepath} has been generated.")
|
||||
return filepath # Optionally return the file path
|
||||
|
||||
|
||||
# Main execution for standalone use of create_values_yaml
|
||||
if __name__ == "__main__":
|
||||
# Example values for standalone execution
|
||||
with_rerank = True
|
||||
num_nodes = 2
|
||||
hftoken = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
|
||||
modeldir = "/mnt/model"
|
||||
node_selector = {"node-type": "opea-benchmark"}
|
||||
tune = True
|
||||
|
||||
filename = generate_helm_values(with_rerank, num_nodes, hftoken, modeldir, node_selector, tune)
|
||||
|
||||
# Read back the generated YAML file for verification
|
||||
with open(filename, "r") as file:
|
||||
print("Generated YAML contents:")
|
||||
print(file.read())
|
||||
@@ -3,7 +3,7 @@
deploy:
  device: gaudi
  version: 1.3.0
  version: 1.2.0
  modelUseHostPath: /mnt/models
  HUGGINGFACEHUB_API_TOKEN: "" # mandatory
  node: [1, 2, 4, 8]
@@ -1,90 +1,163 @@
|
||||
# Deploying ChatQnA on AMD ROCm GPU
|
||||
# Build and Deploy ChatQnA Application on AMD GPU (ROCm)
|
||||
|
||||
This document outlines the single-node deployment process for a ChatQnA application utilizing the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservices on an Intel Xeon server with an AMD GPU. The steps include pulling Docker images, deploying containers via Docker Compose, and running services such as the `llm` microservice.
|
||||
## Build Docker Images
|
||||
|
||||
Note: The default LLM is `meta-llama/Meta-Llama-3-8B-Instruct`. Before deploying the application, please make sure you have either requested and been granted access to it on [Huggingface](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) or downloaded the model locally from [ModelScope](https://www.modelscope.cn/models).
|
||||
### 1. Build Docker Image
|
||||
|
||||
## Table of Contents
|
||||
- #### Create application install directory and go to it:
|
||||
|
||||
1. [ChatQnA Quick Start Deployment](#chatqna-quick-start-deployment)
|
||||
2. [ChatQnA Docker Compose Files](#chatqna-docker-compose-files)
|
||||
3. [Validate Microservices](#validate-microservices)
|
||||
4. [Conclusion](#conclusion)
|
||||
```bash
|
||||
mkdir ~/chatqna-install && cd ~/chatqna-install
|
||||
```
|
||||
|
||||
## ChatQnA Quick Start Deployment
|
||||
- #### Clone the repository GenAIExamples (the default repository branch "main" is used here):
|
||||
|
||||
This section describes how to quickly deploy and test the ChatQnA service manually on an AMD ROCm GPU. The basic steps are:
|
||||
```bash
|
||||
git clone https://github.com/opea-project/GenAIExamples.git
|
||||
```
|
||||
|
||||
1. [Access the Code](#access-the-code)
|
||||
2. [Configure the Deployment Environment](#configure-the-deployment-environment)
|
||||
3. [Deploy the Services Using Docker Compose](#deploy-the-services-using-docker-compose)
|
||||
4. [Check the Deployment Status](#check-the-deployment-status)
|
||||
5. [Validate the Pipeline](#validate-the-pipeline)
|
||||
6. [Cleanup the Deployment](#cleanup-the-deployment)
|
||||
If you need to use a specific branch/tag of the GenAIExamples repository, then run the following (replace v1.3 with the desired version):
|
||||
|
||||
### Access the Code
|
||||
```bash
|
||||
git clone https://github.com/opea-project/GenAIExamples.git && cd GenAIExamples && git checkout v1.3
|
||||
```
|
||||
|
||||
Clone the GenAIExample repository and access the ChatQnA AMD ROCm GPU platform Docker Compose files and supporting scripts:
|
||||
Keep in mind that when using a specific version of the code, you need to use the README from that version:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/opea-project/GenAIExamples.git
|
||||
cd GenAIExamples/ChatQnA
|
||||
```
|
||||
- #### Go to build directory:
|
||||
|
||||
Then checkout a released version, such as v1.3:
|
||||
```bash
|
||||
cd ~/chatqna-install/GenAIExamples/ChatQnA/docker_image_build
|
||||
```
|
||||
|
||||
```bash
|
||||
git checkout v1.3
|
||||
```
|
||||
- Clean up the GenAIComps repository if it was previously cloned in this directory.
|
||||
This is necessary if the build was performed earlier and the GenAIComps folder exists and is not empty:
|
||||
|
||||
### Configure the Deployment Environment
|
||||
```bash
|
||||
echo Y | rm -R GenAIComps
|
||||
```
|
||||
|
||||
To set up environment variables for deploying the ChatQnA services, configure the parameters specific to your deployment environment and source the appropriate `set_env_*.sh` script in this directory:
|
||||
- #### Clone the repository GenAIComps (the default repository branch "main" is used here):
|
||||
|
||||
- If using vLLM: set_env_vllm.sh
- If using vLLM with FaqGen: set_env_faqgen_vllm.sh
- If using TGI: set_env.sh
- If using TGI with FaqGen: set_env_faqgen.sh
|
||||
```bash
|
||||
git clone https://github.com/opea-project/GenAIComps.git
|
||||
```
|
||||
|
||||
Set the values of the variables:
|
||||
If you use a specific tag of the GenAIExamples repository,
|
||||
then you should also use the corresponding tag for GenAIComps (replace v1.3 with the desired version):
|
||||
|
||||
- **HOST_IP, HOST_IP_EXTERNAL** - These variables are used to configure the name/address of the service in the operating system environment for the application services to interact with each other and with the outside world.
|
||||
```bash
|
||||
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout v1.3
|
||||
```
|
||||
|
||||
If your server uses only an internal address and is not accessible from the Internet, then the values for these two variables will be the same and the value will be equal to the server's internal name/address.
|
||||
Keep in mind that when using a specific version of the code, you need to use the README from that version.
|
||||
|
||||
If your server uses only an external, Internet-accessible address, then the values for these two variables will be the same and the value will be equal to the server's external name/address.
|
||||
- #### Setting the list of images for the build (from the file build.yaml)
|
||||
|
||||
If your server is located on an internal network, has an internal address, but is accessible from the Internet via a proxy/firewall/load balancer, then the HOST_IP variable will have a value equal to the internal name/address of the server, and the EXTERNAL_HOST_IP variable will have a value equal to the external name/address of the proxy/firewall/load balancer behind which the server is located.
|
||||
If you want to deploy a vLLM-based or TGI-based application, then the set of services is installed as follows:
|
||||
|
||||
We set these values in the `set_env_*.sh` file.
|
||||
#### vLLM-based application
|
||||
|
||||
- **Variables with names like `*_PORT`** - These variables set the IP port numbers for establishing network connections to the application services.
|
||||
The values shown in set_env.sh or set_env_vllm.sh are those used for developing and testing the application, configured for the environment in which that development was performed. These values must be adjusted to match the network access rules of your environment's server and must not overlap with the IP ports of other applications that are already in use.
|
||||
```bash
|
||||
service_list="dataprep retriever vllm-rocm chatqna chatqna-ui nginx"
|
||||
```
|
||||
|
||||
Setting variables in the operating system environment:
|
||||
#### vLLM-based application with FaqGen
|
||||
|
||||
```bash
|
||||
export HUGGINGFACEHUB_API_TOKEN="Your_HuggingFace_API_Token"
|
||||
source ./set_env_*.sh # replace the script name with the appropriate one
|
||||
```
|
||||
```bash
|
||||
service_list="dataprep retriever vllm-rocm llm-faqgen chatqna chatqna-ui nginx"
|
||||
```
|
||||
|
||||
Consult the section on [ChatQnA Service configuration](#chatqna-configuration) for information on how service specific configuration parameters affect deployments.
|
||||
#### TGI-based application
|
||||
|
||||
### Deploy the Services Using Docker Compose
|
||||
```bash
|
||||
service_list="dataprep retriever chatqna chatqna-ui nginx"
|
||||
```
|
||||
|
||||
To deploy the ChatQnA services, execute the `docker compose up` command with the appropriate arguments. For a default deployment with TGI, execute the command below. It uses the 'compose.yaml' file.
|
||||
#### TGI-based application with FaqGen
|
||||
|
||||
```bash
|
||||
cd docker_compose/amd/gpu/rocm
|
||||
# if used TGI
|
||||
docker compose -f compose.yaml up -d
|
||||
# if used TGI with FaqGen
|
||||
# docker compose -f compose_faqgen.yaml up -d
|
||||
# if used vLLM
|
||||
# docker compose -f compose_vllm.yaml up -d
|
||||
# if used vLLM with FaqGen
|
||||
# docker compose -f compose_faqgen_vllm.yaml up -d
|
||||
```
|
||||
```bash
|
||||
service_list="dataprep retriever llm-faqgen chatqna chatqna-ui nginx"
|
||||
```
|
||||
|
||||
- #### Pull Docker Images
|
||||
|
||||
```bash
|
||||
docker pull redis/redis-stack:7.2.0-v9
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
```
|
||||
|
||||
- #### Optional. Pull TGI Docker Image (Do this if you want to use TGI)
|
||||
|
||||
```bash
|
||||
docker pull ghcr.io/huggingface/text-generation-inference:2.3.1-rocm
|
||||
```
|
||||
|
||||
- #### Build Docker Images
|
||||
|
||||
```bash
|
||||
docker compose -f build.yaml build ${service_list} --no-cache
|
||||
```
|
||||
|
||||
After the build, we check the list of images with the command:
|
||||
|
||||
```bash
|
||||
docker image ls
|
||||
```
|
||||
|
||||
The list of images should include:
|
||||
|
||||
##### vLLM-based application:
|
||||
|
||||
- redis/redis-stack:7.2.0-v9
|
||||
- opea/dataprep:latest
|
||||
- ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
- opea/retriever:latest
|
||||
- opea/vllm-rocm:latest
|
||||
- opea/chatqna:latest
|
||||
- opea/chatqna-ui:latest
|
||||
- opea/nginx:latest
|
||||
|
||||
##### vLLM-based application with FaqGen:
|
||||
|
||||
- redis/redis-stack:7.2.0-v9
|
||||
- opea/dataprep:latest
|
||||
- ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
- opea/retriever:latest
|
||||
- opea/vllm-rocm:latest
|
||||
- opea/llm-faqgen:latest
|
||||
- opea/chatqna:latest
|
||||
- opea/chatqna-ui:latest
|
||||
- opea/nginx:latest
|
||||
|
||||
##### TGI-based application:
|
||||
|
||||
- redis/redis-stack:7.2.0-v9
|
||||
- opea/dataprep:latest
|
||||
- ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
- opea/retriever:latest
|
||||
- ghcr.io/huggingface/text-generation-inference:2.3.1-rocm
|
||||
- opea/chatqna:latest
|
||||
- opea/chatqna-ui:latest
|
||||
- opea/nginx:latest
|
||||
|
||||
##### TGI-based application with FaqGen:
|
||||
|
||||
- redis/redis-stack:7.2.0-v9
|
||||
- opea/dataprep:latest
|
||||
- ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
- opea/retriever:latest
|
||||
- ghcr.io/huggingface/text-generation-inference:2.3.1-rocm
|
||||
- opea/llm-faqgen:latest
|
||||
- opea/chatqna:latest
|
||||
- opea/chatqna-ui:latest
|
||||
- opea/nginx:latest
|
||||
|
||||
---
|
||||
|
||||
## Deploy the ChatQnA Application
|
||||
|
||||
### Docker Compose Configuration for AMD GPUs
|
||||
|
||||
To enable GPU support for AMD GPUs, the following configuration is added to the Docker Compose file:
|
||||
|
||||
@@ -125,103 +198,332 @@ security_opt:
|
||||
**How to Identify GPU Device IDs:**
|
||||
Use AMD GPU driver utilities to determine the correct `cardN` and `renderN` IDs for your GPU.
|
||||
|
||||
> **Note**: Developers should build the Docker image from source when:
|
||||
>
|
||||
> - Developing off the git main branch (as the container's ports in the repo may be different from the published docker image).
|
||||
> - Unable to download the docker image.
|
||||
> - Using a specific version of the Docker image.
|
||||
### Set deploy environment variables
|
||||
|
||||
Please refer to the table below to build different microservices from source:
|
||||
#### Setting variables in the operating system environment:
|
||||
|
||||
| Microservice | Deployment Guide |
|
||||
| --------------- | ------------------------------------------------------------------------------------------------------------------ |
|
||||
| vLLM | [vLLM build guide](https://github.com/opea-project/GenAIComps/tree/main/comps/third_parties/vllm#build-docker) |
|
||||
| TGI | [TGI project](https://github.com/huggingface/text-generation-inference.git) |
|
||||
| LLM | [LLM build guide](https://github.com/opea-project/GenAIComps/tree/main/comps/llms) |
|
||||
| Redis Vector DB | [Redis](https://github.com/redis/redis.git) |
|
||||
| Dataprep | [Dataprep build guide](https://github.com/opea-project/GenAIComps/tree/main/comps/dataprep/src/README_redis.md) |
|
||||
| TEI Embedding | [TEI guide](https://github.com/huggingface/text-embeddings-inference.git) |
|
||||
| Retriever | [Retriever build guide](https://github.com/opea-project/GenAIComps/tree/main/comps/retrievers/src/README_redis.md) |
|
||||
| TEI Reranking | [TEI guide](https://github.com/huggingface/text-embeddings-inference.git) |
|
||||
| MegaService | [MegaService guide](../../../../README.md) |
|
||||
| UI | [UI guide](../../../../ui/react/README.md) |
|
||||
| Nginx | [Nginx guide](https://github.com/opea-project/GenAIComps/tree/main/comps/third_parties/nginx) |
|
||||
|
||||
### Check the Deployment Status
|
||||
|
||||
After running docker compose, check if all the containers launched via docker compose have started:
|
||||
##### Set variable HUGGINGFACEHUB_API_TOKEN:
|
||||
|
||||
```bash
|
||||
docker ps -a
|
||||
### Replace the string 'your_huggingfacehub_token' with your HuggingFacehub repository access token.
|
||||
export HUGGINGFACEHUB_API_TOKEN='your_huggingfacehub_token'
|
||||
```
|
||||
|
||||
For the default deployment with TGI, the following 9 containers should have started:
|
||||
#### Set variables value in set_env\*\*\*\*.sh file:
|
||||
|
||||
```
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
eaf24161aca8 opea/nginx:latest "/docker-entrypoint.…" 37 seconds ago Up 5 seconds 0.0.0.0:18104->80/tcp, [::]:18104->80/tcp chatqna-nginx-server
|
||||
2fce48a4c0f4 opea/chatqna-ui:latest "docker-entrypoint.s…" 37 seconds ago Up 5 seconds 0.0.0.0:18101->5173/tcp, [::]:18101->5173/tcp chatqna-ui-server
|
||||
613c384979f4 opea/chatqna:latest "bash entrypoint.sh" 37 seconds ago Up 5 seconds 0.0.0.0:18102->8888/tcp, [::]:18102->8888/tcp chatqna-backend-server
|
||||
05512bd29fee opea/dataprep:latest "sh -c 'python $( [ …" 37 seconds ago Up 36 seconds (healthy) 0.0.0.0:18103->5000/tcp, [::]:18103->5000/tcp chatqna-dataprep-service
|
||||
49844d339d1d opea/retriever:latest "python opea_retriev…" 37 seconds ago Up 36 seconds 0.0.0.0:7000->7000/tcp, [::]:7000->7000/tcp chatqna-retriever
|
||||
75b698fe7de0 ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 "text-embeddings-rou…" 37 seconds ago Up 36 seconds 0.0.0.0:18808->80/tcp, [::]:18808->80/tcp chatqna-tei-reranking-service
|
||||
342f01bfdbb2 ghcr.io/huggingface/text-generation-inference:2.3.1-rocm"python3 /workspace/…" 37 seconds ago Up 36 seconds 0.0.0.0:18008->8011/tcp, [::]:18008->8011/tcp chatqna-tgi-service
|
||||
6081eb1c119d redis/redis-stack:7.2.0-v9 "/entrypoint.sh" 37 seconds ago Up 36 seconds 0.0.0.0:6379->6379/tcp, [::]:6379->6379/tcp, 0.0.0.0:8001->8001/tcp, [::]:8001->8001/tcp chatqna-redis-vector-db
|
||||
eded17420782 ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 "text-embeddings-rou…" 37 seconds ago Up 36 seconds 0.0.0.0:18090->80/tcp, [::]:18090->80/tcp chatqna-tei-embedding-service
|
||||
Go to Docker Compose directory:
|
||||
|
||||
```bash
|
||||
cd ~/chatqna-install/GenAIExamples/ChatQnA/docker_compose/amd/gpu/rocm
|
||||
```
|
||||
|
||||
If TGI with FaqGen is used:
|
||||
The example uses the Nano text editor. You can use any convenient text editor:
|
||||
|
||||
```
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
eaf24161aca8 opea/nginx:latest "/docker-entrypoint.…" 37 seconds ago Up 5 seconds 0.0.0.0:18104->80/tcp, [::]:18104->80/tcp chatqna-nginx-server
|
||||
2fce48a4c0f4 opea/chatqna-ui:latest "docker-entrypoint.s…" 37 seconds ago Up 5 seconds 0.0.0.0:18101->5173/tcp, [::]:18101->5173/tcp chatqna-ui-server
|
||||
613c384979f4 opea/chatqna:latest "bash entrypoint.sh" 37 seconds ago Up 5 seconds 0.0.0.0:18102->8888/tcp, [::]:18102->8888/tcp chatqna-backend-server
|
||||
e0ef1ea67640 opea/llm-faqgen:latest "bash entrypoint.sh" 37 seconds ago Up 36 seconds 0.0.0.0:18011->9000/tcp, [::]:18011->9000/tcp chatqna-llm-faqgen
|
||||
05512bd29fee opea/dataprep:latest "sh -c 'python $( [ …" 37 seconds ago Up 36 seconds (healthy) 0.0.0.0:18103->5000/tcp, [::]:18103->5000/tcp chatqna-dataprep-service
|
||||
49844d339d1d opea/retriever:latest "python opea_retriev…" 37 seconds ago Up 36 seconds 0.0.0.0:7000->7000/tcp, [::]:7000->7000/tcp chatqna-retriever
|
||||
75b698fe7de0 ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 "text-embeddings-rou…" 37 seconds ago Up 36 seconds 0.0.0.0:18808->80/tcp, [::]:18808->80/tcp chatqna-tei-reranking-service
|
||||
342f01bfdbb2 ghcr.io/huggingface/text-generation-inference:2.3.1-rocm"python3 /workspace/…" 37 seconds ago Up 36 seconds 0.0.0.0:18008->8011/tcp, [::]:18008->8011/tcp chatqna-tgi-service
|
||||
6081eb1c119d redis/redis-stack:7.2.0-v9 "/entrypoint.sh" 37 seconds ago Up 36 seconds 0.0.0.0:6379->6379/tcp, [::]:6379->6379/tcp, 0.0.0.0:8001->8001/tcp, [::]:8001->8001/tcp chatqna-redis-vector-db
|
||||
eded17420782 ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 "text-embeddings-rou…" 37 seconds ago Up 36 seconds 0.0.0.0:18090->80/tcp, [::]:18090->80/tcp chatqna-tei-embedding-service
|
||||
#### If you use vLLM based application
|
||||
|
||||
```bash
|
||||
nano set_env_vllm.sh
|
||||
```
|
||||
|
||||
If vLLM is used:
|
||||
#### If you use vLLM based application with FaqGen
|
||||
|
||||
```
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
eaf24161aca8 opea/nginx:latest "/docker-entrypoint.…" 37 seconds ago Up 5 seconds 0.0.0.0:18104->80/tcp, [::]:18104->80/tcp chatqna-nginx-server
|
||||
2fce48a4c0f4 opea/chatqna-ui:latest "docker-entrypoint.s…" 37 seconds ago Up 5 seconds 0.0.0.0:18101->5173/tcp, [::]:18101->5173/tcp chatqna-ui-server
|
||||
613c384979f4 opea/chatqna:latest "bash entrypoint.sh" 37 seconds ago Up 5 seconds 0.0.0.0:18102->8888/tcp, [::]:18102->8888/tcp chatqna-backend-server
|
||||
05512bd29fee opea/dataprep:latest "sh -c 'python $( [ …" 37 seconds ago Up 36 seconds (healthy) 0.0.0.0:18103->5000/tcp, [::]:18103->5000/tcp chatqna-dataprep-service
|
||||
49844d339d1d opea/retriever:latest "python opea_retriev…" 37 seconds ago Up 36 seconds 0.0.0.0:7000->7000/tcp, [::]:7000->7000/tcp chatqna-retriever
|
||||
75b698fe7de0 ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 "text-embeddings-rou…" 37 seconds ago Up 36 seconds 0.0.0.0:18808->80/tcp, [::]:18808->80/tcp chatqna-tei-reranking-service
|
||||
342f01bfdbb2 opea/vllm-rocm:latest "python3 /workspace/…" 37 seconds ago Up 36 seconds 0.0.0.0:18008->8011/tcp, [::]:18008->8011/tcp chatqna-vllm-service
|
||||
6081eb1c119d redis/redis-stack:7.2.0-v9 "/entrypoint.sh" 37 seconds ago Up 36 seconds 0.0.0.0:6379->6379/tcp, [::]:6379->6379/tcp, 0.0.0.0:8001->8001/tcp, [::]:8001->8001/tcp chatqna-redis-vector-db
|
||||
eded17420782 ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 "text-embeddings-rou…" 37 seconds ago Up 36 seconds 0.0.0.0:18090->80/tcp, [::]:18090->80/tcp chatqna-tei-embedding-service
|
||||
```bash
|
||||
nano set_env_vllm_faqgen.sh
|
||||
```
|
||||
|
||||
If vLLM with FaqGen is used:
|
||||
#### If you use TGI based application
|
||||
|
||||
```
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
eaf24161aca8 opea/nginx:latest "/docker-entrypoint.…" 37 seconds ago Up 5 seconds 0.0.0.0:18104->80/tcp, [::]:18104->80/tcp chatqna-nginx-server
|
||||
2fce48a4c0f4 opea/chatqna-ui:latest "docker-entrypoint.s…" 37 seconds ago Up 5 seconds 0.0.0.0:18101->5173/tcp, [::]:18101->5173/tcp chatqna-ui-server
|
||||
613c384979f4 opea/chatqna:latest "bash entrypoint.sh" 37 seconds ago Up 5 seconds 0.0.0.0:18102->8888/tcp, [::]:18102->8888/tcp chatqna-backend-server
|
||||
e0ef1ea67640 opea/llm-faqgen:latest "bash entrypoint.sh" 37 seconds ago Up 36 seconds 0.0.0.0:18011->9000/tcp, [::]:18011->9000/tcp chatqna-llm-faqgen
|
||||
05512bd29fee opea/dataprep:latest "sh -c 'python $( [ …" 37 seconds ago Up 36 seconds (healthy) 0.0.0.0:18103->5000/tcp, [::]:18103->5000/tcp chatqna-dataprep-service
|
||||
49844d339d1d opea/retriever:latest "python opea_retriev…" 37 seconds ago Up 36 seconds 0.0.0.0:7000->7000/tcp, [::]:7000->7000/tcp chatqna-retriever
|
||||
75b698fe7de0 ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 "text-embeddings-rou…" 37 seconds ago Up 36 seconds 0.0.0.0:18808->80/tcp, [::]:18808->80/tcp chatqna-tei-reranking-service
|
||||
342f01bfdbb2 opea/vllm-rocm:latest "python3 /workspace/…" 37 seconds ago Up 36 seconds 0.0.0.0:18008->8011/tcp, [::]:18008->8011/tcp chatqna-vllm-service
|
||||
6081eb1c119d redis/redis-stack:7.2.0-v9 "/entrypoint.sh" 37 seconds ago Up 36 seconds 0.0.0.0:6379->6379/tcp, [::]:6379->6379/tcp, 0.0.0.0:8001->8001/tcp, [::]:8001->8001/tcp chatqna-redis-vector-db
|
||||
eded17420782 ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 "text-embeddings-rou…" 37 seconds ago Up 36 seconds 0.0.0.0:18090->80/tcp, [::]:18090->80/tcp chatqna-tei-embedding-service
|
||||
```bash
|
||||
nano set_env.sh
|
||||
```
|
||||
|
||||
If any issues are encountered during deployment, refer to the [Troubleshooting](../../../../README_miscellaneous.md#troubleshooting) section.
|
||||
#### If you use TGI based application with FaqGen
|
||||
|
||||
### Validate the Pipeline
|
||||
```bash
|
||||
nano set_env_faqgen.sh
|
||||
```
|
||||
|
||||
Once the ChatQnA services are running, test the pipeline using the following command:
|
||||
If you are in a proxy environment, also set the proxy-related environment variables:
|
||||
|
||||
```bash
|
||||
export http_proxy="Your_HTTP_Proxy"
|
||||
export https_proxy="Your_HTTPs_Proxy"
|
||||
```
|
||||
|
||||
Set the values of the variables:
|
||||
|
||||
- **HOST_IP, HOST_IP_EXTERNAL** - These variables are used to configure the name/address of the service in the operating system environment for the application services to interact with each other and with the outside world.
|
||||
|
||||
If your server uses only an internal address and is not accessible from the Internet, then the values for these two variables will be the same and the value will be equal to the server's internal name/address.
|
||||
|
||||
If your server uses only an external, Internet-accessible address, then the values for these two variables will be the same and the value will be equal to the server's external name/address.
|
||||
|
||||
If your server is located on an internal network, has an internal address, but is accessible from the Internet via a proxy/firewall/load balancer, then the HOST_IP variable will have a value equal to the internal name/address of the server, and the EXTERNAL_HOST_IP variable will have a value equal to the external name/address of the proxy/firewall/load balancer behind which the server is located.
|
||||
|
||||
We set these values in the file set_env\*\*\*\*.sh
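For example, on a server that is reachable only by its internal address, both variables can be set to the first IP address reported by the system (a minimal sketch; it assumes a single primary network interface and uses the variable names from this section):

```bash
# Internal-only server: internal and external name/address are the same.
export HOST_IP=$(hostname -I | awk '{print $1}')
export HOST_IP_EXTERNAL=${HOST_IP}
```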
|
||||
|
||||
- **Variables with names like "\*\*\*\*\_PORT"** - These variables set the port numbers on which the application services accept network connections.

  The values shown in the files set_env.sh or set_env_vllm.sh are the values used for development and testing of the application, configured for the environment in which that development was performed. Set these values according to the network access rules of your server, and make sure they do not overlap with ports already used by other applications.
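Before changing a port value, it can help to confirm that the port is not already in use on the host, for example (a quick check; it assumes `ss` from iproute2 is available):

```bash
# No output means port 18102 is free; otherwise the listening socket is printed.
ss -ltn | grep ':18102 '
```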
|
||||
|
||||
#### Set variables with script set_env\*\*\*\*.sh
|
||||
|
||||
#### If you use vLLM based application
|
||||
|
||||
```bash
|
||||
. set_env_vllm.sh
|
||||
```
|
||||
|
||||
#### If you use vLLM based application with FaqGen
|
||||
|
||||
```bash
|
||||
. set_env_faqgen_vllm.sh
|
||||
```
|
||||
|
||||
#### If you use TGI based application
|
||||
|
||||
```bash
|
||||
. set_env.sh
|
||||
```
|
||||
|
||||
#### If you use TGI based application with FaqGen
|
||||
|
||||
```bash
|
||||
. set_env_faqgen.sh
|
||||
```
|
||||
|
||||
### Start the services:
|
||||
|
||||
#### If you use vLLM based application
|
||||
|
||||
```bash
|
||||
docker compose -f compose_vllm.yaml up -d
|
||||
```
|
||||
|
||||
#### If you use vLLM based application with FaqGen
|
||||
|
||||
```bash
|
||||
docker compose -f compose_faqgen_vllm.yaml up -d
|
||||
```
|
||||
|
||||
#### If you use TGI based application
|
||||
|
||||
```bash
|
||||
docker compose -f compose.yaml up -d
|
||||
```
|
||||
|
||||
#### If you use TGI based application with FaqGen
|
||||
|
||||
```bash
|
||||
docker compose -f compose_faqgen.yaml up -d
|
||||
```
|
||||
|
||||
All containers should be running and should not keep restarting; a quick status check is sketched after the lists below:
|
||||
|
||||
##### If you use vLLM based application:
|
||||
|
||||
- chatqna-redis-vector-db
|
||||
- chatqna-dataprep-service
|
||||
- chatqna-tei-embedding-service
|
||||
- chatqna-retriever
|
||||
- chatqna-tei-reranking-service
|
||||
- chatqna-vllm-service
|
||||
- chatqna-backend-server
|
||||
- chatqna-ui-server
|
||||
- chatqna-nginx-server
|
||||
|
||||
##### If you use vLLM based application with FaqGen:
|
||||
|
||||
- chatqna-redis-vector-db
|
||||
- chatqna-dataprep-service
|
||||
- chatqna-tei-embedding-service
|
||||
- chatqna-retriever
|
||||
- chatqna-tei-reranking-service
|
||||
- chatqna-vllm-service
|
||||
- chatqna-llm-faqgen
|
||||
- chatqna-backend-server
|
||||
- chatqna-ui-server
|
||||
- chatqna-nginx-server
|
||||
|
||||
##### If you use TGI based application:
|
||||
|
||||
- chatqna-redis-vector-db
|
||||
- chatqna-dataprep-service
|
||||
- chatqna-tei-embedding-service
|
||||
- chatqna-retriever
|
||||
- chatqna-tei-reranking-service
|
||||
- chatqna-tgi-service
|
||||
- chatqna-backend-server
|
||||
- chatqna-ui-server
|
||||
- chatqna-nginx-server
|
||||
|
||||
##### If you use TGI based application with FaqGen:
|
||||
|
||||
- chatqna-redis-vector-db
|
||||
- chatqna-dataprep-service
|
||||
- chatqna-tei-embedding-service
|
||||
- chatqna-retriever
|
||||
- chatqna-tei-reranking-service
|
||||
- chatqna-tgi-service
|
||||
- chatqna-llm-faqgen
|
||||
- chatqna-backend-server
|
||||
- chatqna-ui-server
|
||||
- chatqna-nginx-server
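A quick way to confirm this is to print only the container names and statuses and watch for anything that is not reported as `Up` (a minimal sketch using standard Docker CLI options):

```bash
# Print name and status for every container; anything not "Up ..." needs attention.
docker ps -a --format 'table {{.Names}}\t{{.Status}}'
```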
|
||||
|
||||
---
|
||||
|
||||
## Validate the Services
|
||||
|
||||
### 1. Validate TEI Embedding Service
|
||||
|
||||
```bash
|
||||
curl http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}/embed \
|
||||
-X POST \
|
||||
-d '{"inputs":"What is Deep Learning?"}' \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
Checking the response from the service. The response should be similar to text:
|
||||
|
||||
```textmate
|
||||
[[0.00037115702,-0.06356819,0.0024758505,..................,0.022725677,0.016026087,-0.02125421,-0.02984927,-0.0049473033]]
|
||||
```
|
||||
|
||||
If the service returns a meaningful embedding vector, the TEI Embedding Service is considered successfully launched.
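Optionally, the shape of the returned vector can be checked directly from the shell (a small sketch that assumes `python3` is available on the host and pipes the same request through it):

```bash
# The response should contain a single non-empty embedding vector.
curl -s http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}/embed \
    -X POST \
    -d '{"inputs":"What is Deep Learning?"}' \
    -H 'Content-Type: application/json' | \
    python3 -c "import sys, json; v = json.load(sys.stdin); print('vectors:', len(v), 'dimension:', len(v[0]))"
```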
|
||||
|
||||
### 2. Validate Retriever Microservice
|
||||
|
||||
```bash
|
||||
export your_embedding=$(python3 -c "import random; embedding = [random.uniform(-1, 1) for _ in range(768)]; print(embedding)")
|
||||
curl http://${HOST_IP}:${CHATQNA_REDIS_RETRIEVER_PORT}/v1/retrieval \
|
||||
-X POST \
|
||||
-d "{\"text\":\"test\",\"embedding\":${your_embedding}}" \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
Checking the response from the service. The response should be similar to JSON:
|
||||
|
||||
```json
|
||||
{ "id": "e191846168aed1f80b2ea12df80844d2", "retrieved_docs": [], "initial_query": "test", "top_n": 1, "metadata": [] }
|
||||
```
|
||||
|
||||
If the response corresponds to the form of the provided JSON, then we consider the
|
||||
Retriever Microservice verification successful.
|
||||
|
||||
### 3. Validate TEI Reranking Service
|
||||
|
||||
```bash
|
||||
curl http://${HOST_IP}:${CHATQNA_TEI_RERANKING_PORT}/rerank \
|
||||
-X POST \
|
||||
-d '{"query":"What is Deep Learning?", "texts": ["Deep Learning is not...", "Deep learning is..."]}' \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
Checking the response from the service. The response should be similar to JSON:
|
||||
|
||||
```json
|
||||
[
|
||||
{ "index": 1, "score": 0.94238955 },
|
||||
{ "index": 0, "score": 0.120219156 }
|
||||
]
|
||||
```
|
||||
|
||||
If the response corresponds to the form of the provided JSON, then we consider the TEI Reranking Service
|
||||
verification successful.
|
||||
|
||||
### 4. Validate the vLLM/TGI Service
|
||||
|
||||
#### If you use vLLM:
|
||||
|
||||
```bash
|
||||
DATA='{"model": "meta-llama/Meta-Llama-3-8B-Instruct", '\
|
||||
'"messages": [{"role": "user", "content": "What is a Deep Learning?"}], "max_tokens": 64}'
|
||||
|
||||
curl http://${HOST_IP}:${CHATQNA_VLLM_SERVICE_PORT}/v1/chat/completions \
|
||||
-X POST \
|
||||
-d "$DATA" \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
Checking the response from the service. The response should be similar to JSON:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "chatcmpl-91003647d1c7469a89e399958f390f67",
|
||||
"object": "chat.completion",
|
||||
"created": 1742877228,
|
||||
"model": "meta-llama/Meta-Llama-3-8B-Instruct",
|
||||
"choices": [
|
||||
{
|
||||
"index": 0,
|
||||
"message": {
|
||||
"role": "assistant",
|
||||
"content": "Deep Learning ( DL) is a subfield of Machine Learning (ML) that focuses on the design of algorithms and architectures inspired by the structure and function of the human brain. These algorithms are designed to analyze and interpret data that is presented in the form of patterns or signals, and they often mimic the way the human brain",
|
||||
"tool_calls": []
|
||||
},
|
||||
"logprobs": null,
|
||||
"finish_reason": "length",
|
||||
"stop_reason": null
|
||||
}
|
||||
],
|
||||
"usage": { "prompt_tokens": 16, "total_tokens": 80, "completion_tokens": 64, "prompt_tokens_details": null },
|
||||
"prompt_logprobs": null
|
||||
}
|
||||
```
|
||||
|
||||
If the response contains meaningful text in the value of the "choices[0].message.content" key, the vLLM service is considered successfully launched.
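To pull out just the generated text for a quick check, the response can be filtered with `jq` (assuming `jq` is installed; the same idea applies to the `generated_text` field of the TGI response and the `text` field of the FaqGen response below):

```bash
# Print only the assistant message from the vLLM chat completion response.
curl -s http://${HOST_IP}:${CHATQNA_VLLM_SERVICE_PORT}/v1/chat/completions \
    -X POST \
    -d "$DATA" \
    -H 'Content-Type: application/json' | jq -r '.choices[0].message.content'
```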
|
||||
|
||||
#### If you use TGI:
|
||||
|
||||
```bash
|
||||
DATA='{"inputs":"What is a Deep Learning?",'\
|
||||
'"parameters":{"max_new_tokens":64,"do_sample": true}}'
|
||||
|
||||
curl http://${HOST_IP}:${CHATQNA_TGI_SERVICE_PORT}/generate \
|
||||
-X POST \
|
||||
-d "$DATA" \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
Checking the response from the service. The response should be similar to JSON:
|
||||
|
||||
```json
|
||||
{
|
||||
"generated_text": " What is its application in Computer Vision?\nWhat is a Deep Learning?\nDeep learning is a subfield of machine learning that involves the use of artificial neural networks to model high-level abstractions in data. It involves the use of deep neural networks, which are composed of multiple layers, to learn complex patterns in data. The"
|
||||
}
|
||||
```
|
||||
|
||||
If the response contains meaningful text in the value of the "generated_text" key, the TGI service is considered successfully launched.
|
||||
|
||||
### 5. Validate the LLM Service (if you use the application with FaqGen)
|
||||
|
||||
```bash
|
||||
DATA='{"messages":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source '\
|
||||
'text embeddings and sequence classification models. TEI enables high-performance extraction for the most '\
|
||||
'popular models, including FlagEmbedding, Ember, GTE and E5.","max_tokens": 128}'
|
||||
|
||||
curl http://${HOST_IP}:${CHATQNA_LLM_FAQGEN_PORT}/v1/faqgen \
|
||||
-X POST \
|
||||
-d "$DATA" \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
Checking the response from the service. The response should be similar to JSON:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "58f0632f5f03af31471b895b0d0d397b",
|
||||
"text": " Q: What is Text Embeddings Inference (TEI)?\n A: TEI is a toolkit for deploying and serving open source text embeddings and sequence classification models.\n\n Q: What models does TEI support?\n A: TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5.\n\n Q: What is the purpose of TEI?\n A: The purpose of TEI is to enable high-performance extraction for text embeddings and sequence classification models.\n\n Q: What are the benefits of using TEI?\n A: The benefits of using TEI include high",
|
||||
"prompt": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."
|
||||
}
|
||||
```
|
||||
|
||||
If the response contains meaningful text in the value of the "text" key, the LLM service is considered successfully launched.
|
||||
|
||||
### 6. Validate the MegaService
|
||||
|
||||
```bash
|
||||
curl http://${HOST_IP}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna \
|
||||
@@ -229,105 +531,91 @@ curl http://${HOST_IP}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna \
|
||||
-d '{"messages": "What is the revenue of Nike in 2023?"}'
|
||||
```
|
||||
|
||||
**Note**: Access the ChatQnA UI in a web browser through this URL: `http://${HOST_IP_EXTERNAL}:${CHATQNA_NGINX_PORT}`
|
||||
Checking the response from the service. The response should be similar to text:
|
||||
|
||||
### Cleanup the Deployment
|
||||
|
||||
To stop the containers associated with the deployment, execute the following command:
|
||||
|
||||
```bash
|
||||
# if used TGI
|
||||
docker compose -f compose.yaml down
|
||||
# if used TGI with FaqGen
|
||||
# docker compose -f compose_faqgen.yaml down
|
||||
# if used vLLM
|
||||
# docker compose -f compose_vllm.yaml down
|
||||
# if used vLLM with FaqGen
|
||||
# docker compose -f compose_faqgen_vllm.yaml down
|
||||
```textmate
|
||||
data: b' What'
|
||||
data: b' is'
|
||||
data: b' the'
|
||||
data: b' revenue'
|
||||
data: b' of'
|
||||
data: b' Nike'
|
||||
data: b' in'
|
||||
data: b' '
|
||||
data: b'202'
|
||||
data: b'3'
|
||||
data: b'?\n'
|
||||
data: b' '
|
||||
data: b' Answer'
|
||||
data: b':'
|
||||
data: b' According'
|
||||
data: b' to'
|
||||
data: b' the'
|
||||
data: b' search'
|
||||
data: b' results'
|
||||
data: b','
|
||||
data: b' the'
|
||||
data: b' revenue'
|
||||
data: b' of'
|
||||
data: b''
|
||||
|
||||
data: [DONE]
|
||||
|
||||
```
|
||||
|
||||
## ChatQnA Docker Compose Files
|
||||
If the output lines in the "data" keys contain meaningful words (tokens), the service is considered successfully launched.
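For reference, the full request that produces this token stream looks like the call below; the endpoint and payload match the MegaService example above, and `-N` simply disables curl's output buffering so tokens appear as they arrive:

```bash
curl -N http://${HOST_IP}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna \
    -H 'Content-Type: application/json' \
    -d '{"messages": "What is the revenue of Nike in 2023?"}'
```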
|
||||
|
||||
In the context of deploying a ChatQnA pipeline on an Intel® Xeon® platform, we can pick and choose different large language model serving frameworks, or a single English TTS/multi-language TTS component. The table below outlines the various configurations that are available as part of the application. These configurations can be used as templates and can be extended to different components available in [GenAIComps](https://github.com/opea-project/GenAIComps.git).
|
||||
### 7. Validate the Frontend (UI)
|
||||
|
||||
| File | Description |
|
||||
| ------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ |
|
||||
| [compose.yaml](./compose.yaml) | The LLM serving framework is TGI. Default compose file using TGI as serving framework and redis as vector database |
|
||||
| [compose_faqgen.yaml](./compose_faqgen.yaml) | The LLM serving framework is TGI with FaqGen. All other configurations remain the same as the default |
|
||||
| [compose_vllm.yaml](./compose_vllm.yaml) | The LLM serving framework is vLLM. Compose file using vllm as serving framework and redis as vector database |
|
||||
| [compose_faqgen_vllm.yaml](./compose_faqgen_vllm.yaml) | The LLM serving framework is vLLM with FaqGen. Compose file using vllm as serving framework and redis as vector database |
|
||||
To access the UI, use the URL - http://${EXTERNAL_HOST_IP}:${CHATQNA_NGINX_PORT}
|
||||
A page should open when you click through to this address:
|
||||
|
||||
## Validate MicroServices
|
||||

|
||||
|
||||
1. TEI Embedding Service
|
||||
If a page of this type has opened, then we believe that the service is running and responding,
|
||||
and we can proceed to functional UI testing.
|
||||
|
||||
```bash
|
||||
curl http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}/embed \
|
||||
-X POST \
|
||||
-d '{"inputs":"What is Deep Learning?"}' \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
Enter a task for the service in the "Enter prompt here" field, for example, "What is a Deep Learning?", and press Enter.
|
||||
After that, a page with the result of the task should open:
|
||||
|
||||
2. Retriever Microservice
|
||||
#### If used application without FaqGen
|
||||
|
||||
```bash
|
||||
export your_embedding=$(python3 -c "import random; embedding = [random.uniform(-1, 1) for _ in range(768)]; print(embedding)")
|
||||
curl http://${HOST_IP}:${CHATQNA_REDIS_RETRIEVER_PORT}/v1/retrieval \
|
||||
-X POST \
|
||||
-d "{\"text\":\"test\",\"embedding\":${your_embedding}}" \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||

|
||||
|
||||
3. TEI Reranking Service
|
||||
#### If used application with FaqGen
|
||||
|
||||
```bash
|
||||
curl http://${HOST_IP}:${CHATQNA_TEI_RERANKING_PORT}/rerank \
|
||||
-X POST \
|
||||
-d '{"query":"What is Deep Learning?", "texts": ["Deep Learning is not...", "Deep learning is..."]}' \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||

|
||||
|
||||
4. vLLM/TGI Service
|
||||
If the result shown on the page is correct, then we consider the verification of the UI service to be successful.
|
||||
|
||||
If you use vLLM:
|
||||
### 5. Stop application
|
||||
|
||||
```bash
|
||||
DATA='{"model": "meta-llama/Meta-Llama-3-8B-Instruct", '\
|
||||
'"messages": [{"role": "user", "content": "What is a Deep Learning?"}], "max_tokens": 64}'
|
||||
#### If you use vLLM
|
||||
|
||||
curl http://${HOST_IP}:${CHATQNA_VLLM_SERVICE_PORT}/v1/chat/completions \
|
||||
-X POST \
|
||||
-d "$DATA" \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
```bash
|
||||
cd ~/chatqna-install/GenAIExamples/ChatQnA/docker_compose/amd/gpu/rocm
|
||||
docker compose -f compose_vllm.yaml down
|
||||
```
|
||||
|
||||
If you use TGI:
|
||||
#### If you use vLLM with FaqGen
|
||||
|
||||
```bash
|
||||
DATA='{"inputs":"What is a Deep Learning?",'\
|
||||
'"parameters":{"max_new_tokens":64,"do_sample": true}}'
|
||||
```bash
|
||||
cd ~/chatqna-install/GenAIExamples/ChatQnA/docker_compose/amd/gpu/rocm
|
||||
docker compose -f compose_faqgen_vllm.yaml down
|
||||
```
|
||||
|
||||
curl http://${HOST_IP}:${CHATQNA_TGI_SERVICE_PORT}/generate \
|
||||
-X POST \
|
||||
-d "$DATA" \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
#### If you use TGI
|
||||
|
||||
5. LLM Service (if you use the application with FaqGen)
|
||||
```bash
|
||||
cd ~/chatqna-install/GenAIExamples/ChatQnA/docker_compose/amd/gpu/rocm
|
||||
docker compose -f compose.yaml down
|
||||
```
|
||||
|
||||
```bash
|
||||
DATA='{"messages":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source '\
|
||||
'text embeddings and sequence classification models. TEI enables high-performance extraction for the most '\
|
||||
'popular models, including FlagEmbedding, Ember, GTE and E5.","max_tokens": 128}'
|
||||
#### If you use TGI with FaqGen
|
||||
|
||||
curl http://${HOST_IP}:${CHATQNA_LLM_FAQGEN_PORT}/v1/faqgen \
|
||||
-X POST \
|
||||
-d "$DATA" \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
This guide should enable developers to deploy the default configuration or any of the other compose yaml files for different configurations. It also highlights the configurable parameters that can be set before deployment.
|
||||
```bash
|
||||
cd ~/chatqna-install/GenAIExamples/ChatQnA/docker_compose/amd/gpu/rocm
|
||||
docker compose -f compose_faqgen.yaml down
|
||||
```
|
||||
|
||||
@@ -165,7 +165,7 @@ services:
|
||||
|
||||
chatqna-nginx-server:
|
||||
image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
|
||||
container_name: chatqna-nginx-server
|
||||
container_name: chaqna-nginx-server
|
||||
depends_on:
|
||||
- chatqna-backend-server
|
||||
- chatqna-ui-server
|
||||
|
||||
@@ -187,7 +187,7 @@ services:
|
||||
|
||||
chatqna-nginx-server:
|
||||
image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
|
||||
container_name: chatqna-nginx-server
|
||||
container_name: chaqna-nginx-server
|
||||
depends_on:
|
||||
- chatqna-backend-server
|
||||
- chatqna-ui-server
|
||||
|
||||
@@ -192,7 +192,7 @@ services:
|
||||
|
||||
chatqna-nginx-server:
|
||||
image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
|
||||
container_name: chatqna-nginx-server
|
||||
container_name: chaqna-nginx-server
|
||||
depends_on:
|
||||
- chatqna-backend-server
|
||||
- chatqna-ui-server
|
||||
|
||||
@@ -32,7 +32,7 @@ services:
|
||||
retries: 50
|
||||
restart: unless-stopped
|
||||
tei-embedding-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-embedding-server
|
||||
ports:
|
||||
- "6006:80"
|
||||
@@ -65,7 +65,7 @@ services:
|
||||
RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_REDIS"
|
||||
restart: unless-stopped
|
||||
tei-reranking-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-reranking-server
|
||||
ports:
|
||||
- "8808:80"
|
||||
|
||||
@@ -39,7 +39,7 @@ services:
|
||||
retries: 50
|
||||
restart: unless-stopped
|
||||
tei-embedding-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-embedding-server
|
||||
ports:
|
||||
- "6006:80"
|
||||
@@ -72,7 +72,7 @@ services:
|
||||
RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_REDIS"
|
||||
restart: unless-stopped
|
||||
tei-reranking-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-reranking-server
|
||||
ports:
|
||||
- "8808:80"
|
||||
|
||||
@@ -32,7 +32,7 @@ services:
|
||||
retries: 50
|
||||
restart: unless-stopped
|
||||
tei-embedding-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-embedding-server
|
||||
ports:
|
||||
- "6006:80"
|
||||
@@ -65,7 +65,7 @@ services:
|
||||
RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_REDIS"
|
||||
restart: unless-stopped
|
||||
tei-reranking-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-reranking-server
|
||||
ports:
|
||||
- "8808:80"
|
||||
|
||||
@@ -32,7 +32,7 @@ services:
|
||||
retries: 50
|
||||
restart: unless-stopped
|
||||
tei-embedding-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-embedding-server
|
||||
ports:
|
||||
- "6006:80"
|
||||
@@ -65,7 +65,7 @@ services:
|
||||
RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_REDIS"
|
||||
restart: unless-stopped
|
||||
tei-reranking-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-reranking-server
|
||||
ports:
|
||||
- "8808:80"
|
||||
|
||||
@@ -113,7 +113,7 @@ services:
|
||||
restart: unless-stopped
|
||||
|
||||
tei-embedding-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-embedding-server
|
||||
ports:
|
||||
- "6006:80"
|
||||
@@ -127,7 +127,7 @@ services:
|
||||
command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
|
||||
|
||||
tei-reranking-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-reranking-server
|
||||
ports:
|
||||
- "8808:80"
|
||||
|
||||
@@ -29,7 +29,7 @@ services:
|
||||
retries: 50
|
||||
restart: unless-stopped
|
||||
tei-embedding-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-embedding-server
|
||||
ports:
|
||||
- "6006:80"
|
||||
@@ -60,7 +60,7 @@ services:
|
||||
RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_PINECONE"
|
||||
restart: unless-stopped
|
||||
tei-reranking-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-reranking-server
|
||||
ports:
|
||||
- "8808:80"
|
||||
|
||||
@@ -33,7 +33,7 @@ services:
|
||||
TEI_ENDPOINT: http://tei-embedding-service:80
|
||||
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
|
||||
tei-embedding-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-embedding-server
|
||||
ports:
|
||||
- "6006:80"
|
||||
@@ -66,7 +66,7 @@ services:
|
||||
RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_REDIS"
|
||||
restart: unless-stopped
|
||||
tei-reranking-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-reranking-server
|
||||
ports:
|
||||
- "8808:80"
|
||||
|
||||
@@ -32,7 +32,7 @@ services:
|
||||
retries: 50
|
||||
restart: unless-stopped
|
||||
tei-embedding-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-embedding-server
|
||||
ports:
|
||||
- "6006:80"
|
||||
@@ -65,7 +65,7 @@ services:
|
||||
RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_REDIS"
|
||||
restart: unless-stopped
|
||||
tei-reranking-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-reranking-server
|
||||
ports:
|
||||
- "8808:80"
|
||||
|
||||
@@ -32,7 +32,7 @@ services:
|
||||
retries: 50
|
||||
restart: unless-stopped
|
||||
tei-embedding-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-embedding-server
|
||||
ports:
|
||||
- "6006:80"
|
||||
|
||||
@@ -95,7 +95,7 @@ d560c232b120 opea/retriever:latest
|
||||
a1d7ca2d3787 ghcr.io/huggingface/tei-gaudi:1.5.0 "text-embeddings-rou…" 2 minutes ago Up 2 minutes 0.0.0.0:8808->80/tcp, [::]:8808->80/tcp tei-reranking-gaudi-server
|
||||
9a9f3fd4fd4c opea/vllm-gaudi:latest "python3 -m vllm.ent…" 2 minutes ago Exited (1) 2 minutes ago vllm-gaudi-server
|
||||
1ab9bbdf5182 redis/redis-stack:7.2.0-v9 "/entrypoint.sh" 2 minutes ago Up 2 minutes 0.0.0.0:6379->6379/tcp, :::6379->6379/tcp, 0.0.0.0:8001->8001/tcp, :::8001->8001/tcp redis-vector-db
|
||||
9ee0789d819e ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 "text-embeddings-rou…" 2 minutes ago Up 2 minutes 0.0.0.0:8090->80/tcp, [::]:8090->80/tcp tei-embedding-gaudi-server
|
||||
9ee0789d819e ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 "text-embeddings-rou…" 2 minutes ago Up 2 minutes 0.0.0.0:8090->80/tcp, [::]:8090->80/tcp tei-embedding-gaudi-server
|
||||
```
|
||||
|
||||
### Test the Pipeline
|
||||
@@ -148,7 +148,7 @@ The default deployment utilizes Gaudi devices primarily for the `vllm-service`,
|
||||
| ---------------------------- | ----------------------------------------------------- | ------------ |
|
||||
| redis-vector-db | redis/redis-stack:7.2.0-v9 | No |
|
||||
| dataprep-redis-service | opea/dataprep:latest | No |
|
||||
| tei-embedding-service | ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 | No |
|
||||
| tei-embedding-service | ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 | No |
|
||||
| retriever | opea/retriever:latest | No |
|
||||
| tei-reranking-service | ghcr.io/huggingface/tei-gaudi:1.5.0 | 1 card |
|
||||
| vllm-service | opea/vllm-gaudi:latest | Configurable |
|
||||
@@ -164,7 +164,7 @@ The TGI (Text Generation Inference) deployment and the default deployment differ
|
||||
| ---------------------------- | ----------------------------------------------------- | -------------- |
|
||||
| redis-vector-db | redis/redis-stack:7.2.0-v9 | No |
|
||||
| dataprep-redis-service | opea/dataprep:latest | No |
|
||||
| tei-embedding-service | ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 | No |
|
||||
| tei-embedding-service | ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 | No |
|
||||
| retriever | opea/retriever:latest | No |
|
||||
| tei-reranking-service | ghcr.io/huggingface/tei-gaudi:1.5.0 | 1 card |
|
||||
| **tgi-service** | ghcr.io/huggingface/tgi-gaudi:2.3.1 | Configurable |
|
||||
@@ -184,7 +184,7 @@ The TGI (Text Generation Inference) deployment and the default deployment differ
|
||||
| ---------------------------- | ----------------------------------------------------- | ------------ |
|
||||
| redis-vector-db | redis/redis-stack:7.2.0-v9 | No |
|
||||
| dataprep-redis-service | opea/dataprep:latest | No |
|
||||
| tei-embedding-service | ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 | No |
|
||||
| tei-embedding-service | ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 | No |
|
||||
| retriever | opea/retriever:latest | No |
|
||||
| tei-reranking-service | ghcr.io/huggingface/tei-gaudi:1.5.0 | 1 card |
|
||||
| vllm-service | opea/vllm-gaudi:latest | Configurable |
|
||||
@@ -203,7 +203,7 @@ The _compose_without_rerank.yaml_ Docker Compose file is distinct from the defau
|
||||
| ---------------------------- | ----------------------------------------------------- | -------------- |
|
||||
| redis-vector-db | redis/redis-stack:7.2.0-v9 | No |
|
||||
| dataprep-redis-service | opea/dataprep:latest | No |
|
||||
| tei-embedding-service | ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 | No |
|
||||
| tei-embedding-service | ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 | No |
|
||||
| retriever | opea/retriever:latest | No |
|
||||
| vllm-service | opea/vllm-gaudi:latest | Configurable |
|
||||
| chatqna-gaudi-backend-server | opea/chatqna:latest | No |
|
||||
@@ -222,7 +222,7 @@ The _compose_guardrails.yaml_ Docker Compose file introduces enhancements over t
|
||||
| dataprep-redis-service | opea/dataprep:latest | No | No |
|
||||
| _vllm-guardrails-service_ | opea/vllm-gaudi:latest | 1 card | Yes |
|
||||
| _guardrails_ | opea/guardrails:latest | No | No |
|
||||
| tei-embedding-service | ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 | No | No |
|
||||
| tei-embedding-service | ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 | No | No |
|
||||
| retriever | opea/retriever:latest | No | No |
|
||||
| tei-reranking-service | ghcr.io/huggingface/tei-gaudi:1.5.0 | 1 card | No |
|
||||
| vllm-service | opea/vllm-gaudi:latest | Configurable | Yes |
|
||||
@@ -258,7 +258,7 @@ The table provides a comprehensive overview of the ChatQnA services utilized acr
|
||||
| ---------------------------- | ----------------------------------------------------- | -------- | -------------------------------------------------------------------------------------------------- |
|
||||
| redis-vector-db | redis/redis-stack:7.2.0-v9 | No | Acts as a Redis database for storing and managing data. |
|
||||
| dataprep-redis-service | opea/dataprep:latest | No | Prepares data and interacts with the Redis database. |
|
||||
| tei-embedding-service | ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 | No | Provides text embedding services, often using Hugging Face models. |
|
||||
| tei-embedding-service | ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 | No | Provides text embedding services, often using Hugging Face models. |
|
||||
| retriever | opea/retriever:latest | No | Retrieves data from the Redis database and interacts with embedding services. |
|
||||
| tei-reranking-service | ghcr.io/huggingface/tei-gaudi:1.5.0 | Yes | Reranks text embeddings, typically using Gaudi hardware for enhanced performance. |
|
||||
| vllm-service | opea/vllm-gaudi:latest | No | Handles large language model (LLM) tasks, utilizing Gaudi hardware. |
|
||||
@@ -284,7 +284,7 @@ ChatQnA now supports running the latest DeepSeek models, including [deepseek-ai/
|
||||
|
||||
### tei-embedding-service & tei-reranking-service
|
||||
|
||||
The `ghcr.io/huggingface/text-embeddings-inference:cpu-1.5` image supporting `tei-embedding-service` and `tei-reranking-service` depends on the `EMBEDDING_MODEL_ID` or `RERANK_MODEL_ID` environment variables respectively to specify the embedding model and reranking model used for converting text into vector representations and rankings. This choice impacts the quality and relevance of the embeddings rerankings for various applications. Unlike the `vllm-service`, the `tei-embedding-service` and `tei-reranking-service` each typically acquires only one Gaudi device and does not use the `NUM_CARDS` parameter; embedding and reranking tasks generally do not require extensive parallel processing and one Gaudi per service is appropriate. The list of [supported embedding and reranking models](https://github.com/huggingface/tei-gaudi?tab=readme-ov-file#supported-models) can be found at the [huggingface/tei-gaudi](https://github.com/huggingface/tei-gaudi?tab=readme-ov-file#supported-models) website.
|
||||
The `ghcr.io/huggingface/text-embeddings-inference:cpu-1.6` image supporting `tei-embedding-service` and `tei-reranking-service` depends on the `EMBEDDING_MODEL_ID` or `RERANK_MODEL_ID` environment variables respectively to specify the embedding model and reranking model used for converting text into vector representations and rankings. This choice impacts the quality and relevance of the embeddings rerankings for various applications. Unlike the `vllm-service`, the `tei-embedding-service` and `tei-reranking-service` each typically acquires only one Gaudi device and does not use the `NUM_CARDS` parameter; embedding and reranking tasks generally do not require extensive parallel processing and one Gaudi per service is appropriate. The list of [supported embedding and reranking models](https://github.com/huggingface/tei-gaudi?tab=readme-ov-file#supported-models) can be found at the [huggingface/tei-gaudi](https://github.com/huggingface/tei-gaudi?tab=readme-ov-file#supported-models) website.
|
||||
|
||||
### tgi-guardrails-service
|
||||
|
||||
|
||||
@@ -39,7 +39,7 @@ services:
|
||||
retries: 50
|
||||
restart: unless-stopped
|
||||
tei-embedding-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-embedding-gaudi-server
|
||||
ports:
|
||||
- "8090:80"
|
||||
|
||||
@@ -33,7 +33,7 @@ services:
|
||||
retries: 50
|
||||
restart: unless-stopped
|
||||
tei-embedding-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-embedding-gaudi-server
|
||||
ports:
|
||||
- "8090:80"
|
||||
|
||||
@@ -33,7 +33,7 @@ services:
|
||||
retries: 50
|
||||
restart: unless-stopped
|
||||
tei-embedding-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-embedding-gaudi-server
|
||||
ports:
|
||||
- "8090:80"
|
||||
|
||||
@@ -76,7 +76,7 @@ services:
|
||||
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
|
||||
restart: unless-stopped
|
||||
tei-embedding-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-embedding-gaudi-server
|
||||
ports:
|
||||
- "8090:80"
|
||||
|
||||
@@ -32,7 +32,7 @@ services:
|
||||
retries: 50
|
||||
restart: unless-stopped
|
||||
tei-embedding-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-embedding-gaudi-server
|
||||
ports:
|
||||
- "8090:80"
|
||||
|
||||
@@ -32,7 +32,7 @@ services:
|
||||
retries: 50
|
||||
restart: unless-stopped
|
||||
tei-embedding-service:
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
|
||||
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
container_name: tei-embedding-gaudi-server
|
||||
ports:
|
||||
- "8090:80"
|
||||
|
||||
@@ -51,7 +51,7 @@ f810f3b4d329 opea/embedding:latest "python embed
|
||||
174bd43fa6b5 ghcr.io/huggingface/tei-gaudi:1.5.0 "text-embeddings-rou…" 2 minutes ago Up 2 minutes 0.0.0.0:8090->80/tcp, :::8090->80/tcp tei-embedding-gaudi-server
|
||||
05c40b636239 ghcr.io/huggingface/tgi-gaudi:2.3.1 "text-generation-lau…" 2 minutes ago Exited (1) About a minute ago tgi-gaudi-server
|
||||
74084469aa33 redis/redis-stack:7.2.0-v9 "/entrypoint.sh" 2 minutes ago Up 2 minutes 0.0.0.0:6379->6379/tcp, :::6379->6379/tcp, 0.0.0.0:8001->8001/tcp, :::8001->8001/tcp redis-vector-db
|
||||
88399dbc9e43 ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 "text-embeddings-rou…" 2 minutes ago Up 2 minutes 0.0.0.0:8808->80/tcp, :::8808->80/tcp tei-reranking-gaudi-server
|
||||
88399dbc9e43 ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 "text-embeddings-rou…" 2 minutes ago Up 2 minutes 0.0.0.0:8808->80/tcp, :::8808->80/tcp tei-reranking-gaudi-server
|
||||
```
|
||||
|
||||
In this case, the `ghcr.io/huggingface/tgi-gaudi:2.3.1` container exited.
|
||||
|
||||
@@ -31,6 +31,8 @@ function build_docker_images() {
|
||||
service_list="chatqna chatqna-ui dataprep retriever llm-faqgen vllm-gaudi nginx"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
docker pull ghcr.io/huggingface/tei-gaudi:1.5.0
|
||||
docker images && sleep 1s
|
||||
}
|
||||
|
||||
|
||||
@@ -69,6 +69,9 @@ function build_docker_images() {
|
||||
service_list="chatqna chatqna-ui dataprep retriever llm-faqgen nginx"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > "${LOG_PATH}"/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/text-generation-inference:2.3.1-rocm
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
|
||||
docker images && sleep 1s
|
||||
}
|
||||
|
||||
|
||||
@@ -32,6 +32,7 @@ function build_docker_images() {
|
||||
service_list="chatqna chatqna-ui dataprep retriever llm-faqgen vllm nginx"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
docker images && sleep 1s
|
||||
}
|
||||
|
||||
|
||||
@@ -28,6 +28,9 @@ function build_docker_images() {
|
||||
service_list="chatqna chatqna-ui dataprep retriever llm-faqgen nginx"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/tgi-gaudi:2.0.6
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
docker pull ghcr.io/huggingface/tei-gaudi:1.5.0
|
||||
docker images && sleep 1s
|
||||
}
|
||||
|
||||
|
||||
@@ -32,6 +32,8 @@ function build_docker_images() {
|
||||
service_list="chatqna chatqna-ui dataprep retriever llm-faqgen nginx"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
docker images && sleep 1s
|
||||
}
|
||||
|
||||
|
||||
@@ -31,6 +31,9 @@ function build_docker_images() {
|
||||
service_list="chatqna chatqna-ui dataprep retriever vllm-gaudi guardrails nginx"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
docker pull ghcr.io/huggingface/tei-gaudi:1.5.0
|
||||
|
||||
docker images && sleep 1s
|
||||
}
|
||||
|
||||
|
||||
@@ -35,6 +35,8 @@ function build_docker_images() {
|
||||
service_list="chatqna chatqna-ui dataprep retriever vllm nginx"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
|
||||
docker images && sleep 1s
|
||||
}
|
||||
function start_services() {
|
||||
|
||||
@@ -31,6 +31,8 @@ function build_docker_images() {
|
||||
service_list="chatqna chatqna-ui dataprep retriever vllm-gaudi nginx"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
docker pull ghcr.io/huggingface/tei-gaudi:1.5.0
|
||||
docker images && sleep 1s
|
||||
}
|
||||
|
||||
|
||||
@@ -67,6 +67,9 @@ function build_docker_images() {
|
||||
service_list="chatqna chatqna-ui dataprep retriever nginx"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > "${LOG_PATH}"/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/text-generation-inference:2.3.1-rocm
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
|
||||
docker images && sleep 1s
|
||||
}
|
||||
|
||||
|
||||
@@ -34,6 +34,8 @@ function build_docker_images() {
|
||||
service_list="chatqna chatqna-ui dataprep retriever vllm nginx"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
|
||||
docker images && sleep 1s
|
||||
}
|
||||
|
||||
|
||||
@@ -35,6 +35,8 @@ function build_docker_images() {
|
||||
service_list="chatqna chatqna-ui dataprep retriever vllm nginx"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
|
||||
docker images && sleep 1s
|
||||
}
|
||||
|
||||
|
||||
@@ -27,6 +27,10 @@ function build_docker_images() {
|
||||
service_list="chatqna chatqna-ui dataprep retriever nginx"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/tgi-gaudi:2.3.1
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
docker pull ghcr.io/huggingface/tei-gaudi:1.5.0
|
||||
|
||||
docker images && sleep 1s
|
||||
}
|
||||
|
||||
|
||||
@@ -27,6 +27,9 @@ function build_docker_images() {
|
||||
service_list="chatqna chatqna-ui dataprep retriever nginx"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
|
||||
docker images && sleep 1s
|
||||
}
|
||||
|
||||
|
||||
@@ -31,6 +31,9 @@ function build_docker_images() {
|
||||
service_list="chatqna chatqna-ui dataprep retriever vllm-gaudi nginx"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
docker pull ghcr.io/huggingface/tei-gaudi:1.5.0
|
||||
|
||||
docker images && sleep 1s
|
||||
}
|
||||
|
||||
|
||||
@@ -35,6 +35,8 @@ function build_docker_images() {
|
||||
service_list="chatqna chatqna-ui dataprep retriever vllm nginx"
|
||||
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
|
||||
|
||||
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.6
|
||||
|
||||
docker images && sleep 1s
|
||||
}
|
||||
|
||||
|
||||
@@ -5,8 +5,8 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
### The IP address or domain name of the server on which the application is running
|
||||
export HOST_IP=''
|
||||
export EXTERNAL_HOST_IP=''
|
||||
export HOST_IP=${ip_address}
|
||||
export EXTERNAL_HOST_IP=${ip_address}
|
||||
|
||||
### The port of the TGI service. On this port, the TGI service will accept connections
|
||||
export CODEGEN_TGI_SERVICE_PORT=8028
|
||||
@@ -36,4 +36,4 @@ export CODEGEN_BACKEND_SERVICE_URL="http://${EXTERNAL_HOST_IP}:${CODEGEN_BACKEND
|
||||
export CODEGEN_LLM_SERVICE_HOST_IP=${HOST_IP}
|
||||
|
||||
### The CodeGen service UI port
|
||||
export CODEGEN_UI_SERVICE_PORT=18151
|
||||
export CODEGEN_UI_SERVICE_PORT=5173
|
||||
|
||||
@@ -5,8 +5,8 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
### The IP address or domain name of the server on which the application is running
|
||||
export HOST_IP=''
|
||||
export EXTERNAL_HOST_IP=''
|
||||
export HOST_IP=${ip_address}
|
||||
export EXTERNAL_HOST_IP=${ip_address}
|
||||
|
||||
### The port of the vLLM service. On this port, the TGI service will accept connections
|
||||
export CODEGEN_VLLM_SERVICE_PORT=8028
|
||||
@@ -25,7 +25,7 @@ export CODEGEN_LLM_SERVICE_PORT=9000
|
||||
export CODEGEN_MEGA_SERVICE_HOST_IP=${HOST_IP}
|
||||
|
||||
### The port for CodeGen backend service
|
||||
export CODEGEN_BACKEND_SERVICE_PORT=18150
|
||||
export CODEGEN_BACKEND_SERVICE_PORT=7778
|
||||
|
||||
### The URL of CodeGen backend service, used by the frontend service
|
||||
export CODEGEN_BACKEND_SERVICE_URL="http://${EXTERNAL_HOST_IP}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen"
|
||||
@@ -34,4 +34,4 @@ export CODEGEN_BACKEND_SERVICE_URL="http://${EXTERNAL_HOST_IP}:${CODEGEN_BACKEND
|
||||
export CODEGEN_LLM_SERVICE_HOST_IP=${HOST_IP}
|
||||
|
||||
### The CodeGen service UI port
|
||||
export CODEGEN_UI_SERVICE_PORT=18151
|
||||
export CODEGEN_UI_SERVICE_PORT=5173
|
||||
|
||||
@@ -41,6 +41,7 @@ services:
|
||||
https_proxy: ${https_proxy}
|
||||
HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
|
||||
host_ip: ${host_ip}
|
||||
VLLM_CPU_KVCACHE_SPACE: 40
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"]
|
||||
interval: 10s
|
||||
|
||||
@@ -52,6 +52,7 @@ services:
|
||||
VLLM_SKIP_WARMUP: ${VLLM_SKIP_WARMUP:-false}
|
||||
NUM_CARDS: ${NUM_CARDS:-1}
|
||||
VLLM_TORCH_PROFILER_DIR: "/mnt"
|
||||
VLLM_CPU_KVCACHE_SPACE: 40
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"]
|
||||
interval: 10s
|
||||
|
||||
33
CodeGen/tests/README.md
Normal file
33
CodeGen/tests/README.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# CodeGen E2E test scripts
|
||||
|
||||
## Set the required environment variable
|
||||
|
||||
```bash
|
||||
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
|
||||
```
|
||||
|
||||
## Run test
|
||||
|
||||
On Intel Xeon with TGI:
|
||||
|
||||
```bash
|
||||
bash test_compose_on_xeon.sh
|
||||
```
|
||||
|
||||
On Intel Gaudi with TGI:
|
||||
|
||||
```bash
|
||||
bash test_compose_on_gaudi.sh
|
||||
```
|
||||
|
||||
On AMD ROCm with TGI:
|
||||
|
||||
```bash
|
||||
bash test_compose_on_rocm.sh
|
||||
```
|
||||
|
||||
On AMD ROCm with vLLM:
|
||||
|
||||
```bash
|
||||
bash test_compose_vllm_on_rocm.sh
|
||||
```
|
||||
@@ -10,21 +10,11 @@ echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
|
||||
export REGISTRY=${IMAGE_REPO}
|
||||
export TAG=${IMAGE_TAG}
|
||||
export MODEL_CACHE=${model_cache:-"./data"}
|
||||
export REDIS_DB_PORT=6379
|
||||
export REDIS_INSIGHTS_PORT=8001
|
||||
export REDIS_RETRIEVER_PORT=7000
|
||||
export EMBEDDER_PORT=6000
|
||||
export TEI_EMBEDDER_PORT=8090
|
||||
export DATAPREP_REDIS_PORT=6007
|
||||
|
||||
WORKPATH=$(dirname "$PWD")
|
||||
LOG_PATH="$WORKPATH/tests"
|
||||
ip_address=$(hostname -I | awk '{print $1}')
|
||||
|
||||
export http_proxy=${http_proxy}
|
||||
export https_proxy=${https_proxy}
|
||||
export no_proxy=${no_proxy},${ip_address}
|
||||
|
||||
function build_docker_images() {
|
||||
opea_branch=${opea_branch:-"main"}
|
||||
# If the opea_branch isn't main, replace the git clone branch in Dockerfile.
|
||||
@@ -58,29 +48,12 @@ function start_services() {
|
||||
local compose_profile="$1"
|
||||
local llm_container_name="$2"
|
||||
|
||||
cd $WORKPATH/docker_compose/intel/hpu/gaudi
|
||||
|
||||
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
|
||||
export LLM_ENDPOINT="http://${ip_address}:8028"
|
||||
cd $WORKPATH/docker_compose
|
||||
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-32B-Instruct"
|
||||
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
|
||||
export MEGA_SERVICE_PORT=7778
|
||||
export MEGA_SERVICE_HOST_IP=${ip_address}
|
||||
export LLM_SERVICE_HOST_IP=${ip_address}
|
||||
export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:${MEGA_SERVICE_PORT}/v1/codegen"
|
||||
export NUM_CARDS=1
|
||||
export host_ip=${ip_address}
|
||||
|
||||
export REDIS_URL="redis://${host_ip}:${REDIS_DB_PORT}"
|
||||
export RETRIEVAL_SERVICE_HOST_IP=${host_ip}
|
||||
export RETRIEVER_COMPONENT_NAME="OPEA_RETRIEVER_REDIS"
|
||||
export INDEX_NAME="CodeGen"
|
||||
|
||||
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
|
||||
export TEI_EMBEDDING_HOST_IP=${host_ip}
|
||||
export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:${TEI_EMBEDDER_PORT}"
|
||||
export DATAPREP_ENDPOINT="http://${host_ip}:${DATAPREP_REDIS_PORT}/v1/dataprep"
|
||||
|
||||
export INDEX_NAME="CodeGen"
|
||||
source set_env.sh
|
||||
cd intel/hpu/gaudi
|
||||
|
||||
# Start Docker Containers
|
||||
docker compose --profile ${compose_profile} up -d | tee ${LOG_PATH}/start_services_with_compose.log
|
||||
@@ -144,7 +117,7 @@ function validate_microservices() {
|
||||
"completion_tokens" \
|
||||
"llm-service" \
|
||||
"${llm_container_name}" \
|
||||
'{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "def print_hello_world():"}], "max_tokens": 256}'
|
||||
'{"model": "Qwen/Qwen2.5-Coder-32B-Instruct", "messages": [{"role": "user", "content": "def print_hello_world():"}], "max_tokens": 256}'
|
||||
|
||||
# llm microservice
|
||||
validate_services \
|
||||
@@ -176,7 +149,7 @@ function validate_megaservice() {
|
||||
# Curl the Mega Service with index_name and agents_flag
|
||||
validate_services \
|
||||
"${ip_address}:7778/v1/codegen" \
|
||||
"" \
|
||||
"fingerprint" \
|
||||
"mega-codegen" \
|
||||
"codegen-gaudi-backend-server" \
|
||||
'{ "index_name": "test_redis", "agents_flag": "True", "messages": "def print_hello_world():", "max_tokens": 256}'
|
||||
@@ -225,8 +198,9 @@ function validate_gradio() {
|
||||
|
||||
function stop_docker() {
|
||||
local docker_profile="$1"
|
||||
|
||||
cd $WORKPATH/docker_compose/intel/hpu/gaudi
|
||||
cd $WORKPATH/docker_compose
|
||||
source set_env.sh
|
||||
cd intel/hpu/gaudi
|
||||
docker compose --profile ${docker_profile} down
|
||||
}
|
||||
|
||||
|
||||
@@ -41,18 +41,7 @@ function build_docker_images() {
|
||||
|
||||
function start_services() {
|
||||
cd $WORKPATH/docker_compose/amd/gpu/rocm/
|
||||
|
||||
export CODEGEN_LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
|
||||
export CODEGEN_TGI_SERVICE_PORT=8028
|
||||
export CODEGEN_TGI_LLM_ENDPOINT="http://${ip_address}:${CODEGEN_TGI_SERVICE_PORT}"
|
||||
export CODEGEN_LLM_SERVICE_PORT=9000
|
||||
export CODEGEN_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
|
||||
export CODEGEN_MEGA_SERVICE_HOST_IP=${ip_address}
|
||||
export CODEGEN_LLM_SERVICE_HOST_IP=${ip_address}
|
||||
export CODEGEN_BACKEND_SERVICE_PORT=7778
|
||||
export CODEGEN_BACKEND_SERVICE_URL="http://${ip_address}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen"
|
||||
export CODEGEN_UI_SERVICE_PORT=5173
|
||||
export HOST_IP=${ip_address}
|
||||
source set_env.sh
|
||||
|
||||
sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
|
||||
|
||||
|
||||
@@ -10,21 +10,11 @@ echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
|
||||
export REGISTRY=${IMAGE_REPO}
|
||||
export TAG=${IMAGE_TAG}
|
||||
export MODEL_CACHE=${model_cache:-"./data"}
|
||||
export REDIS_DB_PORT=6379
|
||||
export REDIS_INSIGHTS_PORT=8001
|
||||
export REDIS_RETRIEVER_PORT=7000
|
||||
export EMBEDDER_PORT=6000
|
||||
export TEI_EMBEDDER_PORT=8090
|
||||
export DATAPREP_REDIS_PORT=6007
|
||||
|
||||
WORKPATH=$(dirname "$PWD")
|
||||
LOG_PATH="$WORKPATH/tests"
|
||||
ip_address=$(hostname -I | awk '{print $1}')
|
||||
|
||||
export http_proxy=${http_proxy}
|
||||
export https_proxy=${https_proxy}
|
||||
export no_proxy=${no_proxy},${ip_address}
|
||||
|
||||
function build_docker_images() {
|
||||
opea_branch=${opea_branch:-"main"}
|
||||
# If the opea_branch isn't main, replace the git clone branch in Dockerfile.
|
||||
@@ -60,26 +50,11 @@ function start_services() {
|
||||
local compose_profile="$1"
|
||||
local llm_container_name="$2"
|
||||
|
||||
cd $WORKPATH/docker_compose/intel/cpu/xeon/
|
||||
|
||||
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
|
||||
export LLM_ENDPOINT="http://${ip_address}:8028"
|
||||
cd $WORKPATH/docker_compose
|
||||
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-32B-Instruct"
|
||||
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
|
||||
export MEGA_SERVICE_PORT=7778
|
||||
export MEGA_SERVICE_HOST_IP=${ip_address}
|
||||
export LLM_SERVICE_HOST_IP=${ip_address}
|
||||
export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:${MEGA_SERVICE_PORT}/v1/codegen"
|
||||
export host_ip=${ip_address}
|
||||
|
||||
export REDIS_URL="redis://${host_ip}:${REDIS_DB_PORT}"
|
||||
export RETRIEVAL_SERVICE_HOST_IP=${host_ip}
|
||||
export RETRIEVER_COMPONENT_NAME="OPEA_RETRIEVER_REDIS"
|
||||
export INDEX_NAME="CodeGen"
|
||||
|
||||
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
|
||||
export TEI_EMBEDDING_HOST_IP=${host_ip}
|
||||
export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:${TEI_EMBEDDER_PORT}"
|
||||
export DATAPREP_ENDPOINT="http://${host_ip}:${DATAPREP_REDIS_PORT}/v1/dataprep"
|
||||
source set_env.sh
|
||||
cd intel/cpu/xeon/
|
||||
|
||||
# Start Docker Containers
|
||||
docker compose --profile ${compose_profile} up -d > ${LOG_PATH}/start_services_with_compose.log
|
||||
@@ -143,7 +118,7 @@ function validate_microservices() {
|
||||
"completion_tokens" \
|
||||
"llm-service" \
|
||||
"${llm_container_name}" \
|
||||
'{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "What is Deep Learning?"}], "max_tokens": 256}'
|
||||
'{"model": "Qwen/Qwen2.5-Coder-32B-Instruct", "messages": [{"role": "user", "content": "What is Deep Learning?"}], "max_tokens": 256}'
|
||||
|
||||
# llm microservice
|
||||
validate_services \
|
||||
@@ -175,7 +150,7 @@ function validate_megaservice() {
|
||||
# Curl the Mega Service with index_name and agents_flag
|
||||
validate_services \
|
||||
"${ip_address}:7778/v1/codegen" \
|
||||
"" \
|
||||
"fingerprint" \
|
||||
"mega-codegen" \
|
||||
"codegen-xeon-backend-server" \
|
||||
'{ "index_name": "test_redis", "agents_flag": "True", "messages": "def print_hello_world():", "max_tokens": 256}'
|
||||
@@ -225,7 +200,9 @@ function validate_gradio() {
|
||||
function stop_docker() {
|
||||
local docker_profile="$1"
|
||||
|
||||
cd $WORKPATH/docker_compose/intel/cpu/xeon/
|
||||
cd $WORKPATH/docker_compose
|
||||
source set_env.sh
|
||||
cd intel/cpu/xeon/
|
||||
docker compose --profile ${docker_profile} down
|
||||
}
|
||||
|
||||
|
||||
@@ -40,18 +40,7 @@ function build_docker_images() {
|
||||
|
||||
function start_services() {
|
||||
cd $WORKPATH/docker_compose/amd/gpu/rocm/
|
||||
|
||||
export CODEGEN_LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
|
||||
export CODEGEN_VLLM_SERVICE_PORT=8028
|
||||
export CODEGEN_VLLM_ENDPOINT="http://${ip_address}:${CODEGEN_VLLM_SERVICE_PORT}"
|
||||
export CODEGEN_LLM_SERVICE_PORT=9000
|
||||
export CODEGEN_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
|
||||
export CODEGEN_MEGA_SERVICE_HOST_IP=${ip_address}
|
||||
export CODEGEN_LLM_SERVICE_HOST_IP=${ip_address}
|
||||
export CODEGEN_BACKEND_SERVICE_PORT=7778
|
||||
export CODEGEN_BACKEND_SERVICE_URL="http://${ip_address}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen"
|
||||
export CODEGEN_UI_SERVICE_PORT=5173
|
||||
export HOST_IP=${ip_address}
|
||||
source set_env.sh
|
||||
|
||||
sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
|
||||
|
||||
@@ -104,7 +93,7 @@ function validate_microservices() {
|
||||
"content" \
|
||||
"codegen-vllm-service" \
|
||||
"codegen-vllm-service" \
|
||||
'{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "What is Deep Learning?"}], "max_tokens": 17}'
|
||||
'{"model": "Qwen/Qwen2.5-Coder-32B-Instruct", "messages": [{"role": "user", "content": "What is Deep Learning?"}], "max_tokens": 17}'
|
||||
sleep 10
|
||||
# llm microservice
|
||||
validate_services \
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
|
||||
deploy:
|
||||
device: gaudi
|
||||
version: 1.3.0
|
||||
version: 1.2.0
|
||||
modelUseHostPath: /mnt/models
|
||||
HUGGINGFACEHUB_API_TOKEN: "" # mandatory
|
||||
node: [1]
|
||||
@@ -20,10 +20,14 @@ deploy:
|
||||
memory_capacity: "8000Mi"
|
||||
replicaCount: [1]
|
||||
|
||||
teirerank:
|
||||
enabled: False
|
||||
|
||||
llm:
|
||||
engine: vllm # or tgi
|
||||
model_id: "meta-llama/Llama-3.2-3B-Instruct" # mandatory
|
||||
replicaCount: [1]
|
||||
replicaCount:
|
||||
without_teirerank: [1] # When teirerank.enabled is False
|
||||
resources:
|
||||
enabled: False
|
||||
cards_per_instance: 1
|
||||
@@ -74,7 +78,7 @@ benchmark:
|
||||
|
||||
# workload, all of the test cases will run for benchmark
|
||||
bench_target: ["docsumfixed"] # specify the bench_target for benchmark
|
||||
dataset: "/home/sdp/pubmed_10.txt" # specify the absolute path to the dataset file
|
||||
dataset: "/home/sdp/upload.txt" # specify the absolute path to the dataset file
|
||||
summary_type: "stuff"
|
||||
stream: True
|
||||
|
||||
|
||||
@@ -23,17 +23,17 @@ This section describes how to quickly deploy and test the DocSum service manuall
|
||||
|
||||
### Access the Code
|
||||
|
||||
Clone the GenAIExample repository and access the DocSum AMD GPU platform Docker Compose files and supporting scripts:
|
||||
Clone the GenAIExample repository and access the ChatQnA AMD GPU platform Docker Compose files and supporting scripts:
|
||||
|
||||
```bash
|
||||
```
|
||||
git clone https://github.com/opea-project/GenAIExamples.git
|
||||
cd GenAIExamples/DocSum/docker_compose/amd/gpu/rocm
|
||||
```
|
||||
|
||||
Checkout a released version, such as v1.3:
|
||||
Checkout a released version, such as v1.2:
|
||||
|
||||
```
|
||||
git checkout v1.3
|
||||
git checkout v1.2
|
||||
```
|
||||
|
||||
### Generate a HuggingFace Access Token
|
||||
@@ -42,96 +42,33 @@ Some HuggingFace resources, such as some models, are only accessible if you have
|
||||
|
||||
### Configure the Deployment Environment
|
||||
|
||||
To set up environment variables for deploying DocSum services, set the parameters specific to your deployment environment and source the appropriate `set_env_*.sh` script in this directory:
|
||||
To set up environment variables for deploying DocSum services, source the _set_env.sh_ script in this directory:
|
||||
|
||||
- if using vLLM: `set_env_vllm.sh`
|
||||
- if using TGI: `set_env.sh`
|
||||
|
||||
Set the values of the variables:
|
||||
|
||||
- **HOST_IP, EXTERNAL_HOST_IP** - These variables configure the server name/address in the operating system environment so that the application services can interact with each other and with the outside world.
|
||||
|
||||
If your server uses only an internal address and is not accessible from the Internet, then the values for these two variables will be the same and the value will be equal to the server's internal name/address.
|
||||
|
||||
If your server uses only an external, Internet-accessible address, then the values for these two variables will be the same and the value will be equal to the server's external name/address.
|
||||
|
||||
If your server is located on an internal network, has an internal address, but is accessible from the Internet via a proxy/firewall/load balancer, then the HOST_IP variable will have a value equal to the internal name/address of the server, and the EXTERNAL_HOST_IP variable will have a value equal to the external name/address of the proxy/firewall/load balancer behind which the server is located.
|
||||
|
||||
We set these values in the `set_env_*.sh` file (an illustrative example with hypothetical addresses is shown after this list).
|
||||
|
||||
- **Variables with names like `*_PORT`** - These variables set the IP port numbers for establishing network connections to the application services.
|
||||
The values shown in `set_env.sh` or `set_env_vllm.sh` are the values used during development and testing of the application, configured for the environment in which that development was performed. They must be set according to the network access rules of your environment's server and must not overlap with IP ports already in use by other applications.
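For illustration, a minimal sketch of the two host variables for the proxy/load-balancer scenario described above, using hypothetical addresses:

```bash
# Hypothetical values for the proxy/load-balancer scenario (replace with your own)
export HOST_IP="192.168.1.10"                 # internal name/address of the server
export EXTERNAL_HOST_IP="docsum.example.com"  # external name/address of the proxy/load balancer
```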
|
||||
|
||||
Setting variables in the operating system environment:
|
||||
|
||||
```bash
|
||||
export HUGGINGFACEHUB_API_TOKEN="Your_HuggingFace_API_Token"
|
||||
source ./set_env_*.sh # replace the script name with the appropriate one
|
||||
```
|
||||
source ./set_env.sh
|
||||
```
|
||||
|
||||
Consult the section on [DocSum Service configuration](#docsum-configuration) for information on how service specific configuration parameters affect deployments.
|
||||
The _set_env.sh_ script will prompt for required and optional environment variables used to configure the DocSum services. If a value is not entered, the script will use a default value for the same. It will also generate a _.env_ file defining the desired configuration. Consult the section on [DocSum Service configuration](#docsum-service-configuration) for information on how service specific configuration parameters affect deployments.
|
||||
|
||||
### Deploy the Services Using Docker Compose
|
||||
|
||||
To deploy the DocSum services, execute the `docker compose up` command with the appropriate arguments. For a default deployment with TGI, execute the command below. It uses the 'compose.yaml' file.
|
||||
To deploy the DocSum services, execute the `docker compose up` command with the appropriate arguments. For a default deployment, execute:
|
||||
|
||||
```bash
|
||||
cd docker_compose/amd/gpu/rocm
|
||||
# if used TGI
|
||||
docker compose -f compose.yaml up -d
|
||||
# if used vLLM
|
||||
# docker compose -f compose_vllm.yaml up -d
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
To enable GPU support for AMD GPUs, the following configuration is added to the Docker Compose file:
|
||||
**Note**: developers should build docker image from source when:
|
||||
|
||||
- compose_vllm.yaml - for vLLM-based application
|
||||
- compose.yaml - for TGI-based
|
||||
|
||||
```yaml
|
||||
shm_size: 1g
|
||||
devices:
|
||||
- /dev/kfd:/dev/kfd
|
||||
- /dev/dri:/dev/dri
|
||||
cap_add:
|
||||
- SYS_PTRACE
|
||||
group_add:
|
||||
- video
|
||||
security_opt:
|
||||
- seccomp:unconfined
|
||||
```
|
||||
|
||||
This configuration forwards all available GPUs to the container. To use a specific GPU, specify its `cardN` and `renderN` device IDs. For example:
|
||||
|
||||
```yaml
|
||||
shm_size: 1g
|
||||
devices:
|
||||
- /dev/kfd:/dev/kfd
|
||||
- /dev/dri/card0:/dev/dri/card0
|
||||
- /dev/dri/render128:/dev/dri/render128
|
||||
cap_add:
|
||||
- SYS_PTRACE
|
||||
group_add:
|
||||
- video
|
||||
security_opt:
|
||||
- seccomp:unconfined
|
||||
```
|
||||
|
||||
**How to Identify GPU Device IDs:**
|
||||
Use AMD GPU driver utilities to determine the correct `cardN` and `renderN` IDs for your GPU.
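For example, the following commands are one way to inspect the available devices; this assumes the amdgpu driver and, optionally, the ROCm tools are installed on the host:

```bash
# List the DRI nodes exposed by the GPU driver; each GPU has a card node and a render node
ls -l /dev/dri
# If the ROCm tools are installed, rocm-smi prints an overview of the detected GPUs
rocm-smi
```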
|
||||
|
||||
> **Note**: developers should build docker image from source when:
|
||||
>
|
||||
> - Developing off the git main branch (as the container's ports in the repo may be different from the published docker image).
|
||||
> - Unable to download the docker image.
|
||||
> - Using a specific version of the Docker image.
|
||||
- Developing off the git main branch (as the container's ports in the repo may be different from the published docker image).
|
||||
- Unable to download the docker image.
|
||||
- Using a specific version of the Docker image.
|
||||
|
||||
Please refer to the table below to build different microservices from source:
|
||||
|
||||
| Microservice | Deployment Guide |
|
||||
| ------------ | ------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| whisper | [whisper build guide](https://github.com/opea-project/GenAIComps/tree/main/comps/third_parties/whisper/src) |
|
||||
| TGI | [TGI project](https://github.com/huggingface/text-generation-inference.git) |
|
||||
| vLLM | [vLLM build guide](https://github.com/opea-project/GenAIComps/tree/main/comps/third_parties/vllm#build-docker) |
|
||||
| llm-docsum | [LLM-DocSum build guide](https://github.com/opea-project/GenAIComps/tree/main/comps/llms/src/doc-summarization#12-build-docker-image) |
|
||||
| MegaService | [MegaService build guide](../../../../README_miscellaneous.md#build-megaservice-docker-image) |
|
||||
@@ -147,8 +84,6 @@ docker ps -a
|
||||
|
||||
For the default deployment, the following 5 containers should have started:
|
||||
|
||||
If TGI is used:
|
||||
|
||||
```
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
748f577b3c78 opea/whisper:latest "python whisper_s…" 5 minutes ago Up About a minute 0.0.0.0:7066->7066/tcp, :::7066->7066/tcp whisper-service
|
||||
@@ -158,39 +93,24 @@ fds3dd5b9fd8 opea/docsum:latest "py
|
||||
78964d0c1hg5 ghcr.io/huggingface/text-generation-inference:2.4.1-rocm "/tgi-entrypoint.sh" 5 minutes ago Up 5 minutes (healthy) 0.0.0.0:8008->80/tcp, [::]:8008->80/tcp docsum-tgi-service
|
||||
```
|
||||
|
||||
If vLLM is used:
|
||||
|
||||
```
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
748f577b3c78 opea/whisper:latest "python whisper_s…" 5 minutes ago Up About a minute 0.0.0.0:7066->7066/tcp, :::7066->7066/tcp whisper-service
|
||||
4eq8b7034fd9 opea/docsum-gradio-ui:latest "docker-entrypoint.s…" 5 minutes ago Up About a minute 0.0.0.0:5173->5173/tcp, :::5173->5173/tcp docsum-ui-server
|
||||
fds3dd5b9fd8 opea/docsum:latest "python docsum.py" 5 minutes ago Up About a minute 0.0.0.0:8888->8888/tcp, :::8888->8888/tcp docsum-backend-server
|
||||
78fsd6fabfs7 opea/llm-docsum:latest "bash entrypoint.sh" 5 minutes ago Up About a minute 0.0.0.0:9000->9000/tcp, :::9000->9000/tcp docsum-llm-server
|
||||
78964d0c1hg5 opea/vllm-rocm:latest "python3 /workspace/…" 5 minutes ago Up 5 minutes (healthy) 0.0.0.0:8008->80/tcp, [::]:8008->80/tcp docsum-vllm-service
|
||||
```
|
||||
|
||||
### Test the Pipeline
|
||||
|
||||
Once the DocSum services are running, test the pipeline using the following command:
|
||||
|
||||
```bash
|
||||
curl -X POST http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
curl -X POST http://${host_ip}:8888/v1/docsum \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"type": "text", "messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}'
|
||||
```
|
||||
|
||||
**Note** The value of _HOST_IP_ was set using the _set_env.sh_ script and can be found in the _.env_ file.
|
||||
**Note** The value of _host_ip_ was set using the _set_env.sh_ script and can be found in the _.env_ file.
|
||||
|
||||
### Cleanup the Deployment
|
||||
|
||||
To stop the containers associated with the deployment, execute the following command:
|
||||
|
||||
```bash
|
||||
# if used TGI
|
||||
```
|
||||
docker compose -f compose.yaml down
|
||||
# if used vLLM
|
||||
# docker compose -f compose_vllm.yaml down
|
||||
|
||||
```
|
||||
|
||||
All the DocSum containers will be stopped and then removed on completion of the "down" command.
|
||||
@@ -212,7 +132,7 @@ There are also some customized usage.
|
||||
|
||||
```bash
|
||||
# form input. Use English mode (default).
|
||||
curl http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
curl http://${host_ip}:8888/v1/docsum \
|
||||
-H "Content-Type: multipart/form-data" \
|
||||
-F "type=text" \
|
||||
-F "messages=Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5." \
|
||||
@@ -221,7 +141,7 @@ curl http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
-F "stream=True"
|
||||
|
||||
# Use Chinese mode.
|
||||
curl http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
curl http://${host_ip}:8888/v1/docsum \
|
||||
-H "Content-Type: multipart/form-data" \
|
||||
-F "type=text" \
|
||||
-F "messages=2024年9月26日,北京——今日,英特尔正式发布英特尔® 至强® 6性能核处理器(代号Granite Rapids),为AI、数据分析、科学计算等计算密集型业务提供卓越性能。" \
|
||||
@@ -230,7 +150,7 @@ curl http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
-F "stream=True"
|
||||
|
||||
# Upload file
|
||||
curl http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
curl http://${host_ip}:8888/v1/docsum \
|
||||
-H "Content-Type: multipart/form-data" \
|
||||
-F "type=text" \
|
||||
-F "messages=" \
|
||||
@@ -246,11 +166,11 @@ curl http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
Audio:
|
||||
|
||||
```bash
|
||||
curl -X POST http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
curl -X POST http://${host_ip}:8888/v1/docsum \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"type": "audio", "messages": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA"}'
|
||||
|
||||
curl http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
curl http://${host_ip}:8888/v1/docsum \
|
||||
-H "Content-Type: multipart/form-data" \
|
||||
-F "type=audio" \
|
||||
-F "messages=UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA" \
|
||||
@@ -262,11 +182,11 @@ curl http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
Video:
|
||||
|
||||
```bash
|
||||
curl -X POST http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
curl -X POST http://${host_ip}:8888/v1/docsum \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"type": "video", "messages": "convert your video to base64 data type"}'
|
||||
|
||||
curl http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
curl http://${host_ip}:8888/v1/docsum \
|
||||
-H "Content-Type: multipart/form-data" \
|
||||
-F "type=video" \
|
||||
-F "messages=convert your video to base64 data type" \
|
||||
@@ -288,7 +208,7 @@ If you want to deal with long context, can set following parameters and select s
|
||||
`summary_type` is set to `auto` by default. In this mode the service checks the input token length: if it exceeds `MAX_INPUT_TOKENS`, `summary_type` is automatically switched to `refine` mode, otherwise `stuff` mode is used.
|
||||
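The following is a minimal sketch of that selection logic, not the service's actual implementation; `MAX_INPUT_TOKENS` and the input token count are assumed example values:

```bash
MAX_INPUT_TOKENS=2048   # assumed example limit
input_tokens=3500       # hypothetical token count of the submitted document

if [ "$input_tokens" -gt "$MAX_INPUT_TOKENS" ]; then
  summary_type="refine"
else
  summary_type="stuff"
fi
echo "summary_type=$summary_type"
```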
|
||||
```bash
|
||||
curl http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
curl http://${host_ip}:8888/v1/docsum \
|
||||
-H "Content-Type: multipart/form-data" \
|
||||
-F "type=text" \
|
||||
-F "messages=" \
|
||||
@@ -303,7 +223,7 @@ curl http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
In this mode the LLM generates the summary from the complete input text. Carefully set `MAX_INPUT_TOKENS` and `MAX_TOTAL_TOKENS` according to your model and device memory; otherwise long inputs may exceed the LLM context limit and raise an error.
|
||||
|
||||
```bash
|
||||
curl http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
curl http://${host_ip}:8888/v1/docsum \
|
||||
-H "Content-Type: multipart/form-data" \
|
||||
-F "type=text" \
|
||||
-F "messages=" \
|
||||
@@ -318,7 +238,7 @@ curl http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
Truncate mode will truncate the input text and keep only the first chunk, whose length is equal to `min(MAX_TOTAL_TOKENS - input.max_tokens - 50, MAX_INPUT_TOKENS)`
|
||||
|
||||
```bash
|
||||
curl http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
curl http://${host_ip}:8888/v1/docsum \
|
||||
-H "Content-Type: multipart/form-data" \
|
||||
-F "type=text" \
|
||||
-F "messages=" \
|
||||
@@ -335,7 +255,7 @@ Map_reduce mode will split the inputs into multiple chunks, map each document to
|
||||
In this mode, default `chunk_size` is set to be `min(MAX_TOTAL_TOKENS - input.max_tokens - 50, MAX_INPUT_TOKENS)`
|
||||
|
||||
```bash
|
||||
curl http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
curl http://${host_ip}:8888/v1/docsum \
|
||||
-H "Content-Type: multipart/form-data" \
|
||||
-F "type=text" \
|
||||
-F "messages=" \
|
||||
@@ -352,7 +272,7 @@ Refin mode will split the inputs into multiple chunks, generate summary for the
|
||||
In this mode, default `chunk_size` is set to be `min(MAX_TOTAL_TOKENS - 2 * input.max_tokens - 128, MAX_INPUT_TOKENS)`.
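A small sketch of the default chunk-size formulas quoted above for the truncate, map_reduce and refine modes, using assumed example values:

```bash
MAX_TOTAL_TOKENS=4096   # assumed example value
MAX_INPUT_TOKENS=2048   # assumed example value
max_tokens=512          # requested output tokens (input.max_tokens)

min() { [ "$1" -lt "$2" ] && echo "$1" || echo "$2"; }

truncate_chunk=$(min $((MAX_TOTAL_TOKENS - max_tokens - 50)) "$MAX_INPUT_TOKENS")     # truncate / map_reduce
refine_chunk=$(min $((MAX_TOTAL_TOKENS - 2 * max_tokens - 128)) "$MAX_INPUT_TOKENS")  # refine
echo "truncate/map_reduce chunk_size: $truncate_chunk, refine chunk_size: $refine_chunk"
```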
|
||||
|
||||
```bash
|
||||
curl http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum \
|
||||
curl http://${host_ip}:8888/v1/docsum \
|
||||
-H "Content-Type: multipart/form-data" \
|
||||
-F "type=text" \
|
||||
-F "messages=" \
|
||||
@@ -368,7 +288,7 @@ Several UI options are provided. If you need to work with multimedia documents,
|
||||
|
||||
### Gradio UI
|
||||
|
||||
To access the UI, use the URL - http://${HOST_IP}:${DOCSUM_FRONTEND_PORT}
|
||||
To access the UI, use the URL - http://${EXTERNAL_HOST_IP}:${FAGGEN_UI_PORT}
|
||||
A page should open when you click through to this address:
|
||||
|
||||

|
||||
|
||||
@@ -40,7 +40,7 @@ USER user
|
||||
|
||||
WORKDIR /home/user/edgecraftrag
|
||||
RUN pip install --no-cache-dir --upgrade pip setuptools==70.0.0 && \
|
||||
pip install --no-cache-dir --extra-index-url https://download.pytorch.org/whl/cpu -r requirements.txt
|
||||
pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
WORKDIR /home/user/
|
||||
RUN git clone https://github.com/openvinotoolkit/openvino.genai.git genai
|
||||
|
||||
@@ -44,8 +44,6 @@ git clone https://github.com/opea-project/GenAIExamples.git
|
||||
### 2.2 Set up env vars
|
||||
|
||||
```bash
|
||||
export ip_address="External_Public_IP"
|
||||
export no_proxy=${your_no_proxy},${ip_address}
|
||||
export HF_CACHE_DIR=/path/to/your/model/cache/
|
||||
export HF_TOKEN=<your-hf-token>
|
||||
export FINNHUB_API_KEY=<your-finnhub-api-key> # go to https://finnhub.io/ to get your free api key
|
||||
@@ -102,8 +100,8 @@ bash launch_dataprep.sh
|
||||
Validate data ingestion and retrieval from the database:
|
||||
|
||||
```bash
|
||||
python $WORKDIR/GenAIExamples/FinanceAgent/tests/test_redis_finance.py --port 6007 --test_option ingest
|
||||
python $WORKDIR/GenAIExamples/FinanceAgent/tests/test_redis_finance.py --port 6007 --test_option get
|
||||
python $WORKPATH/tests/test_redis_finance.py --port 6007 --test_option ingest
|
||||
python $WORKPATH/tests/test_redis_finance.py --port 6007 --test_option get
|
||||
```
|
||||
|
||||
### 3.3 Launch the multi-agent system
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Deploy and Benchmark
|
||||
# ChatQnA Benchmarking
|
||||
|
||||
## Purpose
|
||||
|
||||
@@ -8,11 +8,6 @@ We aim to run these benchmarks and share them with the OPEA community for three
|
||||
- To establish a baseline for validating optimization solutions across different implementations, providing clear guidance on which methods are most effective for your use case.
|
||||
- To inspire the community to build upon our benchmarks, allowing us to better quantify new solutions in conjunction with current leading LLMs, serving frameworks etc.
|
||||
|
||||
### Support Example List
|
||||
|
||||
- ChatQnA
|
||||
- DocSum
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Prerequisites](#prerequisites)
|
||||
@@ -73,7 +68,6 @@ Before running the benchmarks, ensure you have:
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
Note: the benchmark requires `opea-eval>=1.3`; if v1.3 has not been released yet, build `opea-eval` from [source](https://github.com/opea-project/GenAIEval), for example as sketched below.
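A minimal sketch of building it from source, assuming the GenAIEval repository is installable with pip from its top-level directory:

```bash
git clone https://github.com/opea-project/GenAIEval.git
cd GenAIEval
pip install -e .   # assumes a standard pip-installable layout
```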
|
||||
|
||||
## Data Preparation
|
||||
|
||||
|
||||
@@ -1,29 +1,28 @@
|
||||
# Deploying SearchQnA on AMD ROCm Platform
|
||||
# Example SearchQnA deployments on AMD GPU (ROCm)
|
||||
|
||||
This document outlines the single node deployment process for a SearchQnA application utilizing the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservices on AMD ROCm Platform.
|
||||
This document outlines the deployment process for a SearchQnA application utilizing the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline on AMD GPU (ROCm).
|
||||
|
||||
## Table of Contents
|
||||
This example includes the following sections:
|
||||
|
||||
1. [SearchQnA Quick Start Deployment](#searchqna-quick-start-deployment)
|
||||
2. [SearchQnA Docker Compose Files](#searchqna-docker-compose-files)
|
||||
3. [Validate Microservices](#validate-microservices)
|
||||
4. [Launch the UI](#launch-the-ui): Guideline for UI usage
|
||||
5. [Conclusion](#conclusion)
|
||||
- [SearchQnA Quick Start Deployment](#searchqna-quick-start-deployment): Demonstrates how to quickly deploy a SearchQnA application/pipeline on AMD GPU platform.
|
||||
- [SearchQnA Docker Compose Files](#searchqna-docker-compose-files): Describes some example deployments and their docker compose files.
|
||||
- [Launch the UI](#launch-the-ui): Guideline for UI usage
|
||||
|
||||
## SearchQnA Quick Start Deployment
|
||||
|
||||
This section describes how to quickly deploy and test the SearchQnA service manually on an AMD ROCm Platform. The basic steps are:
|
||||
This section describes how to quickly deploy and test the SearchQnA service manually on AMD GPU (ROCm). The basic steps are:
|
||||
|
||||
1. [Access the Code](#access-the-code)
|
||||
2. [Configure the Deployment Environment](#configure-the-deployment-environment)
|
||||
3. [Deploy the Services Using Docker Compose](#deploy-the-services-using-docker-compose)
|
||||
4. [Check the Deployment Status](#check-the-deployment-status)
|
||||
5. [Validate the Pipeline](#validate-the-pipeline)
|
||||
6. [Cleanup the Deployment](#cleanup-the-deployment)
|
||||
2. [Generate a HuggingFace Access Token](#generate-a-huggingface-access-token)
|
||||
3. [Configure the Deployment Environment](#configure-the-deployment-environment)
|
||||
4. [Deploy the Services Using Docker Compose](#deploy-the-services-using-docker-compose)
|
||||
5. [Check the Deployment Status](#check-the-deployment-status)
|
||||
6. [Test the Pipeline](#test-the-pipeline)
|
||||
7. [Cleanup the Deployment](#cleanup-the-deployment)
|
||||
|
||||
### Access the Code
|
||||
|
||||
Clone the GenAIExample repository and access the SearchQnA AMD ROCm Platform Docker Compose files and supporting scripts:
|
||||
Clone the GenAIExample repository and access the SearchQnA AMD GPU (ROCm) Docker Compose files and supporting scripts:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/opea-project/GenAIExamples.git
|
||||
@@ -42,56 +41,34 @@ Some HuggingFace resources require an access token. Developers can create one by
|
||||
|
||||
### Configure the Deployment Environment
|
||||
|
||||
To set up environment variables for deploying SearchQnA services, set up some parameters specific to the deployment environment and source the `set_env.sh` script in this directory:
|
||||
To set up environment variables for deploying SearchQnA services, source the _setup_env.sh_ script in this directory:
|
||||
|
||||
#### For vLLM inference type deployment (default)
|
||||
|
||||
```bash
|
||||
export host_ip="External_Public_IP" # ip address of the node
|
||||
export GOOGLE_CSE_ID="your cse id"
|
||||
export GOOGLE_API_KEY="your google api key"
|
||||
export HUGGINGFACEHUB_API_TOKEN="Your_HuggingFace_API_Token"
|
||||
export http_proxy="Your_HTTP_Proxy" # http proxy if any
|
||||
export https_proxy="Your_HTTPs_Proxy" # https proxy if any
|
||||
export no_proxy=localhost,127.0.0.1,$host_ip # additional no proxies if needed
|
||||
export NGINX_PORT=${your_nginx_port} # your usable port for nginx, 80 for example
|
||||
source ./set_env_vllm.sh
|
||||
```
|
||||
|
||||
#### For TGI inference type deployment
|
||||
|
||||
```bash
|
||||
export host_ip="External_Public_IP" # ip address of the node
|
||||
export GOOGLE_CSE_ID="your cse id"
|
||||
export GOOGLE_API_KEY="your google api key"
|
||||
export HUGGINGFACEHUB_API_TOKEN="Your_HuggingFace_API_Token"
|
||||
export http_proxy="Your_HTTP_Proxy" # http proxy if any
|
||||
export https_proxy="Your_HTTPs_Proxy" # https proxy if any
|
||||
export no_proxy=localhost,127.0.0.1,$host_ip # additional no proxies if needed
|
||||
export NGINX_PORT=${your_nginx_port} # your usable port for nginx, 80 for example
|
||||
# with TGI:
|
||||
source ./set_env.sh
|
||||
```
|
||||
|
||||
Consult the section on [SearchQnA Service configuration](#SearchQnA-configuration) for information on how service specific configuration parameters affect deployments.
|
||||
```
|
||||
# with vLLM:
|
||||
source ./set_env_vllm.sh
|
||||
```
|
||||
|
||||
The _setup_env.sh_ script prompts for the required and optional environment variables used to configure the TGI-based SearchQnA services, and _setup_env_vllm.sh_ does the same for the vLLM-based services. If a value is not entered, the script uses a default value. It also generates a _.env_ file defining the desired configuration. Consult the section on [SearchQnA Service configuration](#SearchQnA-service-configuration) for information on how service specific configuration parameters affect deployments.
|
||||
|
||||
### Deploy the Services Using Docker Compose
|
||||
|
||||
To deploy the SearchQnA services, execute the `docker compose up` command with the appropriate arguments. For a default deployment, execute:
|
||||
|
||||
#### For vLLM inference type deployment (default)
|
||||
```bash
|
||||
# with TGI:
|
||||
docker compose -f compose.yaml up -d
|
||||
```
|
||||
|
||||
```bash
|
||||
# with vLLM:
|
||||
docker compose -f compose_vllm.yaml up -d
|
||||
```
|
||||
|
||||
#### For TGI inference type deployment
|
||||
|
||||
```bash
|
||||
# with TGI:
|
||||
docker compose -f compose.yaml up -d
|
||||
```
|
||||
|
||||
**Note**: developers should build docker image from source when:
|
||||
|
||||
- Developing off the git main branch (as the container's ports in the repo may be different from the published docker image).
|
||||
@@ -120,40 +97,7 @@ docker ps -a
|
||||
|
||||
For the default deployment, the following containers should have started
|
||||
|
||||
#### For vLLM inference type deployment (default)
|
||||
|
||||
```
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
50e5f4a00fcc opea/searchqna-ui:latest "docker-entrypoint.s…" About a minute ago Up About a minute 0.0.0.0:18143->5173/tcp, [::]:18143->5173/tcp search-ui-server
|
||||
a8f030d17e40 opea/searchqna:latest "python searchqna.py" About a minute ago Up About a minute 0.0.0.0:18142->8888/tcp, [::]:18142->8888/tcp search-backend-server
|
||||
916c5db048a2 opea/llm-textgen:latest "bash entrypoint.sh" About a minute ago Up About a minute 0.0.0.0:3007->9000/tcp, [::]:3007->9000/tcp search-llm-server
|
||||
bb46cdaf1794 opea/reranking:latest "python opea_reranki…" About a minute ago Up About a minute 0.0.0.0:3005->8000/tcp, [::]:3005->8000/tcp search-reranking-server
|
||||
d89ab0ef3f41 opea/embedding:latest "sh -c 'python $( [ …" About a minute ago Up About a minute 0.0.0.0:3002->6000/tcp, [::]:3002->6000/tcp search-embedding-server
|
||||
b248e55dd20f opea/vllm-rocm:latest "python3 /workspace/…" About a minute ago Up About a minute 0.0.0.0:3080->8011/tcp, [::]:3080->8011/tcp search-vllm-service
|
||||
c3800753fac5 opea/web-retriever:latest "python opea_web_ret…" About a minute ago Up About a minute 0.0.0.0:3003->7077/tcp, [::]:3003->7077/tcp search-web-retriever-server
|
||||
0db8af486bd0 ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 "text-embeddings-rou…" About a minute ago Up About a minute 0.0.0.0:3001->80/tcp, [::]:3001->80/tcp search-tei-embedding-server
|
||||
3125915447ef ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 "text-embeddings-rou…" About a minute ago Up About a minute 0.0.0.0:3004->80/tcp, [::]:3004->80/tcp search-tei-reranking-server
|
||||
```
|
||||
|
||||
#### For TGI inference type deployment
|
||||
|
||||
```
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
67cc886949a3 opea/searchqna-ui:latest "docker-entrypoint.s…" About a minute ago Up About a minute 0.0.0.0:18143->5173/tcp, [::]:18143->5173/tcp search-ui-server
|
||||
6547aca0d5fd opea/searchqna:latest "python searchqna.py" About a minute ago Up About a minute 0.0.0.0:18142->8888/tcp, [::]:18142->8888/tcp search-backend-server
|
||||
213b5d4d5fa5 opea/embedding:latest "sh -c 'python $( [ …" About a minute ago Up About a minute 0.0.0.0:3002->6000/tcp, [::]:3002->6000/tcp search-embedding-server
|
||||
6b90d16100b2 opea/reranking:latest "python opea_reranki…" About a minute ago Up About a minute 0.0.0.0:3005->8000/tcp, [::]:3005->8000/tcp search-reranking-server
|
||||
3266fd85207e opea/llm-textgen:latest "bash entrypoint.sh" About a minute ago Up About a minute 0.0.0.0:3007->9000/tcp, [::]:3007->9000/tcp search-llm-server
|
||||
d7322b70c15d ghcr.io/huggingface/text-generation-inference:2.4.1-rocm "/tgi-entrypoint.sh …" About a minute ago Up About a minute 0.0.0.0:3006->80/tcp, [::]:3006->80/tcp search-tgi-service
|
||||
a703b91b28ed ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 "text-embeddings-rou…" About a minute ago Up About a minute 0.0.0.0:3001->80/tcp, [::]:3001->80/tcp search-tei-embedding-server
|
||||
22098a5eaf59 ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 "text-embeddings-rou…" About a minute ago Up About a minute 0.0.0.0:3004->80/tcp, [::]:3004->80/tcp search-tei-reranking-server
|
||||
830fe84c971d opea/web-retriever:latest "python opea_web_ret…" About a minute ago Up About a minute 0.0.0.0:3003->7077/tcp, [::]:3003->7077/tcp search-web-retriever-server
|
||||
|
||||
```
|
||||
|
||||
If any issues are encountered during deployment, refer to the [Troubleshooting](../../../../README_miscellaneous.md#troubleshooting) section.
|
||||
|
||||
### Validate the Pipeline
|
||||
### Test the Pipeline
|
||||
|
||||
Once the SearchQnA services are running, test the pipeline using the following command:
|
||||
|
||||
@@ -187,125 +131,31 @@ data: [DONE]
|
||||
|
||||
A response text similar to the one above indicates that the service verification was successful.
|
||||
|
||||
**Note**: Access the SearchQnA UI in a web browser at `http://${host_ip}:80`. Confirm that port `80` is open in the firewall. To validate each microservice used in the pipeline, refer to the [Validate Microservices](#validate-microservices) section.
|
||||
|
||||
### Cleanup the Deployment
|
||||
|
||||
To stop the containers associated with the deployment, execute the following command:
|
||||
|
||||
#### For vLLM inference type deployment (default)
|
||||
|
||||
```bash
|
||||
# with vLLM:
|
||||
docker compose -f compose_vllm.yaml down
|
||||
```
|
||||
|
||||
#### For TGI inference type deployment
|
||||
|
||||
```bash
|
||||
# with TGI:
|
||||
docker compose -f compose.yaml down
|
||||
```
|
||||
|
||||
```bash
|
||||
# with vLLM:
|
||||
docker compose -f compose_vllm.yaml down
|
||||
```
|
||||
|
||||
All the SearchQnA containers will be stopped and then removed on completion of the "down" command.
|
||||
|
||||
## SearchQnA Docker Compose Files
|
||||
|
||||
When deploying a SearchQnA pipeline on AMD GPUs (ROCm), different large language model serving frameworks can be selected. The table below outlines the available configurations included in the application. These configurations can serve as templates and be extended to other components available in [GenAIComps](https://github.com/opea-project/GenAIComps.git).
|
||||
When deploying the SearchQnA pipeline on AMD GPUs (ROCm), different large language model serving frameworks can be selected. The table below outlines the available configurations included in the application.
|
||||
|
||||
| File | Description |
|
||||
| ---------------------------------------- | ------------------------------------------------------------------------------------------ |
|
||||
| [compose.yaml](./compose.yaml) | Default compose file using tgi as serving framework |
|
||||
| [compose_vllm.yaml](./compose_vllm.yaml) | The LLM serving framework is vLLM. All other configurations remain the same as the default |
|
||||
|
||||
## Validate Microservices
|
||||
|
||||
1. Embedding backend Service
|
||||
|
||||
```bash
|
||||
curl http://${host_ip}:3001/embed \
|
||||
-X POST \
|
||||
-d '{"inputs":"What is Deep Learning?"}' \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
2. Embedding Microservice
|
||||
|
||||
```bash
|
||||
curl http://${host_ip}:3002/v1/embeddings\
|
||||
-X POST \
|
||||
-d '{"text":"hello"}' \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
3. Web Retriever Microservice
|
||||
|
||||
```bash
|
||||
export your_embedding=$(python3 -c "import random; embedding = [random.uniform(-1, 1) for _ in range(768)]; print(embedding)")
|
||||
curl http://${host_ip}:3003/v1/web_retrieval \
|
||||
-X POST \
|
||||
-d "{\"text\":\"What is the 2024 holiday schedule?\",\"embedding\":${your_embedding}}" \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
4. Reranking backend Service
|
||||
|
||||
```bash
|
||||
# TEI Reranking service
|
||||
curl http://${host_ip}:3004/rerank \
|
||||
-X POST \
|
||||
-d '{"query":"What is Deep Learning?", "texts": ["Deep Learning is not...", "Deep learning is..."]}' \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
5. Reranking Microservice
|
||||
|
||||
```bash
|
||||
curl http://${host_ip}:3005/v1/reranking\
|
||||
-X POST \
|
||||
-d '{"initial_query":"What is Deep Learning?", "retrieved_docs": [{"text":"Deep Learning is not..."}, {"text":"Deep learning is..."}]}' \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
6. LLM backend Service
|
||||
|
||||
```bash
|
||||
# TGI service
|
||||
curl http://${host_ip}:3006/generate \
|
||||
-X POST \
|
||||
-d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
7. LLM Microservice
|
||||
|
||||
```bash
|
||||
curl http://${host_ip}:3007/v1/chat/completions\
|
||||
-X POST \
|
||||
-d '{"query":"What is Deep Learning?","max_tokens":17,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,"repetition_penalty":1.03,"stream":true}' \
|
||||
-H 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
8. MegaService
|
||||
|
||||
```bash
|
||||
curl http://${host_ip}:3008/v1/searchqna -H "Content-Type: application/json" -d '{
|
||||
"messages": "What is the latest news? Give me also the source link.",
|
||||
"stream": "true"
|
||||
}'
|
||||
```
|
||||
|
||||
9. Nginx Service
|
||||
|
||||
```bash
|
||||
curl http://${host_ip}:${NGINX_PORT}/v1/searchqna \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"messages": "What is the latest news? Give me also the source link.",
|
||||
"stream": "true"
|
||||
}'
|
||||
```
|
||||
|
||||
## Launch the UI
|
||||
|
||||
Access the UI at http://${EXTERNAL_HOST_IP}:${SEARCH_FRONTEND_SERVICE_PORT}. A page should open when navigating to this address.
|
||||
@@ -317,7 +167,3 @@ Let's enter the task for the service in the "Enter prompt here" field. For examp
|
||||
|
||||

|
||||
A correct result displayed on the page indicates that the UI service has been successfully verified.
|
||||
|
||||
## Conclusion
|
||||
|
||||
This guide should enable developers to deploy the default configuration or any of the other compose yaml files for different configurations. It also highlights the configurable parameters that can be set before deployment.
|
||||
|
||||
17
deploy.py
@@ -224,7 +224,6 @@ def generate_helm_values(example_type, deploy_config, chart_dir, action_type, no
|
||||
"modelUseHostPath": deploy_config.get("modelUseHostPath", ""),
|
||||
}
|
||||
}
|
||||
os.environ["HF_TOKEN"] = deploy_config.get("HUGGINGFACEHUB_API_TOKEN", "")
|
||||
|
||||
# Configure components
|
||||
values = configure_node_selectors(values, node_selector or {}, deploy_config)
|
||||
@@ -339,15 +338,17 @@ def get_hw_values_file(deploy_config, chart_dir):
|
||||
version = deploy_config.get("version", "1.1.0")
|
||||
|
||||
if os.path.isdir(chart_dir):
|
||||
hw_values_file = os.path.join(chart_dir, f"{device_type}-{llm_engine}-values.yaml")
|
||||
# Determine which values file to use based on version
|
||||
if version in ["1.0.0", "1.1.0"]:
|
||||
hw_values_file = os.path.join(chart_dir, f"{device_type}-values.yaml")
|
||||
else:
|
||||
hw_values_file = os.path.join(chart_dir, f"{device_type}-{llm_engine}-values.yaml")
|
||||
|
||||
if not os.path.exists(hw_values_file):
|
||||
print(f"Warning: {hw_values_file} not found")
|
||||
hw_values_file = os.path.join(chart_dir, f"{device_type}-values.yaml")
|
||||
if not os.path.exists(hw_values_file):
|
||||
print(f"Warning: {hw_values_file} not found")
|
||||
print(f"Error: Cannot find a correct values file for {device_type} with {llm_engine}")
|
||||
sys.exit(1)
|
||||
print(f"Device-specific values file found: {hw_values_file}")
|
||||
hw_values_file = None
|
||||
else:
|
||||
print(f"Device-specific values file found: {hw_values_file}")
|
||||
else:
|
||||
print(f"Error: Could not find directory for {chart_dir}")
|
||||
hw_values_file = None
|
||||
|
||||
@@ -54,7 +54,7 @@ def construct_deploy_config(deploy_config, target_node, batch_param_value=None,
|
||||
|
||||
# First determine which llm replicaCount to use based on teirerank.enabled
|
||||
services = new_config.get("services", {})
|
||||
teirerank_enabled = services.get("teirerank", {}).get("enabled", False)
|
||||
teirerank_enabled = services.get("teirerank", {}).get("enabled", True)
|
||||
|
||||
# Process each service's configuration
|
||||
for service_name, service_config in services.items():
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
kubernetes
|
||||
locust
|
||||
numpy
|
||||
opea-eval>=1.3
|
||||
opea-eval>=1.2
|
||||
prometheus_client
|
||||
pytest
|
||||
pyyaml
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
VERSION_MAJOR 1
|
||||
VERSION_MINOR 3
|
||||
VERSION_PATCH 0
|
||||