Compare commits


4 Commits
ft...v1.1rc

Author SHA1 Message Date
NeuralChatBot
bbb4e231d0 Freeze OPEA images tag
Signed-off-by: NeuralChatBot <grp_neural_chat_bot@intel.com>
2024-11-21 14:24:16 +00:00
bjzhjing
da10068964 Adjustments for helm release change (#1173)
Signed-off-by: Cathy Zhang <cathy.zhang@intel.com>
(cherry picked from commit ef2047b070)
2024-11-21 16:57:30 +08:00
Letong Han
188b568467 Fix Translation Manifest CI with MODEL_ID (#1169)
Signed-off-by: letonghan <letong.han@intel.com>
(cherry picked from commit 94231584aa)
2024-11-21 16:57:29 +08:00
minmin-intel
9e9af9766f Fix DocIndexRetriever CI error on Xeon (#1167)
Signed-off-by: minmin-intel <minmin.hou@intel.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
(cherry picked from commit c5177c5e2f)
2024-11-21 16:57:28 +08:00
8 changed files with 51 additions and 70 deletions

View File

@@ -69,10 +69,6 @@ Results will be displayed in the terminal and saved as CSV file named `1_stats.c
- Persistent Volume Claim (PVC): This is the recommended approach for production setups. For more details on using PVC, refer to [PVC](https://github.com/opea-project/GenAIInfra/blob/main/helm-charts/README.md#using-persistent-volume).
- Local Host Path: For simpler testing, ensure that each node involved in the deployment follows the steps above to locally prepare the models. After preparing the models, use `--set global.modelUseHostPath=${MODELDIR}` in the deployment command.
-- Add OPEA Helm Repository:
-```bash
-python deploy.py --add-repo
-```
- Label Nodes
```base
python deploy.py --add-label --num-nodes 2
@@ -192,13 +188,9 @@ All the test results will come to the folder `GenAIEval/evals/benchmark/benchmar
## Teardown
-After completing the benchmark, use the following commands to clean up the environment:
+After completing the benchmark, use the following command to clean up the environment:
Remove Node Labels:
-```base
+```bash
python deploy.py --delete-label
```
-Delete the OPEA Helm Repository:
-```bash
-python deploy.py --delete-repo
-```
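With the repository steps gone, the documented benchmark flow reduces to labeling nodes, deploying, and removing the labels. A minimal sketch of the updated sequence, assuming only the flags that survive in the deploy.py diff further down (node count and model directory are illustrative):

```bash
# Label the worker nodes that will host the services
python deploy.py --add-label --num-nodes 2

# Deploy; the chart is now addressed by bare name (default "chatqna")
# rather than the repo-qualified "opea/chatqna"
python deploy.py --chart-name chatqna --namespace default --model-dir ${MODELDIR}

# Teardown: only the labels need removing, since no Helm repo was added
python deploy.py --delete-label
```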

View File

@@ -83,26 +83,6 @@ def clear_labels_from_nodes(label, node_names=None):
print(f"Label {label_key} not found on node {node_name}, skipping.")
def add_helm_repo(repo_name, repo_url):
# Add the repo if it does not exist
add_command = ["helm", "repo", "add", repo_name, repo_url]
try:
subprocess.run(add_command, check=True)
print(f"Added Helm repo {repo_name} from {repo_url}.")
except subprocess.CalledProcessError as e:
print(f"Failed to add Helm repo {repo_name}: {e}")
def delete_helm_repo(repo_name):
"""Delete Helm repo if it exists."""
command = ["helm", "repo", "remove", repo_name]
try:
subprocess.run(command, check=True)
print(f"Deleted Helm repo {repo_name}.")
except subprocess.CalledProcessError:
print(f"Failed to delete Helm repo {repo_name}. It may not exist.")
def install_helm_release(release_name, chart_name, namespace, values_file, device_type):
"""Deploy a Helm release with a specified name and chart.
@@ -132,14 +112,14 @@ def install_helm_release(release_name, chart_name, namespace, values_file, devic
    if device_type == "gaudi":
        print("Device type is gaudi. Pulling Helm chart to get gaudi-values.yaml...")
+        # Combine chart_name with fixed prefix
+        chart_pull_url = f"oci://ghcr.io/opea-project/charts/{chart_name}"
        # Pull and untar the chart
-        subprocess.run(["helm", "pull", chart_name, "--untar"], check=True)
+        subprocess.run(["helm", "pull", chart_pull_url, "--untar"], check=True)
-        # Determine the directory name (get the actual chart_name if chart_name is in the format 'repo_name/chart_name', else use chart_name directly)
-        chart_dir_name = chart_name.split("/")[-1] if "/" in chart_name else chart_name
-        # Find the untarred directory (assumes only one directory matches chart_dir_name)
-        untar_dirs = glob.glob(f"{chart_dir_name}*")
+        # Find the untarred directory
+        untar_dirs = glob.glob(f"{chart_name}*")
        if untar_dirs:
            untar_dir = untar_dirs[0]
            hw_values_file = os.path.join(untar_dir, "gaudi-values.yaml")
@@ -210,20 +190,14 @@ def main():
    parser.add_argument(
        "--chart-name",
        type=str,
-        default="opea/chatqna",
-        help="The chart name to deploy, composed of repo name and chart name (default: opea/chatqna).",
+        default="chatqna",
+        help="The chart name to deploy, composed of repo name and chart name (default: chatqna).",
    )
    parser.add_argument("--namespace", default="default", help="Kubernetes namespace (default: default).")
    parser.add_argument("--hf-token", help="Hugging Face API token.")
    parser.add_argument(
        "--model-dir", help="Model directory, mounted as volumes for service access to pre-downloaded models"
    )
-    parser.add_argument("--repo-name", default="opea", help="Helm repo name to add/delete (default: opea).")
-    parser.add_argument(
-        "--repo-url",
-        default="https://opea-project.github.io/GenAIInfra",
-        help="Helm repository URL (default: https://opea-project.github.io/GenAIInfra).",
-    )
    parser.add_argument("--user-values", help="Path to a user-specified values.yaml file.")
    parser.add_argument(
        "--create-values-only", action="store_true", help="Only create the values.yaml file without deploying."
@@ -244,8 +218,6 @@ def main():
action="store_true",
help="Modify resources for services and change extraCmdArgs when creating values.yaml.",
)
parser.add_argument("--add-repo", action="store_true", help="Add the Helm repo specified by --repo-url.")
parser.add_argument("--delete-repo", action="store_true", help="Delete the Helm repo specified by --repo-name.")
parser.add_argument(
"--device-type",
type=str,
@@ -264,14 +236,6 @@ def main():
    else:
        args.num_nodes = num_node_names
-    # Helm repository management
-    if args.add_repo:
-        add_helm_repo(args.repo_name, args.repo_url)
-        return
-    elif args.delete_repo:
-        delete_helm_repo(args.repo_name)
-        return
    # Node labeling management
    if args.add_label:
        add_labels_to_nodes(args.num_nodes, args.label, args.node_names)
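The net effect of these deploy.py changes is that charts are pulled straight from the project's OCI registry rather than from a locally added Helm repo. Roughly equivalent manual commands, using the registry URL hard-coded in the diff (this is a sketch for illustration, not part of the change):

```bash
# Old flow: a classic Helm repo had to be added first
# helm repo add opea https://opea-project.github.io/GenAIInfra
# helm pull opea/chatqna --untar

# New flow: pull directly from the OCI registry, no `helm repo add` required
helm pull oci://ghcr.io/opea-project/charts/chatqna --untar

# The untarred directory is globbed by chart name; for gaudi deployments the
# script then picks up this values file:
ls chatqna/gaudi-values.yaml
```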

View File

@@ -6,7 +6,7 @@ import argparse
import requests
-def search_knowledge_base(query: str, url: str, request_type="chat_completion") -> str:
+def search_knowledge_base(query: str, url: str, request_type: str) -> str:
    """Search the knowledge base for a specific query."""
    print(url)
    proxies = {"http": ""}
@@ -18,12 +18,13 @@ def search_knowledge_base(query: str, url: str, request_type="chat_completion")
"top_n": 2,
}
else:
print("Sending text request")
print("Sending textdoc request")
payload = {
"text": query,
}
response = requests.post(url, json=payload, proxies=proxies)
print(response)
print(response.json().keys())
if "documents" in response.json():
docs = response.json()["documents"]
context = ""
@@ -32,7 +33,6 @@ def search_knowledge_base(query: str, url: str, request_type="chat_completion")
                context = str(i) + ": " + doc
            else:
                context += "\n" + str(i) + ": " + doc
-        # print(context)
        return context
    elif "text" in response.json():
        return response.json()["text"]
@@ -44,7 +44,6 @@ def search_knowledge_base(query: str, url: str, request_type="chat_completion")
context = doc["text"]
else:
context += "\n" + doc["text"]
# print(context)
return context
else:
return "Error parsing response from the knowledge base."

View File

@@ -15,6 +15,7 @@ LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() {
echo "Building Docker Images...."
cd $WORKPATH/docker_image_build
if [ ! -d "GenAIComps" ] ; then
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
@@ -26,9 +27,11 @@ function build_docker_images() {
docker pull redis/redis-stack:7.2.0-v9
docker pull ghcr.io/huggingface/tei-gaudi:1.5.0
docker images && sleep 1s
echo "Docker images built!"
}
function start_services() {
echo "Starting Docker Services...."
cd $WORKPATH/docker_compose/intel/hpu/gaudi
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
@@ -47,6 +50,7 @@ function start_services() {
# Start Docker Containers
docker compose up -d
sleep 20
echo "Docker services started!"
}
function validate() {

View File

@@ -15,8 +15,10 @@ LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() {
echo "Building Docker Images...."
cd $WORKPATH/docker_image_build
if [ ! -d "GenAIComps" ] ; then
echo "Cloning GenAIComps repository"
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
fi
service_list="dataprep-redis embedding-tei retriever-redis reranking-tei doc-index-retriever"
@@ -25,9 +27,12 @@ function build_docker_images() {
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
docker pull redis/redis-stack:7.2.0-v9
docker images && sleep 1s
echo "Docker images built!"
}
function start_services() {
echo "Starting Docker Services...."
cd $WORKPATH/docker_compose/intel/cpu/xeon
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
@@ -45,7 +50,8 @@ function start_services() {
# Start Docker Containers
docker compose up -d
-sleep 20
+sleep 5m
+echo "Docker services started!"
}
function validate() {
@@ -66,7 +72,7 @@ function validate_megaservice() {
echo "===========Ingest data=================="
local CONTENT=$(http_proxy="" curl -X POST "http://${ip_address}:6007/v1/dataprep" \
-H "Content-Type: multipart/form-data" \
--F 'link_list=["https://opea.dev"]')
+-F 'link_list=["https://opea.dev/"]')
local EXIT_CODE=$(validate "$CONTENT" "Data preparation succeeded" "dataprep-redis-service-xeon")
echo "$EXIT_CODE"
local EXIT_CODE="${EXIT_CODE:0-1}"
@@ -77,19 +83,26 @@ function validate_megaservice() {
fi
# Curl the Mega Service
echo "================Testing retriever service: Default params================"
local CONTENT=$(curl http://${ip_address}:8889/v1/retrievaltool -X POST -H "Content-Type: application/json" -d '{
"messages": "Explain the OPEA project?"
echo "================Testing retriever service: Text Request ================"
cd $WORKPATH/tests
local CONTENT=$(http_proxy="" curl http://${ip_address}:8889/v1/retrievaltool -X POST -H "Content-Type: application/json" -d '{
"text": "Explain the OPEA project?"
}')
# local CONTENT=$(python test.py --host_ip ${ip_address} --request_type text)
local EXIT_CODE=$(validate "$CONTENT" "OPEA" "doc-index-retriever-service-xeon")
echo "$EXIT_CODE"
local EXIT_CODE="${EXIT_CODE:0-1}"
echo "return value is $EXIT_CODE"
if [ "$EXIT_CODE" == "1" ]; then
-docker logs tei-embedding-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
+echo "=============Embedding container log=================="
+docker logs embedding-tei-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
+echo "=============Retriever container log=================="
docker logs retriever-redis-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
-docker logs reranking-tei-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
+echo "=============TEI Reranking log=================="
+docker logs tei-reranking-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
+echo "=============Reranking container log=================="
+docker logs reranking-tei-xeon-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
+echo "=============Doc-index-retriever container log=================="
docker logs doc-index-retriever-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
exit 1
fi
@@ -102,9 +115,15 @@ function validate_megaservice() {
local EXIT_CODE="${EXIT_CODE:0-1}"
echo "return value is $EXIT_CODE"
if [ "$EXIT_CODE" == "1" ]; then
-docker logs tei-embedding-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
+echo "=============Embedding container log=================="
+docker logs embedding-tei-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
+echo "=============Retriever container log=================="
docker logs retriever-redis-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
-docker logs reranking-tei-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
+echo "=============TEI Reranking log=================="
+docker logs tei-reranking-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
+echo "=============Reranking container log=================="
+docker logs reranking-tei-xeon-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
+echo "=============Doc-index-retriever container log=================="
docker logs doc-index-retriever-server | tee -a ${LOG_PATH}/doc-index-retriever-service-xeon.log
exit 1
fi
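The repeated banner-plus-`docker logs` pattern added in both failure paths could be captured in a helper; a possible refactor (a sketch, not what the script does verbatim), with container names taken from the diff:

```bash
dump_logs() {
  # Print a named delimiter, then append the container's logs to the shared log file
  local name=$1
  echo "=============${name} log=================="
  docker logs "$name" | tee -a "${LOG_PATH}/doc-index-retriever-service-xeon.log"
}

dump_logs embedding-tei-server
dump_logs retriever-redis-server
dump_logs tei-reranking-server
dump_logs reranking-tei-xeon-server
dump_logs doc-index-retriever-server
```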

View File

@@ -10,7 +10,7 @@ metadata:
    app.kubernetes.io/instance: translation
    app.kubernetes.io/version: "2.1.0"
data:
-  LLM_MODEL_ID: "haoranxu/ALMA-13B"
+  MODEL_ID: "haoranxu/ALMA-13B"
  PORT: "2080"
  HF_TOKEN: "insert-your-huggingface-token-here"
  http_proxy: ""

View File

@@ -10,7 +10,7 @@ metadata:
    app.kubernetes.io/instance: translation
    app.kubernetes.io/version: "2.1.0"
data:
-  LLM_MODEL_ID: "haoranxu/ALMA-13B"
+  MODEL_ID: "haoranxu/ALMA-13B"
  PORT: "2080"
  HF_TOKEN: "insert-your-huggingface-token-here"
  http_proxy: ""

version.txt Normal file
View File

@@ -0,0 +1,3 @@
+VERSION_MAJOR 1
+VERSION_MINOR 1
+VERSION_PATCH 0
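The new version.txt encodes release 1.1.0, consistent with the v1.1rc side of this compare. A sketch of assembling a version string from this file format (the consuming script is hypothetical; the key names are from the hunk above):

```bash
# Hypothetical consumer: build "1.1.0" from the VERSION_* keys in version.txt
VERSION=$(awk '
  /^VERSION_MAJOR/ { maj = $2 }
  /^VERSION_MINOR/ { min = $2 }
  /^VERSION_PATCH/ { pat = $2 }
  END { printf "%s.%s.%s", maj, min, pat }
' version.txt)
echo "$VERSION"   # -> 1.1.0
```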