Compare commits: main ... reorg_helm (2 commits)

| Author | SHA1 | Date |
| --- | --- | --- |
|  | 91940b8058 |  |
|  | 7d779513f5 |  |
DocSum/kubernetes/helm-charts/.helmignore (new file, 23 lines)
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
DocSum/kubernetes/helm-charts/Chart.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

apiVersion: v2
name: docsum
description: The Helm chart to deploy DocSum
type: application
dependencies:
  - name: tgi
    version: 1.0.0
    repository: "file://../common/tgi"
  - name: llm-uservice
    version: 1.0.0
    repository: "file://../common/llm-uservice"
  - name: ui
    version: 1.0.0
    repository: "file://../common/ui"
    alias: docsum-ui
version: 1.0.0
appVersion: "v1.0"
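The three `file://` dependencies above are resolved from the local repository, so they must be vendored into the chart's `charts/` directory before installation. The README below covers this; for reference, a minimal sketch run from the `helm-charts` directory:

```console
./update_dependency.sh
helm dependency update docsum
helm dependency list docsum
```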
DocSum/kubernetes/helm-charts/README.md (new file, 59 lines)
@@ -0,0 +1,59 @@
# DocSum

Helm chart for deploying the DocSum service.

DocSum depends on the LLM microservice; refer to llm-uservice for more configuration details.

## Installing the Chart

To install the chart, run the following:

```console
cd GenAIInfra/helm-charts/
./update_dependency.sh
helm dependency update docsum
export HFTOKEN="insert-your-huggingface-token-here"
export MODELDIR="/mnt/opea-models"
export MODELNAME="Intel/neural-chat-7b-v3-3"
helm install docsum docsum --set global.HUGGINGFACEHUB_API_TOKEN=${HFTOKEN} --set global.modelUseHostPath=${MODELDIR} --set tgi.LLM_MODEL_ID=${MODELNAME}
# To use the Gaudi device
# helm install docsum docsum --set global.HUGGINGFACEHUB_API_TOKEN=${HFTOKEN} --values docsum/gaudi-values.yaml
```
## Verify

To verify the installation, run the command `kubectl get pod` and make sure all pods are running.

You can verify the workload either with a curl command or through the UI.
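For example, from the cluster's kubectl context (only the command is shown; the pod list will vary by deployment):

```console
kubectl get pod
```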
### Verify the workload through the curl command

Run the command `kubectl port-forward svc/docsum 8888:8888` to expose the DocSum service for access.
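For example, in one terminal:

```console
kubectl port-forward svc/docsum 8888:8888
```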
Open another terminal and run the following command to verify that the service is working:

```console
curl http://localhost:8888/v1/docsum \
   -H 'Content-Type: application/json' \
   -d '{"messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}'
```
### Verify the workload through the UI

The UI has already been installed via the Helm chart. To access it, use the external IP of one of your Kubernetes nodes along with the NGINX NodePort. You can find the port using the following command:

```bash
export port=$(kubectl get service docsum-nginx --output='jsonpath={.spec.ports[0].nodePort}')
echo $port
```

Open a browser at `http://<k8s-node-ip-address>:${port}` to interact with the DocSum workload.
## Values

| Key               | Type   | Default                       | Description                                                                             |
| ----------------- | ------ | ----------------------------- | --------------------------------------------------------------------------------------- |
| image.repository  | string | `"opea/docsum"`               |                                                                                         |
| service.port      | string | `"8888"`                      |                                                                                         |
| tgi.LLM_MODEL_ID  | string | `"Intel/neural-chat-7b-v3-3"` | Model id from https://huggingface.co/, or a pre-downloaded model directory              |
| global.monitoring | bool   | `false`                       | Enable usage metrics for the service components. See ../monitoring.md before enabling! |
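To confirm which of these values are in effect for a deployed release, Helm's built-in inspection can be used; a minimal sketch, assuming the release is named `docsum` as in the install example above:

```console
# user-supplied overrides only
helm get values docsum
# all computed values, including chart defaults
helm get values docsum --all
```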
DocSum/kubernetes/helm-charts/ci-gaudi-values.yaml (new file, 1 line)
@@ -0,0 +1 @@
gaudi-values.yaml
DocSum/kubernetes/helm-charts/ci-values.yaml (new file, 1 line)
@@ -0,0 +1 @@
values.yaml
DocSum/kubernetes/helm-charts/gaudi-values.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

tgi:
  accelDevice: "gaudi"
  image:
    repository: ghcr.io/huggingface/tgi-gaudi
    tag: "2.0.5"
  resources:
    limits:
      habana.ai/gaudi: 1
  MAX_INPUT_LENGTH: "1024"
  MAX_TOTAL_TOKENS: "2048"
  CUDA_GRAPHS: ""
  livenessProbe:
    initialDelaySeconds: 5
    periodSeconds: 5
    timeoutSeconds: 1
  readinessProbe:
    initialDelaySeconds: 5
    periodSeconds: 5
    timeoutSeconds: 1
  startupProbe:
    initialDelaySeconds: 5
    periodSeconds: 5
    timeoutSeconds: 1
    failureThreshold: 120
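This values file is meant to be applied with `--values` on Gaudi nodes, as in the Gaudi variant of the README install command; a sketch assuming the same `HFTOKEN` variable used there:

```console
helm install docsum docsum \
  --set global.HUGGINGFACEHUB_API_TOKEN=${HFTOKEN} \
  --values docsum/gaudi-values.yaml
```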
DocSum/kubernetes/helm-charts/templates/_helpers.tpl (new file, 62 lines)
@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "docsum.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "docsum.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "docsum.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "docsum.labels" -}}
helm.sh/chart: {{ include "docsum.chart" . }}
{{ include "docsum.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "docsum.selectorLabels" -}}
app.kubernetes.io/name: {{ include "docsum.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "docsum.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "docsum.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
DocSum/kubernetes/helm-charts/templates/deployment.yaml (new file, 81 lines)
@@ -0,0 +1,81 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "docsum.fullname" . }}
  labels:
    {{- include "docsum.labels" . | nindent 4 }}
    app: {{ include "docsum.fullname" . }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "docsum.selectorLabels" . | nindent 6 }}
      app: {{ include "docsum.fullname" . }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "docsum.selectorLabels" . | nindent 8 }}
        app: {{ include "docsum.fullname" . }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Release.Name }}
          env:
            - name: LLM_SERVICE_HOST_IP
              {{- if .Values.LLM_SERVICE_HOST_IP }}
              value: {{ .Values.LLM_SERVICE_HOST_IP | quote }}
              {{- else }}
              value: {{ .Release.Name }}-llm-uservice
              {{- end }}
            #- name: MEGA_SERVICE_PORT
            #  value: {{ .Values.port }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          volumeMounts:
            - mountPath: /tmp
              name: tmp
          ports:
            - name: docsum
              containerPort: {{ .Values.port }}
              protocol: TCP
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      volumes:
        - name: tmp
          emptyDir: {}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- if .Values.evenly_distributed }}
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: kubernetes.io/hostname
          whenUnsatisfiable: ScheduleAnyway
          labelSelector:
            matchLabels:
              {{- include "docsum.selectorLabels" . | nindent 14 }}
              app: {{ include "docsum.fullname" . }}
      {{- end }}
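Note the `LLM_SERVICE_HOST_IP` fallback above: when the value is left empty (the default in values.yaml), the container is pointed at the bundled `<release-name>-llm-uservice`. To target an LLM service running elsewhere, the value can be set explicitly; a sketch using a hypothetical service name:

```console
# "my-external-llm-svc" is a placeholder for your own LLM service host
helm install docsum docsum --set LLM_SERVICE_HOST_IP=my-external-llm-svc
```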
@@ -0,0 +1,93 @@
apiVersion: v1
data:
  default.conf: |+
    # Copyright (C) 2024 Intel Corporation
    # SPDX-License-Identifier: Apache-2.0


    server {
        listen       80;
        listen  [::]:80;

        location /home {
            alias /usr/share/nginx/html/index.html;
        }

        location / {
            proxy_pass http://{{ include "ui.fullname" (index .Subcharts "docsum-ui") }}:{{ index .Values "docsum-ui" "service" "port" }};
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }

        location /v1/docsum {
            proxy_pass http://{{ include "docsum.fullname" . }}:{{ .Values.service.port }};
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }
    }

kind: ConfigMap
metadata:
  name: {{ include "docsum.fullname" . }}-nginx-config
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "docsum.fullname" . }}-nginx
  labels:
    {{- include "docsum.labels" . | nindent 4 }}
    app: {{ include "docsum.fullname" . }}-nginx
spec:
  selector:
    matchLabels:
      {{- include "docsum.selectorLabels" . | nindent 6 }}
      app: {{ include "docsum.fullname" . }}-nginx
  template:
    metadata:
      labels:
        {{- include "docsum.selectorLabels" . | nindent 8 }}
        app: {{ include "docsum.fullname" . }}-nginx
    spec:
      containers:
        - image: nginx:1.27.1
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          name: nginx
          volumeMounts:
            - mountPath: /etc/nginx/conf.d
              name: nginx-config-volume
          securityContext: {}
      volumes:
        - configMap:
            defaultMode: 420
            name: {{ include "docsum.fullname" . }}-nginx-config
          name: nginx-config-volume
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ include "docsum.fullname" . }}-nginx
spec:
  ports:
    - port: 80
      protocol: TCP
      targetPort: 80
  selector:
    {{- include "docsum.selectorLabels" . | nindent 4 }}
    app: {{ include "docsum.fullname" . }}-nginx
  type: {{ .Values.nginx.service.type }}
DocSum/kubernetes/helm-charts/templates/service.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

apiVersion: v1
kind: Service
metadata:
  name: {{ include "docsum.fullname" . }}
  labels:
    {{- include "docsum.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: {{ .Values.port }}
      protocol: TCP
      name: docsum
  selector:
    {{- include "docsum.selectorLabels" . | nindent 4 }}
    app: {{ include "docsum.fullname" . }}
DocSum/kubernetes/helm-charts/templates/servicemonitor.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

{{- if .Values.global.monitoring }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "docsum.fullname" . }}
  labels:
    release: {{ .Values.global.prometheusRelease }}
spec:
  selector:
    matchLabels:
      {{- include "docsum.selectorLabels" . | nindent 6 }}
  endpoints:
    - port: docsum
      interval: 5s
{{- end }}
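This ServiceMonitor is rendered only when `global.monitoring` is enabled; a sketch of switching it on at install time (assumes a Prometheus stack whose Helm release name matches `global.prometheusRelease`, and see the README's pointer to ../monitoring.md first):

```console
helm install docsum docsum \
  --set global.monitoring=true \
  --set global.prometheusRelease=prometheus-stack
```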
DocSum/kubernetes/helm-charts/templates/tests/test-pod.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "docsum.fullname" . }}-testpod"
  labels:
    {{- include "docsum.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test
    #"helm.sh/hook-delete-policy": "hook-succeeded, hook-failure"
spec:
  containers:
    - name: curl
      image: python:3.10.14
      command: ['bash', '-c']
      args:
        - |
          max_retry=20;
          for ((i=1; i<=max_retry; i++)); do
            curl http://{{ include "docsum.fullname" . }}:{{ .Values.service.port }}/v1/docsum -sS --fail-with-body \
              -H 'Content-Type: multipart/form-data' \
              -F "messages=Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5." \
              -F "max_tokens=32" && break;
            curlcode=$?
            if [[ $curlcode -eq 7 ]]; then sleep 10; else echo "curl failed with code $curlcode"; exit 1; fi;
          done;
          if [ $i -gt $max_retry ]; then echo "test failed with maximum retry"; exit 1; fi
  restartPolicy: Never
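Because this pod carries the `helm.sh/hook: test` annotation, it can be run on demand after installation with Helm's test command; a minimal sketch, assuming the release is named `docsum`:

```console
helm test docsum
```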
DocSum/kubernetes/helm-charts/values.yaml (new file, 73 lines)
@@ -0,0 +1,73 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# Default values for docsum.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1
LLM_SERVICE_HOST_IP: ""

image:
  repository: opea/docsum
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: "latest"

port: 8888
service:
  type: ClusterIP
  port: 8888

nginx:
  service:
    type: NodePort

securityContext:
  readOnlyRootFilesystem: true
  allowPrivilegeEscalation: false
  runAsNonRoot: true
  runAsUser: 1000
  capabilities:
    drop:
      - ALL
  seccompProfile:
    type: RuntimeDefault

nodeSelector: {}

tolerations: []

affinity: {}

# To override values in subchart llm-uservice
llm-uservice:
  image:
    repository: opea/llm-docsum-tgi

# To override values in subchart tgi
tgi:
  LLM_MODEL_ID: Intel/neural-chat-7b-v3-3

docsum-ui:
  image:
    repository: opea/docsum-ui
    tag: "latest"
  BACKEND_SERVICE_ENDPOINT: "/v1/docsum"
  containerPort: 5173

global:
  http_proxy: ""
  https_proxy: ""
  no_proxy: ""
  HUGGINGFACEHUB_API_TOKEN: "insert-your-huggingface-token-here"
  # set modelUseHostPath or modelUsePVC to use model cache.
  modelUseHostPath: ""
  # modelUseHostPath: /mnt/opea-models
  # modelUsePVC: model-volume

  # Install Prometheus serviceMonitors for service components
  monitoring: false

  # Prometheus Helm install release name needed for serviceMonitors
  prometheusRelease: prometheus-stack
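To sanity-check that these defaults render into valid manifests before installing, the chart can be linted and templated locally; a sketch, run from the `helm-charts` directory referenced in the README (dependencies must have been updated first):

```console
helm lint docsum
helm template docsum docsum > /tmp/docsum-rendered.yaml
```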