Use official tei gaudi image and update tgi gaudi version (#810)

Signed-off-by: lvliang-intel <liang1.lv@intel.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
lvliang-intel
2024-09-23 17:52:56 +08:00
committed by GitHub
parent c35fe0b429
commit 3fb60608b3
72 changed files with 8024 additions and 154 deletions

View File

@@ -64,10 +64,6 @@ jobs:
       run: |
         cd ${{ github.workspace }}/${{ inputs.example }}/docker_image_build
         docker_compose_path=${{ github.workspace }}/${{ inputs.example }}/docker_image_build/build.yaml
-        if [[ $(grep -c "tei-gaudi:" ${docker_compose_path}) != 0 ]]; then
-          git clone https://github.com/huggingface/tei-gaudi.git
-          cd tei-gaudi && git rev-parse HEAD && cd ../
-        fi
         if [[ $(grep -c "vllm:" ${docker_compose_path}) != 0 ]]; then
           git clone https://github.com/vllm-project/vllm.git
           cd vllm && git rev-parse HEAD && cd ../
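With the official TEI Gaudi image available, the CI no longer clones huggingface/tei-gaudi and builds it from source. A minimal sketch of the equivalent manual step, assuming the prebuilt image is pulled directly (the tag is an assumption; the Kubernetes manifests later in this commit pin latest):

# Pull the official prebuilt TEI Gaudi image instead of building from source
# (tag assumed; the manifests in this commit use :latest)
docker pull ghcr.io/huggingface/tei-gaudi:latest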

View File

@@ -3,7 +3,7 @@
 services:
   tgi-server:
-    image: ghcr.io/huggingface/tgi-gaudi:2.0.4
+    image: ghcr.io/huggingface/tgi-gaudi:2.0.5
     container_name: tgi-server
     ports:
       - "8085:80"
@@ -13,12 +13,16 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
       HABANA_VISIBLE_DEVICES: all
       OMPI_MCA_btl_vader_single_copy_mechanism: none
       PT_HPU_ENABLE_LAZY_COLLECTIVES: true
+      ENABLE_HPU_GRAPH: true
+      LIMIT_HPU_GRAPH: true
+      USE_FLASH_ATTENTION: true
+      FLASH_ATTENTION_RECOMPUTE: true
     runtime: habana
     cap_add:
       - SYS_NICE
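To confirm the new HPU graph and flash-attention settings actually reach the container, the rendered compose config or the live environment can be inspected. A minimal sketch, using the container name from this compose file:

# Show the effective service definition after variable substitution
docker compose config | grep -A20 'tgi-server:'
# Or check the running container's environment directly
docker exec tgi-server env | grep -E 'HPU_GRAPH|FLASH_ATTENTION'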

View File

@@ -51,7 +51,7 @@ services:
     environment:
       TTS_ENDPOINT: ${TTS_ENDPOINT}
   tgi-service:
-    image: ghcr.io/huggingface/tgi-gaudi:2.0.1
+    image: ghcr.io/huggingface/tgi-gaudi:2.0.5
     container_name: tgi-gaudi-server
     ports:
       - "3006:80"
@@ -61,11 +61,15 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
       HABANA_VISIBLE_DEVICES: all
       OMPI_MCA_btl_vader_single_copy_mechanism: none
+      ENABLE_HPU_GRAPH: true
+      LIMIT_HPU_GRAPH: true
+      USE_FLASH_ATTENTION: true
+      FLASH_ATTENTION_RECOMPUTE: true
     runtime: habana
     cap_add:
       - SYS_NICE
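Once the upgraded tgi-gaudi:2.0.5 container is up, readiness can be smoke-tested through the published port. A minimal sketch, assuming the service runs on the local host (/health and /generate are standard TGI routes):

# Wait for the TGI Gaudi server mapped to host port 3006
curl -sf http://localhost:3006/health && echo "tgi-gaudi is ready"
# Minimal generation request against the TGI /generate API
curl -s http://localhost:3006/generate -X POST \
  -H 'Content-Type: application/json' \
  -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17}}'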

View File

@@ -25,7 +25,7 @@ The AudioQnA uses the below prebuilt images if you choose a Xeon deployment
 Should you desire to use the Gaudi accelerator, two alternate images are used for the embedding and llm services.
 For Gaudi:
-- tgi-service: ghcr.io/huggingface/tgi-gaudi:1.2.1
+- tgi-service: ghcr.io/huggingface/tgi-gaudi:2.0.5
 - whisper-gaudi: opea/whisper-gaudi:latest
 - speecht5-gaudi: opea/speecht5-gaudi:latest

View File

@@ -271,7 +271,7 @@ spec:
       - envFrom:
         - configMapRef:
             name: audio-qna-config
-        image: ghcr.io/huggingface/tgi-gaudi:2.0.1
+        image: ghcr.io/huggingface/tgi-gaudi:2.0.5
         name: llm-dependency-deploy-demo
         securityContext:
           capabilities:
@@ -303,6 +303,14 @@ spec:
           value: none
         - name: PT_HPU_ENABLE_LAZY_COLLECTIVES
           value: 'true'
+        - name: ENABLE_HPU_GRAPH
+          value: 'true'
+        - name: LIMIT_HPU_GRAPH
+          value: 'true'
+        - name: USE_FLASH_ATTENTION
+          value: 'true'
+        - name: FLASH_ATTENTION_RECOMPUTE
+          value: 'true'
         - name: runtime
           value: habana
         - name: HABANA_VISIBLE_DEVICES
@@ -315,7 +323,7 @@ spec:
       volumes:
       - name: model-volume
         hostPath:
-          path: /home/sdp/cesg
+          path: /mnt/models
           type: Directory
       - name: shm
         emptyDir:
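After applying the updated manifest, the added environment variables can be verified on the running pod. A minimal sketch; the pod name prefix is an assumption based on the deployment naming in this file:

# Inspect the env of the first llm-dependency pod (name prefix assumed)
kubectl get pods -o name | grep llm-dependency-deploy | head -n1 \
  | xargs -I{} kubectl exec {} -- env | grep -E 'HPU_GRAPH|FLASH_ATTENTION'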

View File

@@ -22,7 +22,7 @@ function build_docker_images() {
     service_list="audioqna whisper-gaudi asr llm-tgi speecht5-gaudi tts"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
-    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1
+    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
     docker images && sleep 1s
 }

View File

@@ -22,7 +22,7 @@ function build_docker_images() {
     service_list="audioqna whisper asr llm-tgi speecht5 tts"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
-    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1
+    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
     docker images && sleep 1s
 }
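Both test scripts now pull the 2.0.5 tag. A minimal sketch for asserting the expected tag is present locally after the pull step:

# Fail if the expected TGI Gaudi tag is missing from the local image cache
docker images ghcr.io/huggingface/tgi-gaudi --format '{{.Tag}}' | grep -qx '2.0.5'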

View File

@@ -0,0 +1,653 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
EMBEDDING_SERVICE_HOST_IP: embedding-svc
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
INDEX_NAME: rag-redis
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
LLM_SERVICE_HOST_IP: llm-svc
NODE_SELECTOR: chatqna-opea
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
RERANK_MODEL_ID: BAAI/bge-reranker-base
RERANK_SERVICE_HOST_IP: reranking-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
kind: ConfigMap
metadata:
name: qna-config
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
ports:
- containerPort: 8888
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
ports:
- containerPort: 6007
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: dataprep-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: dataprep-svc
namespace: default
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
imagePullPolicy: IfNotPresent
name: embedding-dependency-deploy
ports:
- containerPort: 80
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: embedding-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/embedding-tei:latest
imagePullPolicy: IfNotPresent
name: embedding-deploy
ports:
- containerPort: 6000
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: embedding-svc
namespace: default
spec:
ports:
- name: service
port: 6000
targetPort: 6000
selector:
app: embedding-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 31
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '2048'
- --max-total-tokens
- '4096'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HUGGING_FACE_HUB_TOKEN
value: ${HF_TOKEN}
- name: ENABLE_HPU_GRAPH
value: 'true'
- name: LIMIT_HPU_GRAPH
value: 'true'
- name: USE_FLASH_ATTENTION
value: 'true'
- name: FLASH_ATTENTION_RECOMPUTE
value: 'true'
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
imagePullPolicy: IfNotPresent
name: llm-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
securityContext:
capabilities:
add:
- SYS_NICE
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: llm-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: llm-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/llm-tgi:latest
imagePullPolicy: IfNotPresent
name: llm-deploy
ports:
- containerPort: 9000
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: llm-svc
namespace: default
spec:
ports:
- name: service
port: 9000
targetPort: 9000
selector:
app: llm-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: reranking-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(RERANK_MODEL_ID)
- --auto-truncate
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
- name: MAX_WARMUP_SEQUENCE_LENGTH
value: '512'
envFrom:
- configMapRef:
name: qna-config
image: opea/tei-gaudi:latest
imagePullPolicy: IfNotPresent
name: reranking-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: reranking-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: reranking-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 8808
targetPort: 80
selector:
app: reranking-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: reranking-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/reranking-tei:latest
imagePullPolicy: IfNotPresent
name: reranking-deploy
ports:
- containerPort: 8000
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: reranking-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: reranking-svc
namespace: default
spec:
ports:
- name: service
port: 8000
targetPort: 8000
selector:
app: reranking-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
ports:
- containerPort: 7000
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: retriever-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: retriever-svc
namespace: default
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: vector-db
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: redis/redis-stack:7.2.0-v9
imagePullPolicy: IfNotPresent
name: vector-db
ports:
- containerPort: 6379
- containerPort: 8001
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: vector-db
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP
---
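This manifest embeds ${HF_TOKEN} in the ConfigMap, and kubectl does not expand shell variables on its own, so the placeholder has to be substituted before applying. A minimal sketch of one way to do that (the file name is hypothetical; envsubst ships with gettext):

# Substitute the HF_TOKEN placeholder, then apply the rendered manifest
export HF_TOKEN=hf_xxx   # your Hugging Face Hub token
envsubst < chatqna_gaudi.yaml | kubectl apply -f -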

View File

@@ -0,0 +1,653 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
EMBEDDING_SERVICE_HOST_IP: embedding-svc
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
INDEX_NAME: rag-redis
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
LLM_SERVICE_HOST_IP: llm-svc
NODE_SELECTOR: chatqna-opea
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
RERANK_MODEL_ID: BAAI/bge-reranker-base
RERANK_SERVICE_HOST_IP: reranking-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
kind: ConfigMap
metadata:
name: qna-config
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
ports:
- containerPort: 8888
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
ports:
- containerPort: 6007
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: dataprep-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: dataprep-svc
namespace: default
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
imagePullPolicy: IfNotPresent
name: embedding-dependency-deploy
ports:
- containerPort: 80
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: embedding-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/embedding-tei:latest
imagePullPolicy: IfNotPresent
name: embedding-deploy
ports:
- containerPort: 6000
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: embedding-svc
namespace: default
spec:
ports:
- name: service
port: 6000
targetPort: 6000
selector:
app: embedding-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 7
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '2048'
- --max-total-tokens
- '4096'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HUGGING_FACE_HUB_TOKEN
value: ${HF_TOKEN}
- name: ENABLE_HPU_GRAPH
value: 'true'
- name: LIMIT_HPU_GRAPH
value: 'true'
- name: USE_FLASH_ATTENTION
value: 'true'
- name: FLASH_ATTENTION_RECOMPUTE
value: 'true'
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
imagePullPolicy: IfNotPresent
name: llm-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
securityContext:
capabilities:
add:
- SYS_NICE
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: llm-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: llm-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/llm-tgi:latest
imagePullPolicy: IfNotPresent
name: llm-deploy
ports:
- containerPort: 9000
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: llm-svc
namespace: default
spec:
ports:
- name: service
port: 9000
targetPort: 9000
selector:
app: llm-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: reranking-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(RERANK_MODEL_ID)
- --auto-truncate
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
- name: MAX_WARMUP_SEQUENCE_LENGTH
value: '512'
envFrom:
- configMapRef:
name: qna-config
image: opea/tei-gaudi:latest
imagePullPolicy: IfNotPresent
name: reranking-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: reranking-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: reranking-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 8808
targetPort: 80
selector:
app: reranking-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: reranking-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/reranking-tei:latest
imagePullPolicy: IfNotPresent
name: reranking-deploy
ports:
- containerPort: 8000
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: reranking-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: reranking-svc
namespace: default
spec:
ports:
- name: service
port: 8000
targetPort: 8000
selector:
app: reranking-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
ports:
- containerPort: 7000
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: retriever-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: retriever-svc
namespace: default
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: vector-db
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: redis/redis-stack:7.2.0-v9
imagePullPolicy: IfNotPresent
name: vector-db
ports:
- containerPort: 6379
- containerPort: 8001
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: vector-db
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP
---

View File

@@ -0,0 +1,653 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
EMBEDDING_SERVICE_HOST_IP: embedding-svc
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
INDEX_NAME: rag-redis
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
LLM_SERVICE_HOST_IP: llm-svc
NODE_SELECTOR: chatqna-opea
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
RERANK_MODEL_ID: BAAI/bge-reranker-base
RERANK_SERVICE_HOST_IP: reranking-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
kind: ConfigMap
metadata:
name: qna-config
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
ports:
- containerPort: 8888
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
ports:
- containerPort: 6007
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: dataprep-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: dataprep-svc
namespace: default
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
imagePullPolicy: IfNotPresent
name: embedding-dependency-deploy
ports:
- containerPort: 80
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: embedding-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/embedding-tei:latest
imagePullPolicy: IfNotPresent
name: embedding-deploy
ports:
- containerPort: 6000
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: embedding-svc
namespace: default
spec:
ports:
- name: service
port: 6000
targetPort: 6000
selector:
app: embedding-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 15
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '2048'
- --max-total-tokens
- '4096'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HUGGING_FACE_HUB_TOKEN
value: ${HF_TOKEN}
- name: ENABLE_HPU_GRAPH
value: 'true'
- name: LIMIT_HPU_GRAPH
value: 'true'
- name: USE_FLASH_ATTENTION
value: 'true'
- name: FLASH_ATTENTION_RECOMPUTE
value: 'true'
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
imagePullPolicy: IfNotPresent
name: llm-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
securityContext:
capabilities:
add:
- SYS_NICE
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: llm-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: llm-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/llm-tgi:latest
imagePullPolicy: IfNotPresent
name: llm-deploy
ports:
- containerPort: 9000
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: llm-svc
namespace: default
spec:
ports:
- name: service
port: 9000
targetPort: 9000
selector:
app: llm-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: reranking-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(RERANK_MODEL_ID)
- --auto-truncate
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
- name: MAX_WARMUP_SEQUENCE_LENGTH
value: '512'
envFrom:
- configMapRef:
name: qna-config
image: opea/tei-gaudi:latest
imagePullPolicy: IfNotPresent
name: reranking-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: reranking-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: reranking-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 8808
targetPort: 80
selector:
app: reranking-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: reranking-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/reranking-tei:latest
imagePullPolicy: IfNotPresent
name: reranking-deploy
ports:
- containerPort: 8000
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: reranking-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: reranking-svc
namespace: default
spec:
ports:
- name: service
port: 8000
targetPort: 8000
selector:
app: reranking-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
ports:
- containerPort: 7000
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: retriever-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: retriever-svc
namespace: default
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: vector-db
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: redis/redis-stack:7.2.0-v9
imagePullPolicy: IfNotPresent
name: vector-db
ports:
- containerPort: 6379
- containerPort: 8001
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: vector-db
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP
---
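The three 653-line variants above differ chiefly in the llm-dependency-deploy replica count (31, 7, and 15), since each replica requests one habana.ai/gaudi device and the pool is sized to the cards available in the target cluster. A minimal sketch for resizing an already-applied deployment:

# Resize the TGI Gaudi pool to match the free Gaudi cards (count is an example)
kubectl scale deployment llm-dependency-deploy --replicas=8
kubectl rollout status deployment llm-dependency-deploy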

View File

@@ -0,0 +1,742 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
kind: ConfigMap
metadata:
name: qna-config
namespace: default
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
RERANK_MODEL_ID: BAAI/bge-reranker-base
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
INDEX_NAME: rag-redis
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
EMBEDDING_SERVICE_HOST_IP: embedding-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
RERANK_SERVICE_HOST_IP: reranking-svc
NODE_SELECTOR: chatqna-opea
LLM_SERVICE_HOST_IP: llm-svc
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna-without-rerank:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
args: null
ports:
- containerPort: 8888
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
type: NodePort
selector:
app: chatqna-backend-server-deploy
ports:
- name: service
port: 8888
targetPort: 8888
nodePort: 30888
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: dataprep-deploy
hostIPC: true
containers:
- env:
- name: REDIS_URL
valueFrom:
configMapKeyRef:
name: qna-config
key: REDIS_URL
- name: TEI_ENDPOINT
valueFrom:
configMapKeyRef:
name: qna-config
key: TEI_EMBEDDING_ENDPOINT
- name: INDEX_NAME
valueFrom:
configMapKeyRef:
name: qna-config
key: INDEX_NAME
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
args: null
ports:
- containerPort: 6007
- containerPort: 6008
- containerPort: 6009
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: dataprep-svc
namespace: default
spec:
type: ClusterIP
selector:
app: dataprep-deploy
ports:
- name: port1
port: 6007
targetPort: 6007
- name: port2
port: 6008
targetPort: 6008
- name: port3
port: 6009
targetPort: 6009
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
nodeSelector:
node-type: chatqna-opea
containers:
- envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
name: embedding-dependency-deploy
args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
ports:
- containerPort: 80
serviceAccountName: default
volumes:
- name: model-volume
hostPath:
path: /mnt/models
type: Directory
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
---
kind: Service
apiVersion: v1
metadata:
name: embedding-dependency-svc
namespace: default
spec:
type: ClusterIP
selector:
app: embedding-dependency-deploy
ports:
- name: service
port: 6006
targetPort: 80
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: embedding-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/embedding-tei:latest
imagePullPolicy: IfNotPresent
name: embedding-deploy
args: null
ports:
- containerPort: 6000
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: embedding-svc
namespace: default
spec:
type: ClusterIP
selector:
app: embedding-deploy
ports:
- name: service
port: 6000
targetPort: 6000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 32
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
nodeSelector:
node-type: chatqna-opea
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
name: llm-dependency-deploy-demo
securityContext:
capabilities:
add:
- SYS_NICE
args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '2048'
- --max-total-tokens
- '4096'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HUGGING_FACE_HUB_TOKEN
value: ${HF_TOKEN}
- name: ENABLE_HPU_GRAPH
value: 'true'
- name: LIMIT_HPU_GRAPH
value: 'true'
- name: USE_FLASH_ATTENTION
value: 'true'
- name: FLASH_ATTENTION_RECOMPUTE
value: 'true'
serviceAccountName: default
volumes:
- name: model-volume
hostPath:
path: /mnt/models
type: Directory
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
---
kind: Service
apiVersion: v1
metadata:
name: llm-dependency-svc
namespace: default
spec:
type: ClusterIP
selector:
app: llm-dependency-deploy
ports:
- name: service
port: 9009
targetPort: 80
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: llm-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: llm-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/llm-tgi:latest
imagePullPolicy: IfNotPresent
name: llm-deploy
args: null
ports:
- containerPort: 9000
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: llm-svc
namespace: default
spec:
type: ClusterIP
selector:
app: llm-deploy
ports:
- name: service
port: 9000
targetPort: 9000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: reranking-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-dependency-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: reranking-dependency-deploy
containers:
- envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tei-gaudi:latest
name: reranking-dependency-deploy
args:
- --model-id
- $(RERANK_MODEL_ID)
- --auto-truncate
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
- name: MAX_WARMUP_SEQUENCE_LENGTH
value: '512'
serviceAccountName: default
volumes:
- name: model-volume
hostPath:
path: /mnt/models
type: Directory
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
---
kind: Service
apiVersion: v1
metadata:
name: reranking-dependency-svc
namespace: default
spec:
type: ClusterIP
selector:
app: reranking-dependency-deploy
ports:
- name: service
port: 8808
targetPort: 80
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: reranking-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: reranking-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/reranking-tei:latest
imagePullPolicy: IfNotPresent
name: reranking-deploy
args: null
ports:
- containerPort: 8000
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: reranking-svc
namespace: default
spec:
type: ClusterIP
selector:
app: reranking-deploy
ports:
- name: service
port: 8000
targetPort: 8000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: retriever-deploy
hostIPC: true
containers:
- env:
- name: REDIS_URL
valueFrom:
configMapKeyRef:
name: qna-config
key: REDIS_URL
- name: TEI_EMBEDDING_ENDPOINT
valueFrom:
configMapKeyRef:
name: qna-config
key: TEI_EMBEDDING_ENDPOINT
- name: HUGGINGFACEHUB_API_TOKEN
valueFrom:
configMapKeyRef:
name: qna-config
key: HUGGINGFACEHUB_API_TOKEN
- name: INDEX_NAME
valueFrom:
configMapKeyRef:
name: qna-config
key: INDEX_NAME
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
args: null
ports:
- containerPort: 7000
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: retriever-svc
namespace: default
spec:
type: ClusterIP
selector:
app: retriever-deploy
ports:
- name: service
port: 7000
targetPort: 7000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
labels:
app: vector-db
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: vector-db
containers:
- name: vector-db
image: redis/redis-stack:7.2.0-v9
ports:
- containerPort: 6379
- containerPort: 8001
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
type: ClusterIP
selector:
app: vector-db
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
---
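Unlike the earlier variants, this 742-line manifest runs reranking on the official ghcr.io/huggingface/tei-gaudi:latest image rather than a locally built opea/tei-gaudi. A minimal sketch for confirming which image the rollout actually uses:

# Print the image of the reranking pool's first container
kubectl get deployment reranking-dependency-deploy \
  -o jsonpath='{.spec.template.spec.containers[0].image}'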

View File

@@ -0,0 +1,591 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
kind: ConfigMap
metadata:
name: qna-config
namespace: default
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
RERANK_MODEL_ID: BAAI/bge-reranker-base
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
INDEX_NAME: rag-redis
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
EMBEDDING_SERVICE_HOST_IP: embedding-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
RERANK_SERVICE_HOST_IP: reranking-svc
NODE_SELECTOR: chatqna-opea
LLM_SERVICE_HOST_IP: llm-svc
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna-without-rerank:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
args: null
ports:
- containerPort: 8888
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
type: NodePort
selector:
app: chatqna-backend-server-deploy
ports:
- name: service
port: 8888
targetPort: 8888
nodePort: 30888
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: dataprep-deploy
hostIPC: true
containers:
- env:
- name: REDIS_URL
valueFrom:
configMapKeyRef:
name: qna-config
key: REDIS_URL
- name: TEI_ENDPOINT
valueFrom:
configMapKeyRef:
name: qna-config
key: TEI_EMBEDDING_ENDPOINT
- name: INDEX_NAME
valueFrom:
configMapKeyRef:
name: qna-config
key: INDEX_NAME
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
args: null
ports:
- containerPort: 6007
- containerPort: 6008
- containerPort: 6009
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: dataprep-svc
namespace: default
spec:
type: ClusterIP
selector:
app: dataprep-deploy
ports:
- name: port1
port: 6007
targetPort: 6007
- name: port2
port: 6008
targetPort: 6008
- name: port3
port: 6009
targetPort: 6009
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
nodeSelector:
node-type: chatqna-opea
containers:
- envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
name: embedding-dependency-deploy
args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
ports:
- containerPort: 80
serviceAccountName: default
volumes:
- name: model-volume
hostPath:
path: /mnt/models
type: Directory
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
---
kind: Service
apiVersion: v1
metadata:
name: embedding-dependency-svc
namespace: default
spec:
type: ClusterIP
selector:
app: embedding-dependency-deploy
ports:
- name: service
port: 6006
targetPort: 80
---
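# A quick functional check for the TEI embedding endpoint above, using the
# documented /embed route of text-embeddings-inference; the Job name and curl
# image tag are assumptions.
apiVersion: batch/v1
kind: Job
metadata:
  name: tei-embed-smoke-test
  namespace: default
spec:
  backoffLimit: 1
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: curl
        image: curlimages/curl:8.8.0
        args:
        - -sS
        - -X
        - POST
        - http://embedding-dependency-svc.default.svc.cluster.local:6006/embed
        - -H
        - 'Content-Type: application/json'
        - -d
        - '{"inputs": "What is Deep Learning?"}'
---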
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: embedding-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/embedding-tei:latest
imagePullPolicy: IfNotPresent
name: embedding-deploy
args: null
ports:
- containerPort: 6000
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: embedding-svc
namespace: default
spec:
type: ClusterIP
selector:
app: embedding-deploy
ports:
- name: service
port: 6000
targetPort: 6000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 8
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
nodeSelector:
node-type: chatqna-opea
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
name: llm-dependency-deploy-demo
securityContext:
capabilities:
add:
- SYS_NICE
args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '2048'
- --max-total-tokens
- '4096'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HUGGING_FACE_HUB_TOKEN
value: ${HF_TOKEN}
- name: ENABLE_HPU_GRAPH
value: 'true'
- name: LIMIT_HPU_GRAPH
value: 'true'
- name: USE_FLASH_ATTENTION
value: 'true'
- name: FLASH_ATTENTION_RECOMPUTE
value: 'true'
serviceAccountName: default
volumes:
- name: model-volume
hostPath:
path: /mnt/models
type: Directory
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
---
kind: Service
apiVersion: v1
metadata:
name: llm-dependency-svc
namespace: default
spec:
type: ClusterIP
selector:
app: llm-dependency-deploy
ports:
- name: service
port: 9009
targetPort: 80
---
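# The same pattern against the TGI endpoint above, via the standard
# text-generation-inference /generate route; a sketch only, with an assumed
# Job name and curl image tag.
apiVersion: batch/v1
kind: Job
metadata:
  name: tgi-generate-smoke-test
  namespace: default
spec:
  backoffLimit: 1
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: curl
        image: curlimages/curl:8.8.0
        args:
        - -sS
        - -X
        - POST
        - http://llm-dependency-svc.default.svc.cluster.local:9009/generate
        - -H
        - 'Content-Type: application/json'
        - -d
        - '{"inputs": "What is Deep Learning?", "parameters": {"max_new_tokens": 17}}'
---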
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: llm-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: llm-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/llm-tgi:latest
imagePullPolicy: IfNotPresent
name: llm-deploy
args: null
ports:
- containerPort: 9000
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: llm-svc
namespace: default
spec:
type: ClusterIP
selector:
app: llm-deploy
ports:
- name: service
port: 9000
targetPort: 9000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: retriever-deploy
hostIPC: true
containers:
- env:
- name: REDIS_URL
valueFrom:
configMapKeyRef:
name: qna-config
key: REDIS_URL
- name: TEI_EMBEDDING_ENDPOINT
valueFrom:
configMapKeyRef:
name: qna-config
key: TEI_EMBEDDING_ENDPOINT
- name: HUGGINGFACEHUB_API_TOKEN
valueFrom:
configMapKeyRef:
name: qna-config
key: HUGGINGFACEHUB_API_TOKEN
- name: INDEX_NAME
valueFrom:
configMapKeyRef:
name: qna-config
key: INDEX_NAME
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
args: null
ports:
- containerPort: 7000
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: retriever-svc
namespace: default
spec:
type: ClusterIP
selector:
app: retriever-deploy
ports:
- name: service
port: 7000
targetPort: 7000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
labels:
app: vector-db
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: vector-db
containers:
- name: vector-db
image: redis/redis-stack:7.2.0-v9
ports:
- containerPort: 6379
- containerPort: 8001
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
type: ClusterIP
selector:
app: vector-db
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
---


@@ -0,0 +1,591 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
kind: ConfigMap
metadata:
name: qna-config
namespace: default
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
RERANK_MODEL_ID: BAAI/bge-reranker-base
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
INDEX_NAME: rag-redis
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
EMBEDDING_SERVICE_HOST_IP: embedding-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
RERANK_SERVICE_HOST_IP: reranking-svc
NODE_SELECTOR: chatqna-opea
LLM_SERVICE_HOST_IP: llm-svc
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna-without-rerank:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
args: null
ports:
- containerPort: 8888
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
type: NodePort
selector:
app: chatqna-backend-server-deploy
ports:
- name: service
port: 8888
targetPort: 8888
nodePort: 30888
---
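# The NodePort above exposes the gateway on port 30888 of every node. Where an
# ingress controller is available, a minimal Ingress such as the following
# sketch could front the same Service; the resource name and the catch-all
# path rule are assumptions.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: chatqna-ingress
  namespace: default
spec:
  rules:
  - http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: chatqna-backend-server-svc
            port:
              number: 8888
---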
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: dataprep-deploy
hostIPC: true
containers:
- env:
- name: REDIS_URL
valueFrom:
configMapKeyRef:
name: qna-config
key: REDIS_URL
- name: TEI_ENDPOINT
valueFrom:
configMapKeyRef:
name: qna-config
key: TEI_EMBEDDING_ENDPOINT
- name: INDEX_NAME
valueFrom:
configMapKeyRef:
name: qna-config
key: INDEX_NAME
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
args: null
ports:
- containerPort: 6007
- containerPort: 6008
- containerPort: 6009
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: dataprep-svc
namespace: default
spec:
type: ClusterIP
selector:
app: dataprep-deploy
ports:
- name: port1
port: 6007
targetPort: 6007
- name: port2
port: 6008
targetPort: 6008
- name: port3
port: 6009
targetPort: 6009
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
nodeSelector:
node-type: chatqna-opea
containers:
- envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
name: embedding-dependency-deploy
args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
ports:
- containerPort: 80
serviceAccountName: default
volumes:
- name: model-volume
hostPath:
path: /mnt/models
type: Directory
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
---
kind: Service
apiVersion: v1
metadata:
name: embedding-dependency-svc
namespace: default
spec:
type: ClusterIP
selector:
app: embedding-dependency-deploy
ports:
- name: service
port: 6006
targetPort: 80
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: embedding-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/embedding-tei:latest
imagePullPolicy: IfNotPresent
name: embedding-deploy
args: null
ports:
- containerPort: 6000
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: embedding-svc
namespace: default
spec:
type: ClusterIP
selector:
app: embedding-deploy
ports:
- name: service
port: 6000
targetPort: 6000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 16
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
nodeSelector:
node-type: chatqna-opea
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
name: llm-dependency-deploy-demo
securityContext:
capabilities:
add:
- SYS_NICE
args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '2048'
- --max-total-tokens
- '4096'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HUGGING_FACE_HUB_TOKEN
value: ${HF_TOKEN}
- name: ENABLE_HPU_GRAPH
value: 'true'
- name: LIMIT_HPU_GRAPH
value: 'true'
- name: USE_FLASH_ATTENTION
value: 'true'
- name: FLASH_ATTENTION_RECOMPUTE
value: 'true'
serviceAccountName: default
volumes:
- name: model-volume
hostPath:
path: /mnt/models
type: Directory
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
---
kind: Service
apiVersion: v1
metadata:
name: llm-dependency-svc
namespace: default
spec:
type: ClusterIP
selector:
app: llm-dependency-deploy
ports:
- name: service
port: 9009
targetPort: 80
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: llm-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: llm-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/llm-tgi:latest
imagePullPolicy: IfNotPresent
name: llm-deploy
args: null
ports:
- containerPort: 9000
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: llm-svc
namespace: default
spec:
type: ClusterIP
selector:
app: llm-deploy
ports:
- name: service
port: 9000
targetPort: 9000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: retriever-deploy
hostIPC: true
containers:
- env:
- name: REDIS_URL
valueFrom:
configMapKeyRef:
name: qna-config
key: REDIS_URL
- name: TEI_EMBEDDING_ENDPOINT
valueFrom:
configMapKeyRef:
name: qna-config
key: TEI_EMBEDDING_ENDPOINT
- name: HUGGINGFACEHUB_API_TOKEN
valueFrom:
configMapKeyRef:
name: qna-config
key: HUGGINGFACEHUB_API_TOKEN
- name: INDEX_NAME
valueFrom:
configMapKeyRef:
name: qna-config
key: INDEX_NAME
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
args: null
ports:
- containerPort: 7000
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: retriever-svc
namespace: default
spec:
type: ClusterIP
selector:
app: retriever-deploy
ports:
- name: service
port: 7000
targetPort: 7000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
labels:
app: vector-db
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: vector-db
containers:
- name: vector-db
image: redis/redis-stack:7.2.0-v9
ports:
- containerPort: 6379
- containerPort: 8001
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
type: ClusterIP
selector:
app: vector-db
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
---


@@ -0,0 +1,683 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
EMBEDDING_SERVICE_HOST_IP: embedding-svc
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
INDEX_NAME: rag-redis
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
LLM_SERVICE_HOST_IP: llm-svc
NODE_SELECTOR: chatqna-opea
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
RERANK_MODEL_ID: BAAI/bge-reranker-base
RERANK_SERVICE_HOST_IP: reranking-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
kind: ConfigMap
metadata:
name: qna-config
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
ports:
- containerPort: 8888
resources:
limits:
cpu: 8
memory: 8000Mi
requests:
cpu: 8
memory: 8000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
ports:
- containerPort: 6007
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: dataprep-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: dataprep-svc
namespace: default
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
imagePullPolicy: IfNotPresent
name: embedding-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
cpu: 76
memory: 20000Mi
requests:
cpu: 76
memory: 20000Mi
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: embedding-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: embedding-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/embedding-tei:latest
imagePullPolicy: IfNotPresent
name: embedding-deploy
ports:
- containerPort: 6000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: embedding-svc
namespace: default
spec:
ports:
- name: service
port: 6000
targetPort: 6000
selector:
app: embedding-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 31
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '1024'
- --max-total-tokens
- '2048'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HUGGING_FACE_HUB_TOKEN
value: ${HF_TOKEN}
- name: ENABLE_HPU_GRAPH
value: 'true'
- name: LIMIT_HPU_GRAPH
value: 'true'
- name: USE_FLASH_ATTENTION
value: 'true'
- name: FLASH_ATTENTION_RECOMPUTE
value: 'true'
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
imagePullPolicy: IfNotPresent
name: llm-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
securityContext:
capabilities:
add:
- SYS_NICE
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: llm-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: llm-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/llm-tgi:latest
imagePullPolicy: IfNotPresent
name: llm-deploy
ports:
- containerPort: 9000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: llm-svc
namespace: default
spec:
ports:
- name: service
port: 9000
targetPort: 9000
selector:
app: llm-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: reranking-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(RERANK_MODEL_ID)
- --auto-truncate
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
- name: MAX_WARMUP_SEQUENCE_LENGTH
value: '512'
envFrom:
- configMapRef:
name: qna-config
image: opea/tei-gaudi:latest
imagePullPolicy: IfNotPresent
name: reranking-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: reranking-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: reranking-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 8808
targetPort: 80
selector:
app: reranking-dependency-deploy
type: ClusterIP
---
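# A functional check for the reranker above, using the documented /rerank
# route of text-embeddings-inference; the Job name and curl image tag are
# assumptions.
apiVersion: batch/v1
kind: Job
metadata:
  name: tei-rerank-smoke-test
  namespace: default
spec:
  backoffLimit: 1
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: curl
        image: curlimages/curl:8.8.0
        args:
        - -sS
        - -X
        - POST
        - http://reranking-dependency-svc.default.svc.cluster.local:8808/rerank
        - -H
        - 'Content-Type: application/json'
        - -d
        - '{"query": "What is Deep Learning?", "texts": ["Deep learning is a subset of machine learning.", "Paris is the capital of France."]}'
---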
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: reranking-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/reranking-tei:latest
imagePullPolicy: IfNotPresent
name: reranking-deploy
ports:
- containerPort: 8000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: reranking-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: reranking-svc
namespace: default
spec:
ports:
- name: service
port: 8000
targetPort: 8000
selector:
app: reranking-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
ports:
- containerPort: 7000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: retriever-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: retriever-svc
namespace: default
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: vector-db
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: redis/redis-stack:7.2.0-v9
imagePullPolicy: IfNotPresent
name: vector-db
ports:
- containerPort: 6379
- containerPort: 8001
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: vector-db
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP
---

View File

@@ -0,0 +1,683 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
EMBEDDING_SERVICE_HOST_IP: embedding-svc
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
INDEX_NAME: rag-redis
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
LLM_SERVICE_HOST_IP: llm-svc
NODE_SELECTOR: chatqna-opea
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
RERANK_MODEL_ID: BAAI/bge-reranker-base
RERANK_SERVICE_HOST_IP: reranking-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
kind: ConfigMap
metadata:
name: qna-config
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
ports:
- containerPort: 8888
resources:
limits:
cpu: 8
memory: 8000Mi
requests:
cpu: 8
memory: 8000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
ports:
- containerPort: 6007
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: dataprep-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: dataprep-svc
namespace: default
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
imagePullPolicy: IfNotPresent
name: embedding-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
cpu: 76
memory: 20000Mi
requests:
cpu: 76
memory: 20000Mi
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: embedding-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/embedding-tei:latest
imagePullPolicy: IfNotPresent
name: embedding-deploy
ports:
- containerPort: 6000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: embedding-svc
namespace: default
spec:
ports:
- name: service
port: 6000
targetPort: 6000
selector:
app: embedding-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 7
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '1024'
- --max-total-tokens
- '2048'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HUGGING_FACE_HUB_TOKEN
value: ${HF_TOKEN}
- name: ENABLE_HPU_GRAPH
value: 'true'
- name: LIMIT_HPU_GRAPH
value: 'true'
- name: USE_FLASH_ATTENTION
value: 'true'
- name: FLASH_ATTENTION_RECOMPUTE
value: 'true'
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
imagePullPolicy: IfNotPresent
name: llm-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
securityContext:
capabilities:
add:
- SYS_NICE
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: llm-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
---
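# With several Gaudi-backed TGI replicas behind one Service, a
# PodDisruptionBudget (policy/v1, available on Kubernetes 1.21+) can keep at
# least one replica serving during voluntary node drains; a sketch with an
# assumed name and threshold.
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: llm-dependency-pdb
  namespace: default
spec:
  minAvailable: 1
  selector:
    matchLabels:
      app: llm-dependency-deploy
---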
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: llm-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/llm-tgi:latest
imagePullPolicy: IfNotPresent
name: llm-deploy
ports:
- containerPort: 9000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: llm-svc
namespace: default
spec:
ports:
- name: service
port: 9000
targetPort: 9000
selector:
app: llm-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: reranking-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(RERANK_MODEL_ID)
- --auto-truncate
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
- name: MAX_WARMUP_SEQUENCE_LENGTH
value: '512'
envFrom:
- configMapRef:
name: qna-config
image: opea/tei-gaudi:latest
imagePullPolicy: IfNotPresent
name: reranking-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: reranking-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: reranking-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 8808
targetPort: 80
selector:
app: reranking-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: reranking-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/reranking-tei:latest
imagePullPolicy: IfNotPresent
name: reranking-deploy
ports:
- containerPort: 8000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: reranking-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: reranking-svc
namespace: default
spec:
ports:
- name: service
port: 8000
targetPort: 8000
selector:
app: reranking-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
ports:
- containerPort: 7000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: retriever-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: retriever-svc
namespace: default
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: vector-db
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: redis/redis-stack:7.2.0-v9
imagePullPolicy: IfNotPresent
name: vector-db
ports:
- containerPort: 6379
- containerPort: 8001
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: vector-db
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP
---


@@ -0,0 +1,683 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
EMBEDDING_SERVICE_HOST_IP: embedding-svc
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
INDEX_NAME: rag-redis
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
LLM_SERVICE_HOST_IP: llm-svc
NODE_SELECTOR: chatqna-opea
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
RERANK_MODEL_ID: BAAI/bge-reranker-base
RERANK_SERVICE_HOST_IP: reranking-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
kind: ConfigMap
metadata:
name: qna-config
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
ports:
- containerPort: 8888
resources:
limits:
cpu: 8
memory: 8000Mi
requests:
cpu: 8
memory: 8000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
ports:
- name: service
nodePort: 30888
port: 8888
targetPort: 8888
selector:
app: chatqna-backend-server-deploy
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
ports:
- containerPort: 6007
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: dataprep-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: dataprep-svc
namespace: default
spec:
ports:
- name: port1
port: 6007
targetPort: 6007
selector:
app: dataprep-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
imagePullPolicy: IfNotPresent
name: embedding-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
cpu: 76
memory: 20000Mi
requests:
cpu: 76
memory: 20000Mi
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: embedding-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 6006
targetPort: 80
selector:
app: embedding-dependency-deploy
type: ClusterIP
---
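# Every dependency here is a ClusterIP Service in the default namespace. On
# clusters whose CNI enforces NetworkPolicy, a sketch like the following could
# confine the TEI endpoint to in-namespace callers; the policy name is an
# assumption.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: embedding-dependency-allow-internal
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: embedding-dependency-deploy
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector: {}
    ports:
    - protocol: TCP
      port: 80
---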
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: embedding-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/embedding-tei:latest
imagePullPolicy: IfNotPresent
name: embedding-deploy
ports:
- containerPort: 6000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: embedding-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: embedding-svc
namespace: default
spec:
ports:
- name: service
port: 6000
targetPort: 6000
selector:
app: embedding-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 15
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '1024'
- --max-total-tokens
- '2048'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HUGGING_FACE_HUB_TOKEN
value: ${HF_TOKEN}
- name: ENABLE_HPU_GRAPH
value: 'true'
- name: LIMIT_HPU_GRAPH
value: 'true'
- name: USE_FLASH_ATTENTION
value: 'true'
- name: FLASH_ATTENTION_RECOMPUTE
value: 'true'
envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
imagePullPolicy: IfNotPresent
name: llm-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
securityContext:
capabilities:
add:
- SYS_NICE
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: llm-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 9009
targetPort: 80
selector:
app: llm-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: llm-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/llm-tgi:latest
imagePullPolicy: IfNotPresent
name: llm-deploy
ports:
- containerPort: 9000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: llm-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: llm-svc
namespace: default
spec:
ports:
- name: service
port: 9000
targetPort: 9000
selector:
app: llm-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: reranking-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-dependency-deploy
spec:
containers:
- args:
- --model-id
- $(RERANK_MODEL_ID)
- --auto-truncate
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HF_TOKEN
value: ${HF_TOKEN}
- name: MAX_WARMUP_SEQUENCE_LENGTH
value: '512'
envFrom:
- configMapRef:
name: qna-config
image: opea/tei-gaudi:latest
imagePullPolicy: IfNotPresent
name: reranking-dependency-deploy
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: reranking-dependency-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- hostPath:
path: /mnt/models
type: Directory
name: model-volume
- emptyDir:
medium: Memory
sizeLimit: 1Gi
name: shm
---
apiVersion: v1
kind: Service
metadata:
name: reranking-dependency-svc
namespace: default
spec:
ports:
- name: service
port: 8808
targetPort: 80
selector:
app: reranking-dependency-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: reranking-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: reranking-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: reranking-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/reranking-tei:latest
imagePullPolicy: IfNotPresent
name: reranking-deploy
ports:
- containerPort: 8000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: reranking-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: reranking-svc
namespace: default
spec:
ports:
- name: service
port: 8000
targetPort: 8000
selector:
app: reranking-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
ports:
- containerPort: 7000
resources:
requests:
cpu: 4
memory: 4000Mi
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: retriever-deploy
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: retriever-svc
namespace: default
spec:
ports:
- name: service
port: 7000
targetPort: 7000
selector:
app: retriever-deploy
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: vector-db
spec:
containers:
- envFrom:
- configMapRef:
name: qna-config
image: redis/redis-stack:7.2.0-v9
imagePullPolicy: IfNotPresent
name: vector-db
ports:
- containerPort: 6379
- containerPort: 8001
hostIPC: true
nodeSelector:
node-type: chatqna-opea
serviceAccountName: default
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: vector-db
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
selector:
app: vector-db
type: ClusterIP
---


@@ -0,0 +1,622 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
kind: ConfigMap
metadata:
name: qna-config
namespace: default
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
RERANK_MODEL_ID: BAAI/bge-reranker-base
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
INDEX_NAME: rag-redis
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
EMBEDDING_SERVICE_HOST_IP: embedding-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
RERANK_SERVICE_HOST_IP: reranking-svc
NODE_SELECTOR: chatqna-opea
LLM_SERVICE_HOST_IP: llm-svc
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna-without-rerank:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
args: null
ports:
- containerPort: 8888
resources:
limits:
cpu: 8
memory: 4000Mi
requests:
cpu: 8
memory: 4000Mi
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
type: NodePort
selector:
app: chatqna-backend-server-deploy
ports:
- name: service
port: 8888
targetPort: 8888
nodePort: 30888
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: dataprep-deploy
hostIPC: true
containers:
- env:
- name: REDIS_URL
valueFrom:
configMapKeyRef:
name: qna-config
key: REDIS_URL
- name: TEI_ENDPOINT
valueFrom:
configMapKeyRef:
name: qna-config
key: TEI_EMBEDDING_ENDPOINT
- name: INDEX_NAME
valueFrom:
configMapKeyRef:
name: qna-config
key: INDEX_NAME
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
args: null
ports:
- containerPort: 6007
- containerPort: 6008
- containerPort: 6009
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: dataprep-svc
namespace: default
spec:
type: ClusterIP
selector:
app: dataprep-deploy
ports:
- name: port1
port: 6007
targetPort: 6007
- name: port2
port: 6008
targetPort: 6008
- name: port3
port: 6009
targetPort: 6009
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
nodeSelector:
node-type: chatqna-opea
containers:
- envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
name: embedding-dependency-deploy
args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
ports:
- containerPort: 80
resources:
limits:
cpu: 76
memory: 20000Mi
requests:
cpu: 76
memory: 20000Mi
serviceAccountName: default
volumes:
- name: model-volume
hostPath:
path: /mnt/models
type: Directory
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
---
kind: Service
apiVersion: v1
metadata:
name: embedding-dependency-svc
namespace: default
spec:
type: ClusterIP
selector:
app: embedding-dependency-deploy
ports:
- name: service
port: 6006
targetPort: 80
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: embedding-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: embedding-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/embedding-tei:latest
imagePullPolicy: IfNotPresent
name: embedding-deploy
args: null
ports:
- containerPort: 6000
resources:
limits:
cpu: 4
requests:
cpu: 4
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: embedding-svc
namespace: default
spec:
type: ClusterIP
selector:
app: embedding-deploy
ports:
- name: service
port: 6000
targetPort: 6000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 32
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
nodeSelector:
node-type: chatqna-opea
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
name: llm-dependency-deploy-demo
securityContext:
capabilities:
add:
- SYS_NICE
args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '1024'
- --max-total-tokens
- '2048'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HUGGING_FACE_HUB_TOKEN
value: ${HF_TOKEN}
- name: ENABLE_HPU_GRAPH
value: 'true'
- name: LIMIT_HPU_GRAPH
value: 'true'
- name: USE_FLASH_ATTENTION
value: 'true'
- name: FLASH_ATTENTION_RECOMPUTE
value: 'true'
serviceAccountName: default
volumes:
- name: model-volume
hostPath:
path: /mnt/models
type: Directory
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
---
kind: Service
apiVersion: v1
metadata:
name: llm-dependency-svc
namespace: default
spec:
type: ClusterIP
selector:
app: llm-dependency-deploy
ports:
- name: service
port: 9009
targetPort: 80
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: llm-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: llm-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/llm-tgi:latest
imagePullPolicy: IfNotPresent
name: llm-deploy
args: null
ports:
- containerPort: 9000
resources:
limits:
cpu: 4
requests:
cpu: 4
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: llm-svc
namespace: default
spec:
type: ClusterIP
selector:
app: llm-deploy
ports:
- name: service
port: 9000
targetPort: 9000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 4
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: retriever-deploy
hostIPC: true
containers:
- env:
- name: REDIS_URL
valueFrom:
configMapKeyRef:
name: qna-config
key: REDIS_URL
- name: TEI_EMBEDDING_ENDPOINT
valueFrom:
configMapKeyRef:
name: qna-config
key: TEI_EMBEDDING_ENDPOINT
- name: HUGGINGFACEHUB_API_TOKEN
valueFrom:
configMapKeyRef:
name: qna-config
key: HUGGINGFACEHUB_API_TOKEN
- name: INDEX_NAME
valueFrom:
configMapKeyRef:
name: qna-config
key: INDEX_NAME
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
args: null
ports:
- containerPort: 7000
resources:
limits:
cpu: 8
memory: 2500Mi
requests:
cpu: 8
memory: 2500Mi
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: retriever-svc
namespace: default
spec:
type: ClusterIP
selector:
app: retriever-deploy
ports:
- name: service
port: 7000
targetPort: 7000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
labels:
app: vector-db
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: vector-db
containers:
- name: vector-db
image: redis/redis-stack:7.2.0-v9
ports:
- containerPort: 6379
- containerPort: 8001
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
type: ClusterIP
selector:
app: vector-db
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
---
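After applying this manifest, a quick sanity check (a sketch, assuming `kubectl` access to the `default` namespace used above) confirms the TGI worker pods run the updated `tgi-gaudi:2.0.5` image and that each pod claimed one Gaudi card:

```
# List the TGI worker pods and the image each one runs.
kubectl get pods -n default -l app=llm-dependency-deploy \
  -o custom-columns='POD:.metadata.name,IMAGE:.spec.containers[0].image'

# Confirm each pod requested a Gaudi device (habana.ai/gaudi: 1).
kubectl describe pods -n default -l app=llm-dependency-deploy | grep -i 'habana.ai/gaudi'
```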

View File

@@ -0,0 +1,622 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
kind: ConfigMap
metadata:
name: qna-config
namespace: default
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
RERANK_MODEL_ID: BAAI/bge-reranker-base
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
INDEX_NAME: rag-redis
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
EMBEDDING_SERVICE_HOST_IP: embedding-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
RERANK_SERVICE_HOST_IP: reranking-svc
NODE_SELECTOR: chatqna-opea
LLM_SERVICE_HOST_IP: llm-svc
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna-without-rerank:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
args: null
ports:
- containerPort: 8888
resources:
limits:
cpu: 8
memory: 4000Mi
requests:
cpu: 8
memory: 4000Mi
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
type: NodePort
selector:
app: chatqna-backend-server-deploy
ports:
- name: service
port: 8888
targetPort: 8888
nodePort: 30888
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: dataprep-deploy
hostIPC: true
containers:
- env:
- name: REDIS_URL
valueFrom:
configMapKeyRef:
name: qna-config
key: REDIS_URL
- name: TEI_ENDPOINT
valueFrom:
configMapKeyRef:
name: qna-config
key: TEI_EMBEDDING_ENDPOINT
- name: INDEX_NAME
valueFrom:
configMapKeyRef:
name: qna-config
key: INDEX_NAME
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
args: null
ports:
- containerPort: 6007
- containerPort: 6008
- containerPort: 6009
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: dataprep-svc
namespace: default
spec:
type: ClusterIP
selector:
app: dataprep-deploy
ports:
- name: port1
port: 6007
targetPort: 6007
- name: port2
port: 6008
targetPort: 6008
- name: port3
port: 6009
targetPort: 6009
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
nodeSelector:
node-type: chatqna-opea
containers:
- envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
name: embedding-dependency-deploy
args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
ports:
- containerPort: 80
resources:
limits:
cpu: 76
memory: 20000Mi
requests:
cpu: 76
memory: 20000Mi
serviceAccountName: default
volumes:
- name: model-volume
hostPath:
path: /mnt/models
type: Directory
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
---
kind: Service
apiVersion: v1
metadata:
name: embedding-dependency-svc
namespace: default
spec:
type: ClusterIP
selector:
app: embedding-dependency-deploy
ports:
- name: service
port: 6006
targetPort: 80
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: embedding-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: embedding-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/embedding-tei:latest
imagePullPolicy: IfNotPresent
name: embedding-deploy
args: null
ports:
- containerPort: 6000
resources:
limits:
cpu: 4
requests:
cpu: 4
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: embedding-svc
namespace: default
spec:
type: ClusterIP
selector:
app: embedding-deploy
ports:
- name: service
port: 6000
targetPort: 6000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 8
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
nodeSelector:
node-type: chatqna-opea
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
name: llm-dependency-deploy-demo
securityContext:
capabilities:
add:
- SYS_NICE
args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '1024'
- --max-total-tokens
- '2048'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HUGGING_FACE_HUB_TOKEN
value: ${HF_TOKEN}
- name: ENABLE_HPU_GRAPH
value: 'true'
- name: LIMIT_HPU_GRAPH
value: 'true'
- name: USE_FLASH_ATTENTION
value: 'true'
- name: FLASH_ATTENTION_RECOMPUTE
value: 'true'
serviceAccountName: default
volumes:
- name: model-volume
hostPath:
path: /mnt/models
type: Directory
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
---
kind: Service
apiVersion: v1
metadata:
name: llm-dependency-svc
namespace: default
spec:
type: ClusterIP
selector:
app: llm-dependency-deploy
ports:
- name: service
port: 9009
targetPort: 80
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: llm-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: llm-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/llm-tgi:latest
imagePullPolicy: IfNotPresent
name: llm-deploy
args: null
ports:
- containerPort: 9000
resources:
limits:
cpu: 4
requests:
cpu: 4
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: llm-svc
namespace: default
spec:
type: ClusterIP
selector:
app: llm-deploy
ports:
- name: service
port: 9000
targetPort: 9000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: retriever-deploy
hostIPC: true
containers:
- env:
- name: REDIS_URL
valueFrom:
configMapKeyRef:
name: qna-config
key: REDIS_URL
- name: TEI_EMBEDDING_ENDPOINT
valueFrom:
configMapKeyRef:
name: qna-config
key: TEI_EMBEDDING_ENDPOINT
- name: HUGGINGFACEHUB_API_TOKEN
valueFrom:
configMapKeyRef:
name: qna-config
key: HUGGINGFACEHUB_API_TOKEN
- name: INDEX_NAME
valueFrom:
configMapKeyRef:
name: qna-config
key: INDEX_NAME
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
args: null
ports:
- containerPort: 7000
resources:
limits:
cpu: 8
memory: 2500Mi
requests:
cpu: 8
memory: 2500Mi
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: retriever-svc
namespace: default
spec:
type: ClusterIP
selector:
app: retriever-deploy
ports:
- name: service
port: 7000
targetPort: 7000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
labels:
app: vector-db
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: vector-db
containers:
- name: vector-db
image: redis/redis-stack:7.2.0-v9
ports:
- containerPort: 6379
- containerPort: 8001
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
type: ClusterIP
selector:
app: vector-db
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
---

View File

@@ -0,0 +1,622 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: v1
kind: ConfigMap
metadata:
name: qna-config
namespace: default
data:
EMBEDDING_MODEL_ID: BAAI/bge-base-en-v1.5
RERANK_MODEL_ID: BAAI/bge-reranker-base
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
TEI_EMBEDDING_ENDPOINT: http://embedding-dependency-svc.default.svc.cluster.local:6006
TEI_RERANKING_ENDPOINT: http://reranking-dependency-svc.default.svc.cluster.local:8808
TGI_LLM_ENDPOINT: http://llm-dependency-svc.default.svc.cluster.local:9009
REDIS_URL: redis://vector-db.default.svc.cluster.local:6379
INDEX_NAME: rag-redis
HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
EMBEDDING_SERVICE_HOST_IP: embedding-svc
RETRIEVER_SERVICE_HOST_IP: retriever-svc
RERANK_SERVICE_HOST_IP: reranking-svc
NODE_SELECTOR: chatqna-opea
LLM_SERVICE_HOST_IP: llm-svc
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: chatqna-backend-server-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: chatqna-backend-server-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: chatqna-backend-server-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: chatqna-backend-server-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/chatqna-without-rerank:latest
imagePullPolicy: IfNotPresent
name: chatqna-backend-server-deploy
args: null
ports:
- containerPort: 8888
resources:
limits:
cpu: 8
memory: 4000Mi
requests:
cpu: 8
memory: 4000Mi
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: chatqna-backend-server-svc
namespace: default
spec:
type: NodePort
selector:
app: chatqna-backend-server-deploy
ports:
- name: service
port: 8888
targetPort: 8888
nodePort: 30888
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: dataprep-deploy
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: dataprep-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: dataprep-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: dataprep-deploy
hostIPC: true
containers:
- env:
- name: REDIS_URL
valueFrom:
configMapKeyRef:
name: qna-config
key: REDIS_URL
- name: TEI_ENDPOINT
valueFrom:
configMapKeyRef:
name: qna-config
key: TEI_EMBEDDING_ENDPOINT
- name: INDEX_NAME
valueFrom:
configMapKeyRef:
name: qna-config
key: INDEX_NAME
image: opea/dataprep-redis:latest
imagePullPolicy: IfNotPresent
name: dataprep-deploy
args: null
ports:
- containerPort: 6007
- containerPort: 6008
- containerPort: 6009
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: dataprep-svc
namespace: default
spec:
type: ClusterIP
selector:
app: dataprep-deploy
ports:
- name: port1
port: 6007
targetPort: 6007
- name: port2
port: 6008
targetPort: 6008
- name: port3
port: 6009
targetPort: 6009
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-dependency-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: embedding-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-dependency-deploy
spec:
nodeSelector:
node-type: chatqna-opea
containers:
- envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
name: embedding-dependency-deploy
args:
- --model-id
- $(EMBEDDING_MODEL_ID)
- --auto-truncate
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
ports:
- containerPort: 80
resources:
limits:
cpu: 76
memory: 20000Mi
requests:
cpu: 76
memory: 20000Mi
serviceAccountName: default
volumes:
- name: model-volume
hostPath:
path: /mnt/models
type: Directory
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
---
kind: Service
apiVersion: v1
metadata:
name: embedding-dependency-svc
namespace: default
spec:
type: ClusterIP
selector:
app: embedding-dependency-deploy
ports:
- name: service
port: 6006
targetPort: 80
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: embedding-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: embedding-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: embedding-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: embedding-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/embedding-tei:latest
imagePullPolicy: IfNotPresent
name: embedding-deploy
args: null
ports:
- containerPort: 6000
resources:
limits:
cpu: 4
requests:
cpu: 4
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: embedding-svc
namespace: default
spec:
type: ClusterIP
selector:
app: embedding-deploy
ports:
- name: service
port: 6000
targetPort: 6000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-dependency-deploy
namespace: default
spec:
replicas: 16
selector:
matchLabels:
app: llm-dependency-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-dependency-deploy
spec:
nodeSelector:
node-type: chatqna-opea
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: ghcr.io/huggingface/tgi-gaudi:2.0.5
name: llm-dependency-deploy-demo
securityContext:
capabilities:
add:
- SYS_NICE
args:
- --model-id
- $(LLM_MODEL_ID)
- --max-input-length
- '1024'
- --max-total-tokens
- '2048'
- --max-batch-total-tokens
- '65536'
- --max-batch-prefill-tokens
- '4096'
volumeMounts:
- mountPath: /data
name: model-volume
- mountPath: /dev/shm
name: shm
ports:
- containerPort: 80
resources:
limits:
habana.ai/gaudi: 1
env:
- name: OMPI_MCA_btl_vader_single_copy_mechanism
value: none
- name: PT_HPU_ENABLE_LAZY_COLLECTIVES
value: 'true'
- name: runtime
value: habana
- name: HABANA_VISIBLE_DEVICES
value: all
- name: HUGGING_FACE_HUB_TOKEN
value: ${HF_TOKEN}
- name: ENABLE_HPU_GRAPH
value: 'true'
- name: LIMIT_HPU_GRAPH
value: 'true'
- name: USE_FLASH_ATTENTION
value: 'true'
- name: FLASH_ATTENTION_RECOMPUTE
value: 'true'
serviceAccountName: default
volumes:
- name: model-volume
hostPath:
path: /mnt/models
type: Directory
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
---
kind: Service
apiVersion: v1
metadata:
name: llm-dependency-svc
namespace: default
spec:
type: ClusterIP
selector:
app: llm-dependency-deploy
ports:
- name: service
port: 9009
targetPort: 80
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: llm-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: llm-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: llm-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: llm-deploy
hostIPC: true
containers:
- envFrom:
- configMapRef:
name: qna-config
image: opea/llm-tgi:latest
imagePullPolicy: IfNotPresent
name: llm-deploy
args: null
ports:
- containerPort: 9000
resources:
limits:
cpu: 4
requests:
cpu: 4
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: llm-svc
namespace: default
spec:
type: ClusterIP
selector:
app: llm-deploy
ports:
- name: service
port: 9000
targetPort: 9000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: retriever-deploy
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: retriever-deploy
template:
metadata:
annotations:
sidecar.istio.io/rewriteAppHTTPProbers: 'true'
labels:
app: retriever-deploy
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: retriever-deploy
hostIPC: true
containers:
- env:
- name: REDIS_URL
valueFrom:
configMapKeyRef:
name: qna-config
key: REDIS_URL
- name: TEI_EMBEDDING_ENDPOINT
valueFrom:
configMapKeyRef:
name: qna-config
key: TEI_EMBEDDING_ENDPOINT
- name: HUGGINGFACEHUB_API_TOKEN
valueFrom:
configMapKeyRef:
name: qna-config
key: HUGGINGFACEHUB_API_TOKEN
- name: INDEX_NAME
valueFrom:
configMapKeyRef:
name: qna-config
key: INDEX_NAME
image: opea/retriever-redis:latest
imagePullPolicy: IfNotPresent
name: retriever-deploy
args: null
ports:
- containerPort: 7000
resources:
limits:
cpu: 8
memory: 2500Mi
requests:
cpu: 8
memory: 2500Mi
serviceAccountName: default
---
kind: Service
apiVersion: v1
metadata:
name: retriever-svc
namespace: default
spec:
type: ClusterIP
selector:
app: retriever-deploy
ports:
- name: service
port: 7000
targetPort: 7000
---
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: vector-db
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: vector-db
template:
metadata:
labels:
app: vector-db
spec:
nodeSelector:
node-type: chatqna-opea
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: vector-db
containers:
- name: vector-db
image: redis/redis-stack:7.2.0-v9
ports:
- containerPort: 6379
- containerPort: 8001
---
apiVersion: v1
kind: Service
metadata:
name: vector-db
namespace: default
spec:
type: ClusterIP
selector:
app: vector-db
ports:
- name: vector-db-service
port: 6379
targetPort: 6379
- name: vector-db-insight
port: 8001
targetPort: 8001
---

View File

@@ -19,7 +19,7 @@ opea_micro_services:
tei-embedding-service: tei-embedding-service:
host: ${TEI_EMBEDDING_SERVICE_IP} host: ${TEI_EMBEDDING_SERVICE_IP}
ports: ${TEI_EMBEDDING_SERVICE_PORT} ports: ${TEI_EMBEDDING_SERVICE_PORT}
image: opea/tei-gaudi:latest image: ghcr.io/huggingface/tei-gaudi:latest
volumes: volumes:
- "./data:/data" - "./data:/data"
runtime: habana runtime: habana
@@ -48,7 +48,7 @@ opea_micro_services:
tgi-service: tgi-service:
host: ${TGI_SERVICE_IP} host: ${TGI_SERVICE_IP}
ports: ${TGI_SERVICE_PORT} ports: ${TGI_SERVICE_PORT}
image: ghcr.io/huggingface/tgi-gaudi:2.0.1 image: ghcr.io/huggingface/tgi-gaudi:2.0.5
volumes: volumes:
- "./data:/data" - "./data:/data"
runtime: habana runtime: habana
@@ -56,10 +56,13 @@ opea_micro_services:
- SYS_NICE - SYS_NICE
ipc: host ipc: host
environment: environment:
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
HABANA_VISIBLE_DEVICES: all HABANA_VISIBLE_DEVICES: all
OMPI_MCA_btl_vader_single_copy_mechanism: none OMPI_MCA_btl_vader_single_copy_mechanism: none
HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} ENABLE_HPU_GRAPH: true
LIMIT_HPU_GRAPH: true
USE_FLASH_ATTENTION: true
FLASH_ATTENTION_RECOMPUTE: true
model-id: ${LLM_MODEL_ID} model-id: ${LLM_MODEL_ID}
llm: llm:
host: ${LLM_SERVICE_HOST_IP} host: ${LLM_SERVICE_HOST_IP}

View File

@@ -69,7 +69,7 @@ services:
INDEX_NAME: ${INDEX_NAME} INDEX_NAME: ${INDEX_NAME}
restart: unless-stopped restart: unless-stopped
tei-reranking-service: tei-reranking-service:
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.2 image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
container_name: tei-reranking-server container_name: tei-reranking-server
ports: ports:
- "6041:80" - "6041:80"

View File

@@ -25,7 +25,7 @@ services:
TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
tei-embedding-service: tei-embedding-service:
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.2 image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
container_name: tei-embedding-server container_name: tei-embedding-server
ports: ports:
- "6006:80" - "6006:80"
@@ -75,7 +75,7 @@ services:
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
restart: unless-stopped restart: unless-stopped
tei-reranking-service: tei-reranking-service:
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.2 image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
container_name: tei-reranking-server container_name: tei-reranking-server
ports: ports:
- "8808:80" - "8808:80"

View File

@@ -25,7 +25,7 @@ services:
TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
tei-embedding-service: tei-embedding-service:
image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest} image: ghcr.io/huggingface/tei-gaudi:latest
container_name: tei-embedding-gaudi-server container_name: tei-embedding-gaudi-server
ports: ports:
- "8090:80" - "8090:80"
@@ -108,7 +108,7 @@ services:
HF_HUB_ENABLE_HF_TRANSFER: 0 HF_HUB_ENABLE_HF_TRANSFER: 0
restart: unless-stopped restart: unless-stopped
tgi-service: tgi-service:
image: ghcr.io/huggingface/tgi-gaudi:2.0.1 image: ghcr.io/huggingface/tgi-gaudi:2.0.5
container_name: tgi-gaudi-server container_name: tgi-gaudi-server
ports: ports:
- "8005:80" - "8005:80"
@@ -118,11 +118,15 @@ services:
no_proxy: ${no_proxy} no_proxy: ${no_proxy}
http_proxy: ${http_proxy} http_proxy: ${http_proxy}
https_proxy: ${https_proxy} https_proxy: ${https_proxy}
HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
HF_HUB_DISABLE_PROGRESS_BARS: 1 HF_HUB_DISABLE_PROGRESS_BARS: 1
HF_HUB_ENABLE_HF_TRANSFER: 0 HF_HUB_ENABLE_HF_TRANSFER: 0
HABANA_VISIBLE_DEVICES: ${llm_service_devices} HABANA_VISIBLE_DEVICES: ${llm_service_devices}
OMPI_MCA_btl_vader_single_copy_mechanism: none OMPI_MCA_btl_vader_single_copy_mechanism: none
ENABLE_HPU_GRAPH: true
LIMIT_HPU_GRAPH: true
USE_FLASH_ATTENTION: true
FLASH_ATTENTION_RECOMPUTE: true
runtime: habana runtime: habana
cap_add: cap_add:
- SYS_NICE - SYS_NICE
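After `docker compose up -d`, a minimal sketch for confirming the newly added performance flags actually reached the running container (using the `tgi-gaudi-server` container name from this file):

```
# The four HPU graph / flash-attention flags should all print as 'true'.
docker exec tgi-gaudi-server env | \
  grep -E 'ENABLE_HPU_GRAPH|LIMIT_HPU_GRAPH|USE_FLASH_ATTENTION|FLASH_ATTENTION_RECOMPUTE'
```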

View File

@@ -25,7 +25,7 @@ services:
TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
tgi-guardrails-service: tgi-guardrails-service:
image: ghcr.io/huggingface/tgi-gaudi:2.0.1 image: ghcr.io/huggingface/tgi-gaudi:2.0.5
container_name: tgi-guardrails-server container_name: tgi-guardrails-server
ports: ports:
- "8088:80" - "8088:80"
@@ -35,11 +35,15 @@ services:
no_proxy: ${no_proxy} no_proxy: ${no_proxy}
http_proxy: ${http_proxy} http_proxy: ${http_proxy}
https_proxy: ${https_proxy} https_proxy: ${https_proxy}
HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
HF_HUB_DISABLE_PROGRESS_BARS: 1 HF_HUB_DISABLE_PROGRESS_BARS: 1
HF_HUB_ENABLE_HF_TRANSFER: 0 HF_HUB_ENABLE_HF_TRANSFER: 0
HABANA_VISIBLE_DEVICES: all HABANA_VISIBLE_DEVICES: all
OMPI_MCA_btl_vader_single_copy_mechanism: none OMPI_MCA_btl_vader_single_copy_mechanism: none
ENABLE_HPU_GRAPH: true
LIMIT_HPU_GRAPH: true
USE_FLASH_ATTENTION: true
FLASH_ATTENTION_RECOMPUTE: true
runtime: habana runtime: habana
cap_add: cap_add:
- SYS_NICE - SYS_NICE
@@ -60,7 +64,7 @@ services:
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
restart: unless-stopped restart: unless-stopped
tei-embedding-service: tei-embedding-service:
image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest} image: ghcr.io/huggingface/tei-gaudi:latest
container_name: tei-embedding-gaudi-server container_name: tei-embedding-gaudi-server
ports: ports:
- "8090:80" - "8090:80"
@@ -141,7 +145,7 @@ services:
HF_HUB_ENABLE_HF_TRANSFER: 0 HF_HUB_ENABLE_HF_TRANSFER: 0
restart: unless-stopped restart: unless-stopped
tgi-service: tgi-service:
image: ghcr.io/huggingface/tgi-gaudi:2.0.1 image: ghcr.io/huggingface/tgi-gaudi:2.0.5
container_name: tgi-gaudi-server container_name: tgi-gaudi-server
ports: ports:
- "8008:80" - "8008:80"
@@ -151,11 +155,15 @@ services:
no_proxy: ${no_proxy} no_proxy: ${no_proxy}
http_proxy: ${http_proxy} http_proxy: ${http_proxy}
https_proxy: ${https_proxy} https_proxy: ${https_proxy}
HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
HF_HUB_DISABLE_PROGRESS_BARS: 1 HF_HUB_DISABLE_PROGRESS_BARS: 1
HF_HUB_ENABLE_HF_TRANSFER: 0 HF_HUB_ENABLE_HF_TRANSFER: 0
HABANA_VISIBLE_DEVICES: all HABANA_VISIBLE_DEVICES: all
OMPI_MCA_btl_vader_single_copy_mechanism: none OMPI_MCA_btl_vader_single_copy_mechanism: none
ENABLE_HPU_GRAPH: true
LIMIT_HPU_GRAPH: true
USE_FLASH_ATTENTION: true
FLASH_ATTENTION_RECOMPUTE: true
runtime: habana runtime: habana
cap_add: cap_add:
- SYS_NICE - SYS_NICE

View File

@@ -25,7 +25,7 @@ services:
TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
tei-embedding-service: tei-embedding-service:
image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest} image: ghcr.io/huggingface/tei-gaudi:latest
container_name: tei-embedding-gaudi-server container_name: tei-embedding-gaudi-server
ports: ports:
- "8090:80" - "8090:80"
@@ -108,7 +108,7 @@ services:
# HF_HUB_ENABLE_HF_TRANSFER: 0 # HF_HUB_ENABLE_HF_TRANSFER: 0
# restart: unless-stopped # restart: unless-stopped
tgi-service: tgi-service:
image: ghcr.io/huggingface/tgi-gaudi:2.0.1 image: ghcr.io/huggingface/tgi-gaudi:2.0.5
container_name: tgi-gaudi-server container_name: tgi-gaudi-server
ports: ports:
- "8005:80" - "8005:80"
@@ -118,11 +118,15 @@ services:
no_proxy: ${no_proxy} no_proxy: ${no_proxy}
http_proxy: ${http_proxy} http_proxy: ${http_proxy}
https_proxy: ${https_proxy} https_proxy: ${https_proxy}
HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
HF_HUB_DISABLE_PROGRESS_BARS: 1 HF_HUB_DISABLE_PROGRESS_BARS: 1
HF_HUB_ENABLE_HF_TRANSFER: 0 HF_HUB_ENABLE_HF_TRANSFER: 0
HABANA_VISIBLE_DEVICES: all HABANA_VISIBLE_DEVICES: all
OMPI_MCA_btl_vader_single_copy_mechanism: none OMPI_MCA_btl_vader_single_copy_mechanism: none
ENABLE_HPU_GRAPH: true
LIMIT_HPU_GRAPH: true
USE_FLASH_ATTENTION: true
FLASH_ATTENTION_RECOMPUTE: true
runtime: habana runtime: habana
cap_add: cap_add:
- SYS_NICE - SYS_NICE

View File

@@ -25,7 +25,7 @@ services:
TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
tei-embedding-service: tei-embedding-service:
image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest} image: ghcr.io/huggingface/tei-gaudi:latest
container_name: tei-embedding-gaudi-server container_name: tei-embedding-gaudi-server
ports: ports:
- "8090:80" - "8090:80"
@@ -73,7 +73,7 @@ services:
INDEX_NAME: ${INDEX_NAME} INDEX_NAME: ${INDEX_NAME}
restart: unless-stopped restart: unless-stopped
tei-reranking-service: tei-reranking-service:
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.2 image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
container_name: tei-reranking-gaudi-server container_name: tei-reranking-gaudi-server
ports: ports:
- "8808:80" - "8808:80"

View File

@@ -25,7 +25,7 @@ services:
TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
tei-embedding-service: tei-embedding-service:
image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest} image: ghcr.io/huggingface/tei-gaudi:latest
container_name: tei-embedding-gaudi-server container_name: tei-embedding-gaudi-server
ports: ports:
- "8090:80" - "8090:80"
@@ -73,7 +73,7 @@ services:
INDEX_NAME: ${INDEX_NAME} INDEX_NAME: ${INDEX_NAME}
restart: unless-stopped restart: unless-stopped
tei-reranking-service: tei-reranking-service:
image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.2 image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
container_name: tei-reranking-gaudi-server container_name: tei-reranking-gaudi-server
ports: ports:
- "8808:80" - "8808:80"

View File

@@ -25,7 +25,7 @@ services:
TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
tei-embedding-service: tei-embedding-service:
image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest} image: ghcr.io/huggingface/tei-gaudi:latest
container_name: tei-embedding-gaudi-server container_name: tei-embedding-gaudi-server
ports: ports:
- "8090:80" - "8090:80"
@@ -75,7 +75,7 @@ services:
INDEX_NAME: ${INDEX_NAME} INDEX_NAME: ${INDEX_NAME}
restart: unless-stopped restart: unless-stopped
tgi-service: tgi-service:
image: ghcr.io/huggingface/tgi-gaudi:2.0.1 image: ghcr.io/huggingface/tgi-gaudi:2.0.5
container_name: tgi-gaudi-server container_name: tgi-gaudi-server
ports: ports:
- "8005:80" - "8005:80"
@@ -85,11 +85,15 @@ services:
no_proxy: ${no_proxy} no_proxy: ${no_proxy}
http_proxy: ${http_proxy} http_proxy: ${http_proxy}
https_proxy: ${https_proxy} https_proxy: ${https_proxy}
HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
HF_HUB_DISABLE_PROGRESS_BARS: 1 HF_HUB_DISABLE_PROGRESS_BARS: 1
HF_HUB_ENABLE_HF_TRANSFER: 0 HF_HUB_ENABLE_HF_TRANSFER: 0
HABANA_VISIBLE_DEVICES: all HABANA_VISIBLE_DEVICES: all
OMPI_MCA_btl_vader_single_copy_mechanism: none OMPI_MCA_btl_vader_single_copy_mechanism: none
ENABLE_HPU_GRAPH: true
LIMIT_HPU_GRAPH: true
USE_FLASH_ATTENTION: true
FLASH_ATTENTION_RECOMPUTE: true
runtime: habana runtime: habana
cap_add: cap_add:
- SYS_NICE - SYS_NICE

View File

@@ -56,16 +56,16 @@ f810f3b4d329 opea/embedding-tei:latest "python e
2fa17d84605f opea/dataprep-redis:latest "python prepare_doc_…" 2 minutes ago Up 2 minutes 0.0.0.0:6007->6007/tcp, :::6007->6007/tcp dataprep-redis-server 2fa17d84605f opea/dataprep-redis:latest "python prepare_doc_…" 2 minutes ago Up 2 minutes 0.0.0.0:6007->6007/tcp, :::6007->6007/tcp dataprep-redis-server
69e1fb59e92c opea/retriever-redis:latest "/home/user/comps/re…" 2 minutes ago Up 2 minutes 0.0.0.0:7000->7000/tcp, :::7000->7000/tcp retriever-redis-server 69e1fb59e92c opea/retriever-redis:latest "/home/user/comps/re…" 2 minutes ago Up 2 minutes 0.0.0.0:7000->7000/tcp, :::7000->7000/tcp retriever-redis-server
313b9d14928a opea/reranking-tei:latest "python reranking_te…" 2 minutes ago Up 2 minutes 0.0.0.0:8000->8000/tcp, :::8000->8000/tcp reranking-tei-gaudi-server 313b9d14928a opea/reranking-tei:latest "python reranking_te…" 2 minutes ago Up 2 minutes 0.0.0.0:8000->8000/tcp, :::8000->8000/tcp reranking-tei-gaudi-server
05c40b636239 ghcr.io/huggingface/tgi-gaudi:1.2.1 "text-generation-lau…" 2 minutes ago Exited (1) About a minute ago tgi-gaudi-server 05c40b636239 ghcr.io/huggingface/tgi-gaudi:2.0.5 "text-generation-lau…" 2 minutes ago Exited (1) About a minute ago tgi-gaudi-server
174bd43fa6b5 opea/tei-gaudi:latest "text-embeddings-rou…" 2 minutes ago Up 2 minutes 0.0.0.0:8090->80/tcp, :::8090->80/tcp tei-embedding-gaudi-server 174bd43fa6b5 ghcr.io/huggingface/tei-gaudi:latest "text-embeddings-rou…" 2 minutes ago Up 2 minutes 0.0.0.0:8090->80/tcp, :::8090->80/tcp tei-embedding-gaudi-server
74084469aa33 redis/redis-stack:7.2.0-v9 "/entrypoint.sh" 2 minutes ago Up 2 minutes 0.0.0.0:6379->6379/tcp, :::6379->6379/tcp, 0.0.0.0:8001->8001/tcp, :::8001->8001/tcp redis-vector-db 74084469aa33 redis/redis-stack:7.2.0-v9 "/entrypoint.sh" 2 minutes ago Up 2 minutes 0.0.0.0:6379->6379/tcp, :::6379->6379/tcp, 0.0.0.0:8001->8001/tcp, :::8001->8001/tcp redis-vector-db
88399dbc9e43 ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 "text-embeddings-rou…" 2 minutes ago Up 2 minutes 0.0.0.0:8808->80/tcp, :::8808->80/tcp tei-reranking-gaudi-server 88399dbc9e43 ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 "text-embeddings-rou…" 2 minutes ago Up 2 minutes 0.0.0.0:8808->80/tcp, :::8808->80/tcp tei-reranking-gaudi-server
``` ```
In this case, `ghcr.io/huggingface/tgi-gaudi:1.2.1` exited. In this case, `ghcr.io/huggingface/tgi-gaudi:2.0.5` exited.
``` ```
05c40b636239 ghcr.io/huggingface/tgi-gaudi:1.2.1 "text-generation-lau…" 2 minutes ago Exited (1) About a minute ago tgi-gaudi-server 05c40b636239 ghcr.io/huggingface/tgi-gaudi:2.0.5 "text-generation-lau…" 2 minutes ago Exited (1) About a minute ago tgi-gaudi-server
``` ```
Next we can check the container logs to learn what happened during Docker startup. Next we can check the container logs to learn what happened during Docker startup.
@@ -76,7 +76,7 @@ Check the log of container by:
`docker logs <CONTAINER ID> -t` `docker logs <CONTAINER ID> -t`
View the logs of `ghcr.io/huggingface/tgi-gaudi:1.2.1` View the logs of `ghcr.io/huggingface/tgi-gaudi:2.0.5`
`docker logs 05c40b636239 -t` `docker logs 05c40b636239 -t`
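More generally, a small loop (a sketch, not part of the original guide) lists every exited container together with the tail of its log, which speeds up this kind of triage:

```
# Print the last log lines of every container that exited during startup.
for id in $(docker ps -a --filter 'status=exited' -q); do
  echo "--- $(docker ps -a --filter "id=$id" --format '{{.Names}} ({{.Image}})')"
  docker logs "$id" -t 2>&1 | tail -n 20
done
```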
@@ -105,7 +105,7 @@ So just make sure the devices are available.
Here is another failure example: Here is another failure example:
``` ```
f7a08f9867f9 ghcr.io/huggingface/tgi-gaudi:1.2.1 "text-generation-lau…" 16 seconds ago Exited (2) 14 seconds ago tgi-gaudi-server f7a08f9867f9 ghcr.io/huggingface/tgi-gaudi:2.0.5 "text-generation-lau…" 16 seconds ago Exited (2) 14 seconds ago tgi-gaudi-server
``` ```
Check the log by `docker logs f7a08f9867f9 -t`. Check the log by `docker logs f7a08f9867f9 -t`.
@@ -122,7 +122,7 @@ View the docker input parameters in `./ChatQnA/docker_compose/intel/hpu/gaudi/co
``` ```
tgi-service: tgi-service:
image: ghcr.io/huggingface/tgi-gaudi:1.2.1 image: ghcr.io/huggingface/tgi-gaudi:2.0.5
container_name: tgi-gaudi-server container_name: tgi-gaudi-server
ports: ports:
- "8008:80" - "8008:80"
@@ -131,9 +131,13 @@ View the docker input parameters in `./ChatQnA/docker_compose/intel/hpu/gaudi/co
environment: environment:
http_proxy: ${http_proxy} http_proxy: ${http_proxy}
https_proxy: ${https_proxy} https_proxy: ${https_proxy}
HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
HABANA_VISIBLE_DEVICES: all HABANA_VISIBLE_DEVICES: all
OMPI_MCA_btl_vader_single_copy_mechanism: none OMPI_MCA_btl_vader_single_copy_mechanism: none
ENABLE_HPU_GRAPH: true
LIMIT_HPU_GRAPH: true
USE_FLASH_ATTENTION: true
FLASH_ATTENTION_RECOMPUTE: true
runtime: habana runtime: habana
cap_add: cap_add:
- SYS_NICE - SYS_NICE

View File

@@ -125,12 +125,6 @@ services:
dockerfile: comps/guardrails/llama_guard/langchain/Dockerfile dockerfile: comps/guardrails/llama_guard/langchain/Dockerfile
extends: chatqna extends: chatqna
image: ${REGISTRY:-opea}/guardrails-tgi:${TAG:-latest} image: ${REGISTRY:-opea}/guardrails-tgi:${TAG:-latest}
tei-gaudi:
build:
context: tei-gaudi
dockerfile: Dockerfile-hpu
extends: chatqna
image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest}
vllm: vllm:
build: build:
context: vllm context: vllm

View File

@@ -27,8 +27,8 @@ The ChatQnA uses the below prebuilt images if you choose a Xeon deployment
Should you desire to use the Gaudi accelerator, two alternate images are used for the embedding and llm services. Should you desire to use the Gaudi accelerator, two alternate images are used for the embedding and llm services.
For Gaudi: For Gaudi:
- tei-embedding-service: opea/tei-gaudi:latest - tei-embedding-service: ghcr.io/huggingface/tei-gaudi:latest
- tgi-service: ghcr.io/huggingface/tgi-gaudi:1.2.1 - tgi-service: ghcr.io/huggingface/tgi-gaudi:2.0.5
> [NOTE] > [NOTE]
> Please refer to [Xeon README](https://github.com/opea-project/GenAIExamples/blob/main/ChatQnA/docker_compose/intel/cpu/xeon/README.md) or [Gaudi README](https://github.com/opea-project/GenAIExamples/blob/main/ChatQnA/docker_compose/intel/hpu/gaudi/README.md) to build the OPEA images. These too will be available on Docker Hub soon to simplify use. > Please refer to [Xeon README](https://github.com/opea-project/GenAIExamples/blob/main/ChatQnA/docker_compose/intel/cpu/xeon/README.md) or [Gaudi README](https://github.com/opea-project/GenAIExamples/blob/main/ChatQnA/docker_compose/intel/hpu/gaudi/README.md) to build the OPEA images. These too will be available on Docker Hub soon to simplify use.
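Both Gaudi images are pulled directly from GHCR, matching the `docker pull` commands used in the test scripts later in this change:

```
docker pull ghcr.io/huggingface/tei-gaudi:latest
docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
```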

View File

@@ -1477,7 +1477,7 @@ spec:
runAsUser: 1000 runAsUser: 1000
seccompProfile: seccompProfile:
type: RuntimeDefault type: RuntimeDefault
image: "ghcr.io/huggingface/tgi-gaudi:2.0.1" image: "ghcr.io/huggingface/tgi-gaudi:2.0.5"
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
volumeMounts: volumeMounts:
- mountPath: /data - mountPath: /data
@@ -1558,7 +1558,7 @@ spec:
runAsUser: 1000 runAsUser: 1000
seccompProfile: seccompProfile:
type: RuntimeDefault type: RuntimeDefault
image: "ghcr.io/huggingface/tgi-gaudi:2.0.1" image: "ghcr.io/huggingface/tgi-gaudi:2.0.5"
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
volumeMounts: volumeMounts:
- mountPath: /data - mountPath: /data

View File

@@ -1298,7 +1298,7 @@ spec:
runAsUser: 1000 runAsUser: 1000
seccompProfile: seccompProfile:
type: RuntimeDefault type: RuntimeDefault
image: "ghcr.io/huggingface/tgi-gaudi:2.0.1" image: "ghcr.io/huggingface/tgi-gaudi:2.0.5"
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
volumeMounts: volumeMounts:
- mountPath: /data - mountPath: /data

View File

@@ -17,14 +17,14 @@ ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() { function build_docker_images() {
cd $WORKPATH/docker_image_build cd $WORKPATH/docker_image_build
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../ git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
git clone https://github.com/huggingface/tei-gaudi
echo "Build all the images with --no-cache, check docker_image_build.log for details..." echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="chatqna-guardrails chatqna-ui dataprep-redis embedding-tei retriever-redis reranking-tei llm-tgi tei-gaudi guardrails-tgi" service_list="chatqna-guardrails chatqna-ui dataprep-redis embedding-tei retriever-redis reranking-tei llm-tgi guardrails-tgi"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1 docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
docker pull ghcr.io/huggingface/tei-gaudi:latest
docker images && sleep 1s docker images && sleep 1s
} }

View File

@@ -17,14 +17,14 @@ ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() { function build_docker_images() {
cd $WORKPATH/docker_image_build cd $WORKPATH/docker_image_build
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../ git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
git clone https://github.com/huggingface/tei-gaudi
echo "Build all the images with --no-cache, check docker_image_build.log for details..." echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="chatqna-no-wrapper chatqna-ui dataprep-redis retriever-redis tei-gaudi" service_list="chatqna-no-wrapper chatqna-ui dataprep-redis retriever-redis"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1 docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
docker pull ghcr.io/huggingface/tei-gaudi:latest
docker images && sleep 1s docker images && sleep 1s
} }

View File

@@ -22,7 +22,7 @@ function build_docker_images() {
service_list="chatqna-no-wrapper chatqna-ui chatqna-conversation-ui dataprep-redis retriever-redis" service_list="chatqna-no-wrapper chatqna-ui chatqna-conversation-ui dataprep-redis retriever-redis"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1 docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
docker images && sleep 1s docker images && sleep 1s

View File

@@ -17,14 +17,14 @@ ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() { function build_docker_images() {
cd $WORKPATH/docker_image_build cd $WORKPATH/docker_image_build
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../ git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
git clone https://github.com/huggingface/tei-gaudi
echo "Build all the images with --no-cache, check docker_image_build.log for details..." echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="chatqna chatqna-ui dataprep-redis embedding-tei retriever-redis reranking-tei llm-tgi tei-gaudi nginx" service_list="chatqna chatqna-ui dataprep-redis embedding-tei retriever-redis reranking-tei llm-tgi nginx"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1 docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
docker pull ghcr.io/huggingface/tei-gaudi:latest
docker images && sleep 1s docker images && sleep 1s
} }

View File

@@ -22,7 +22,7 @@ function build_docker_images() {
service_list="chatqna chatqna-ui chatqna-conversation-ui dataprep-redis embedding-tei retriever-redis reranking-tei llm-tgi nginx" service_list="chatqna chatqna-ui chatqna-conversation-ui dataprep-redis embedding-tei retriever-redis reranking-tei llm-tgi nginx"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1 docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
docker images && sleep 1s docker images && sleep 1s

View File

@@ -17,13 +17,13 @@ ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() { function build_docker_images() {
cd $WORKPATH/docker_image_build cd $WORKPATH/docker_image_build
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../ git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
git clone https://github.com/huggingface/tei-gaudi
echo "Build all the images with --no-cache, check docker_image_build.log for details..." echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="chatqna chatqna-ui dataprep-redis embedding-tei retriever-redis reranking-tei tei-gaudi llm-vllm-hpu llm-vllm" service_list="chatqna chatqna-ui dataprep-redis embedding-tei retriever-redis reranking-tei llm-vllm-hpu llm-vllm"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.2 docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
docker pull ghcr.io/huggingface/tei-gaudi:latest
docker images && sleep 1s docker images && sleep 1s
} }

View File

@@ -23,7 +23,7 @@ function build_docker_images() {
service_list="chatqna chatqna-ui dataprep-redis embedding-tei retriever-redis reranking-tei llm-vllm vllm" service_list="chatqna chatqna-ui dataprep-redis embedding-tei retriever-redis reranking-tei llm-vllm vllm"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1 docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
docker images && sleep 1s docker images && sleep 1s

View File

@@ -17,13 +17,13 @@ ip_address=$(hostname -I | awk '{print $1}')
function build_docker_images() { function build_docker_images() {
cd $WORKPATH/docker_image_build cd $WORKPATH/docker_image_build
git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../ git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
git clone https://github.com/huggingface/tei-gaudi
echo "Build all the images with --no-cache, check docker_image_build.log for details..." echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="chatqna chatqna-ui dataprep-redis embedding-tei retriever-redis reranking-tei tei-gaudi llm-vllm-ray-hpu llm-vllm-ray" service_list="chatqna chatqna-ui dataprep-redis embedding-tei retriever-redis reranking-tei llm-vllm-ray-hpu llm-vllm-ray"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.2 docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
docker pull ghcr.io/huggingface/tei-gaudi:latest
docker images && sleep 1s docker images && sleep 1s
} }

View File

@@ -17,14 +17,14 @@ ip_address=$(hostname -I | awk '{print $1}')
 function build_docker_images() {
     cd $WORKPATH/docker_image_build
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
-    git clone https://github.com/huggingface/tei-gaudi
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="chatqna-without-rerank chatqna-ui dataprep-redis embedding-tei retriever-redis llm-tgi tei-gaudi"
+    service_list="chatqna-without-rerank chatqna-ui dataprep-redis embedding-tei retriever-redis llm-tgi"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
-    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1
+    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
     docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
+    docker pull ghcr.io/huggingface/tei-gaudi:latest
     docker images && sleep 1s
 }

View File

@@ -22,7 +22,7 @@ function build_docker_images() {
     service_list="chatqna-without-rerank chatqna-ui chatqna-conversation-ui dataprep-redis embedding-tei retriever-redis llm-tgi"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
-    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1
+    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
     docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
     docker images && sleep 1s

View File

@@ -6,7 +6,7 @@ opea_micro_services:
   tgi-service:
     host: ${TGI_SERVICE_IP}
     ports: ${TGI_SERVICE_PORT}
-    image: ghcr.io/huggingface/tgi-gaudi:2.0.1
+    image: ghcr.io/huggingface/tgi-gaudi:2.0.5
     volumes:
       - "./data:/data"
     runtime: habana
@@ -17,7 +17,11 @@ opea_micro_services:
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      HABANA_VISIBLE_DEVICES: all
      OMPI_MCA_btl_vader_single_copy_mechanism: none
-      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      ENABLE_HPU_GRAPH: true
+      LIMIT_HPU_GRAPH: true
+      USE_FLASH_ATTENTION: true
+      FLASH_ATTENTION_RECOMPUTE: true
     model-id: ${LLM_MODEL_ID}
   llm:
     host: ${LLM_SERVICE_HOST_IP}

View File

@@ -3,7 +3,7 @@
 services:
   tgi-service:
-    image: ghcr.io/huggingface/tgi-gaudi:2.0.1
+    image: ghcr.io/huggingface/tgi-gaudi:2.0.5
     container_name: tgi-gaudi-server
     ports:
       - "8028:80"
@@ -15,7 +15,11 @@ services:
       https_proxy: ${https_proxy}
       HABANA_VISIBLE_DEVICES: all
       OMPI_MCA_btl_vader_single_copy_mechanism: none
-      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      ENABLE_HPU_GRAPH: true
+      LIMIT_HPU_GRAPH: true
+      USE_FLASH_ATTENTION: true
+      FLASH_ATTENTION_RECOMPUTE: true
     runtime: habana
     cap_add:
       - SYS_NICE
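The four switches added above (ENABLE_HPU_GRAPH, LIMIT_HPU_GRAPH, USE_FLASH_ATTENTION, FLASH_ATTENTION_RECOMPUTE) are tgi-gaudi runtime toggles, and the same block is applied to every Gaudi compose file in this commit. A quick smoke test once this service is up — a sketch assuming the host port 8028 from the hunk above and the standard TGI /generate API:

```bash
# Send a short generation request to the tgi-gaudi server (port from the compose file above)
curl http://localhost:8028/generate \
  -X POST \
  -H 'Content-Type: application/json' \
  -d '{"inputs": "def fib(n):", "parameters": {"max_new_tokens": 32}}'
```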

View File

@@ -405,7 +405,7 @@ spec:
           runAsUser: 1000
           seccompProfile:
             type: RuntimeDefault
-        image: "ghcr.io/huggingface/tgi-gaudi:2.0.1"
+        image: "ghcr.io/huggingface/tgi-gaudi:2.0.5"
         imagePullPolicy: IfNotPresent
         volumeMounts:
           - mountPath: /data

View File

@@ -22,7 +22,7 @@ function build_docker_images() {
     service_list="codegen codegen-ui llm-tgi"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
-    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1
+    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
     docker images && sleep 1s
 }

View File

@@ -6,7 +6,7 @@ opea_micro_services:
   tgi-service:
     host: ${TGI_SERVICE_IP}
     ports: ${TGI_SERVICE_PORT}
-    image: ghcr.io/huggingface/tgi-gaudi:2.0.1
+    image: ghcr.io/huggingface/tgi-gaudi:2.0.5
     volumes:
       - "./data:/data"
     runtime: habana
@@ -17,7 +17,11 @@ opea_micro_services:
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      HABANA_VISIBLE_DEVICES: all
      OMPI_MCA_btl_vader_single_copy_mechanism: none
-      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      ENABLE_HPU_GRAPH: true
+      LIMIT_HPU_GRAPH: true
+      USE_FLASH_ATTENTION: true
+      FLASH_ATTENTION_RECOMPUTE: true
     model-id: ${LLM_MODEL_ID}
   llm:
     host: ${LLM_SERVICE_HOST_IP}

View File

@@ -3,7 +3,7 @@
 services:
   tgi-service:
-    image: ghcr.io/huggingface/tgi-gaudi:2.0.1
+    image: ghcr.io/huggingface/tgi-gaudi:2.0.5
     container_name: codetrans-tgi-service
     ports:
       - "8008:80"
@@ -15,7 +15,11 @@ services:
       https_proxy: ${https_proxy}
       HABANA_VISIBLE_DEVICES: all
       OMPI_MCA_btl_vader_single_copy_mechanism: none
-      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      ENABLE_HPU_GRAPH: true
+      LIMIT_HPU_GRAPH: true
+      USE_FLASH_ATTENTION: true
+      FLASH_ATTENTION_RECOMPUTE: true
     runtime: habana
     cap_add:
       - SYS_NICE

View File

@@ -405,7 +405,7 @@ spec:
           runAsUser: 1000
           seccompProfile:
             type: RuntimeDefault
-        image: "ghcr.io/huggingface/tgi-gaudi:2.0.1"
+        image: "ghcr.io/huggingface/tgi-gaudi:2.0.5"
         imagePullPolicy: IfNotPresent
         volumeMounts:
           - mountPath: /data

View File

@@ -22,7 +22,7 @@ function build_docker_images() {
     service_list="codetrans codetrans-ui llm-tgi nginx"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
-    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1
+    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
     docker images && sleep 1s
 }

View File

@@ -28,7 +28,7 @@ services:
       TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
   tei-embedding-service:
-    image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest}
+    image: ghcr.io/huggingface/tei-gaudi:latest
     container_name: tei-embedding-gaudi-server
     ports:
       - "8090:80"

View File

@@ -35,9 +35,3 @@ services:
       dockerfile: comps/dataprep/redis/langchain/Dockerfile
     extends: doc-index-retriever
     image: ${REGISTRY:-opea}/dataprep-redis:${TAG:-latest}
-  tei-gaudi:
-    build:
-      context: tei-gaudi
-      dockerfile: Dockerfile-hpu
-    extends: doc-index-retriever
-    image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest}

View File

@@ -19,14 +19,12 @@ function build_docker_images() {
     if [ ! -d "GenAIComps" ] ; then
        git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
     fi
-    if [ ! -d "tei-gaudi" ] ; then
-       git clone https://github.com/huggingface/tei-gaudi
-    fi
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
     docker compose -f build.yaml build --no-cache > ${LOG_PATH}/docker_image_build.log
     docker pull redis/redis-stack:7.2.0-v9
+    docker pull ghcr.io/huggingface/tei-gaudi:latest
     docker images && sleep 1s
 }

View File

@@ -3,7 +3,7 @@
 services:
   tgi-service:
-    image: ghcr.io/huggingface/tgi-gaudi:2.0.1
+    image: ghcr.io/huggingface/tgi-gaudi:2.0.5
     container_name: tgi-gaudi-server
     ports:
       - "8008:80"
@@ -11,8 +11,11 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
-      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      ENABLE_HPU_GRAPH: true
+      LIMIT_HPU_GRAPH: true
+      USE_FLASH_ATTENTION: true
+      FLASH_ATTENTION_RECOMPUTE: true
     volumes:
       - "./data:/data"
     runtime: habana

View File

@@ -6,7 +6,7 @@ opea_micro_services:
   tgi-service:
     host: ${TGI_SERVICE_IP}
     ports: ${TGI_SERVICE_PORT}
-    image: ghcr.io/huggingface/tgi-gaudi:2.0.1
+    image: ghcr.io/huggingface/tgi-gaudi:2.0.5
     volumes:
       - "./data:/data"
     runtime: habana
@@ -17,7 +17,11 @@ opea_micro_services:
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      HABANA_VISIBLE_DEVICES: all
      OMPI_MCA_btl_vader_single_copy_mechanism: none
-      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      ENABLE_HPU_GRAPH: true
+      LIMIT_HPU_GRAPH: true
+      USE_FLASH_ATTENTION: true
+      FLASH_ATTENTION_RECOMPUTE: true
     model-id: ${LLM_MODEL_ID}
   llm:
     host: ${LLM_SERVICE_HOST_IP}

View File

@@ -9,7 +9,7 @@ The DocSum application is defined as a Custom Resource (CR) file that the above
 The DocSum pipeline uses prebuilt images. The Xeon version uses the prebuilt image llm-docsum-tgi:latest which internally leverages the
 the image ghcr.io/huggingface/text-generation-inference:sha-e4201f4-intel-cpu. The service is called tgi-svc. Meanwhile, the Gaudi version launches the
-service tgi-gaudi-svc, which uses the image ghcr.io/huggingface/tgi-gaudi:1.2.1. Both TGI model services serve the model specified in the LLM_MODEL_ID variable that is exported by you. In the below example we use Intel/neural-chat-7b-v3-3.
+service tgi-gaudi-svc, which uses the image `ghcr.io/huggingface/tgi-gaudi:2.0.5`. Both TGI model services serve the model specified in the LLM_MODEL_ID variable that is exported by you. In the below example we use Intel/neural-chat-7b-v3-3.
 [NOTE]
 Refer to [Docker Xeon README](https://github.com/opea-project/GenAIExamples/blob/main/DocSum/docker_compose/intel/cpu/xeon/README.md) or
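Per the README paragraph above, both TGI services read the model to serve from LLM_MODEL_ID, which the user exports before deploying; a minimal sketch of that step, using the model the README names:

```bash
# Model served by tgi-svc / tgi-gaudi-svc (example from the README above)
export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
```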

View File

@@ -405,7 +405,7 @@ spec:
           runAsUser: 1000
           seccompProfile:
             type: RuntimeDefault
-        image: "ghcr.io/huggingface/tgi-gaudi:2.0.1"
+        image: "ghcr.io/huggingface/tgi-gaudi:2.0.5"
         imagePullPolicy: IfNotPresent
         volumeMounts:
           - mountPath: /data

View File

@@ -22,7 +22,7 @@ function build_docker_images() {
     service_list="docsum docsum-ui llm-docsum-tgi"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
-    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1
+    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
     docker images && sleep 1s
 }

View File

@@ -3,7 +3,7 @@
 services:
   tgi-service:
-    image: ghcr.io/huggingface/tgi-gaudi:2.0.1
+    image: ghcr.io/huggingface/tgi-gaudi:2.0.5
     container_name: tgi-gaudi-server
     ports:
       - "8008:80"
@@ -18,6 +18,10 @@ services:
       HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
       PREFILL_BATCH_BUCKET_SIZE: 1
       BATCH_BUCKET_SIZE: 8
+      ENABLE_HPU_GRAPH: true
+      LIMIT_HPU_GRAPH: true
+      USE_FLASH_ATTENTION: true
+      FLASH_ATTENTION_RECOMPUTE: true
     runtime: habana
     cap_add:
       - SYS_NICE

View File

@@ -6,7 +6,7 @@ opea_micro_services:
   tgi-service:
     host: ${TGI_SERVICE_IP}
     ports: ${TGI_SERVICE_PORT}
-    image: ghcr.io/huggingface/tgi-gaudi:1.2.1
+    image: ghcr.io/huggingface/tgi-gaudi:2.0.5
     volumes:
       - "./data:/data"
     runtime: habana
@@ -14,10 +14,13 @@ opea_micro_services:
       - SYS_NICE
     ipc: host
     environment:
-      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
       HABANA_VISIBLE_DEVICES: all
       OMPI_MCA_btl_vader_single_copy_mechanism: none
-      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      ENABLE_HPU_GRAPH: true
+      LIMIT_HPU_GRAPH: true
+      USE_FLASH_ATTENTION: true
+      FLASH_ATTENTION_RECOMPUTE: true
     model-id: ${LLM_MODEL_ID}
   llm:
     host: ${LLM_SERVICE_HOST_IP}

View File

@@ -39,7 +39,15 @@ spec:
           value: "8"
         - name: PORT
           value: "80"
-        image: ghcr.io/huggingface/tgi-gaudi:2.0.1
+        - name: ENABLE_HPU_GRAPH
+          value: 'true'
+        - name: LIMIT_HPU_GRAPH
+          value: 'true'
+        - name: USE_FLASH_ATTENTION
+          value: 'true'
+        - name: FLASH_ATTENTION_RECOMPUTE
+          value: 'true'
+        image: ghcr.io/huggingface/tgi-gaudi:2.0.5
         imagePullPolicy: IfNotPresent
         securityContext:
           capabilities:
@@ -72,7 +80,7 @@ spec:
       volumes:
         - name: model-volume
          hostPath:
-            path: /home/sdp/cesg
+            path: /mnt/models
             type: Directory
         - name: shm
          emptyDir:
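To confirm the new toggles actually reached the running container after applying the manifest, something along these lines works — a hedged sketch, since the pod name depends on the deployment's generated suffix and is illustrative here:

```bash
# Find the TGI Gaudi pod, then grep its environment for the added switches
kubectl get pods
kubectl exec <tgi-gaudi-pod-name> -- env | grep -E 'HPU_GRAPH|FLASH_ATTENTION'
```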

View File

@@ -22,7 +22,7 @@ function build_docker_images() {
     service_list="faqgen faqgen-ui llm-faqgen-tgi"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
-    docker pull ghcr.io/huggingface/tgi-gaudi:1.2.1
+    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
     docker images && sleep 1s
 }

View File

@@ -32,18 +32,7 @@ docker build --no-cache -t opea/reranking-tei:latest --build-arg https_proxy=$ht
 docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
 ```
-### 5. Build TEI Gaudi Image
-Since a TEI Gaudi Docker image hasn't been published, we'll need to build it from the [tei-guadi](https://github.com/huggingface/tei-gaudi) repository.
-```bash
-git clone https://github.com/huggingface/tei-gaudi
-cd tei-gaudi/
-docker build --no-cache -f Dockerfile-hpu -t opea/tei-gaudi:latest .
-cd ../..
-```
-### 6. Build MegaService Docker Image
+### 5. Build MegaService Docker Image
 To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `searchqna.py` Python script. Build the MegaService Docker image using the command below:
@@ -62,12 +51,11 @@ docker build --no-cache -t opea/searchqna:latest --build-arg https_proxy=$https_
 Then run the command `docker images`, you will have
-1. `opea/tei-gaudi:latest`
-2. `opea/embedding-tei:latest`
-3. `opea/web-retriever-chroma:latest`
-4. `opea/reranking-tei:latest`
-5. `opea/llm-tgi:latest`
-6. `opea/searchqna:latest`
+1. `opea/embedding-tei:latest`
+2. `opea/web-retriever-chroma:latest`
+3. `opea/reranking-tei:latest`
+4. `opea/llm-tgi:latest`
+5. `opea/searchqna:latest`
 ## 🚀 Set the environment variables
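With the from-source build step deleted from this README, the Gaudi deployment now uses the published embedding image directly; a sketch of the equivalent manual step, mirroring the pull added to the compose and test-script files elsewhere in this commit:

```bash
# Official prebuilt TEI Gaudi image, replacing the local opea/tei-gaudi build
docker pull ghcr.io/huggingface/tei-gaudi:latest
```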

View File

@@ -3,7 +3,7 @@
 services:
   tei-embedding-service:
-    image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest}
+    image: ghcr.io/huggingface/tei-gaudi:latest
     container_name: tei-embedding-gaudi-server
     ports:
       - "3001:80"
@@ -80,7 +80,7 @@ services:
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   tgi-service:
-    image: ghcr.io/huggingface/tgi-gaudi:2.0.1
+    image: ghcr.io/huggingface/tgi-gaudi:2.0.5
     container_name: tgi-gaudi-server
     ports:
       - "3006:80"
@@ -90,11 +90,15 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
       HABANA_VISIBLE_DEVICES: all
       OMPI_MCA_btl_vader_single_copy_mechanism: none
+      ENABLE_HPU_GRAPH: true
+      LIMIT_HPU_GRAPH: true
+      USE_FLASH_ATTENTION: true
+      FLASH_ATTENTION_RECOMPUTE: true
     runtime: habana
     cap_add:
       - SYS_NICE

View File

@@ -41,9 +41,3 @@ services:
       dockerfile: comps/llms/text-generation/tgi/Dockerfile
     extends: searchqna
     image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
-  tei-gaudi:
-    build:
-      context: tei-gaudi
-      dockerfile: Dockerfile-hpu
-    extends: searchqna
-    image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest}

View File

@@ -17,14 +17,14 @@ ip_address=$(hostname -I | awk '{print $1}')
 function build_docker_images() {
     cd $WORKPATH/docker_image_build
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
-    git clone https://github.com/huggingface/tei-gaudi
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="searchqna searchqna-ui embedding-tei web-retriever-chroma reranking-tei llm-tgi tei-gaudi"
+    service_list="searchqna searchqna-ui embedding-tei web-retriever-chroma reranking-tei llm-tgi"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
     docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
-    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1
+    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
+    docker pull ghcr.io/huggingface/tei-gaudi:latest
     docker images && sleep 1s
 }

View File

@@ -3,18 +3,23 @@
 services:
   tgi-service:
-    image: ghcr.io/huggingface/tgi-gaudi:2.0.1
+    image: ghcr.io/huggingface/tgi-gaudi:2.0.5
     container_name: tgi-gaudi-server
     ports:
       - "8008:80"
     environment:
+      no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
       HABANA_VISIBLE_DEVICES: all
       OMPI_MCA_btl_vader_single_copy_mechanism: none
+      ENABLE_HPU_GRAPH: true
+      LIMIT_HPU_GRAPH: true
+      USE_FLASH_ATTENTION: true
+      FLASH_ATTENTION_RECOMPUTE: true
     runtime: habana
     cap_add:
       - SYS_NICE
@@ -31,6 +36,7 @@ services:
       - "9000:9000"
     ipc: host
     environment:
+      no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
       TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
@@ -47,6 +53,7 @@ services:
     ports:
       - "8888:8888"
     environment:
+      - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
       - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
@@ -61,6 +68,7 @@ services:
     ports:
       - "5173:5173"
     environment:
+      - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
       - BASE_URL=${BACKEND_SERVICE_ENDPOINT}

View File

@@ -22,7 +22,7 @@ function build_docker_images() {
     service_list="translation translation-ui llm-tgi nginx"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
-    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1
+    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
     docker images && sleep 1s
 }
@@ -166,7 +166,7 @@ function main() {
     validate_microservices
     validate_megaservice
-    validate_frontend
+    #validate_frontend
     stop_docker
     echo y | docker system prune

View File

@@ -6,7 +6,7 @@ opea_micro_services:
   tgi-service:
     host: ${TGI_SERVICE_IP}
     ports: ${TGI_SERVICE_PORT}
-    image: ghcr.io/huggingface/tgi-gaudi:2.0.1
+    image: ghcr.io/huggingface/tgi-gaudi:2.0.5
     volumes:
       - "./data:/data"
     runtime: habana
@@ -14,10 +14,17 @@ opea_micro_services:
       - SYS_NICE
     ipc: host
     environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
       HABANA_VISIBLE_DEVICES: all
       OMPI_MCA_btl_vader_single_copy_mechanism: none
-      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      ENABLE_HPU_GRAPH: true
+      LIMIT_HPU_GRAPH: true
+      USE_FLASH_ATTENTION: true
+      FLASH_ATTENTION_RECOMPUTE: true
     model-id: ${LLM_MODEL_ID}
   llm:
     host: ${LLM_SERVICE_HOST_IP}
@@ -25,6 +32,9 @@ opea_micro_services:
     image: opea/llm-tgi:latest
     endpoint: /v1/chat/completions
     environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
       TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
   ui:

View File

@@ -3,7 +3,7 @@
 services:
   llava-tgi-service:
-    image: ghcr.io/huggingface/tgi-gaudi:2.0.4
+    image: ghcr.io/huggingface/tgi-gaudi:2.0.5
     container_name: tgi-llava-gaudi-server
     ports:
       - "8399:80"
@@ -17,6 +17,11 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
       HABANA_VISIBLE_DEVICES: all
       OMPI_MCA_btl_vader_single_copy_mechanism: none
+      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      ENABLE_HPU_GRAPH: true
+      LIMIT_HPU_GRAPH: true
+      USE_FLASH_ATTENTION: true
+      FLASH_ATTENTION_RECOMPUTE: true
     runtime: habana
     cap_add:
       - SYS_NICE

View File

@@ -21,7 +21,7 @@ function build_docker_images() {
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
     docker compose -f build.yaml build --no-cache > ${LOG_PATH}/docker_image_build.log
-    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.4
+    docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
    docker images && sleep 1s
 }