Files
GenAIExamples/CodeGen/kubernetes/codegen_xeon.yaml
Steve Zhang f5f1e323bb Revert the LLM model for kubernetes GMS (#675)
* revert the LLM model to meta-llama/CodeLlama-7b-hf
Signed-off-by: zhlsunshine <huailong.zhang@intel.com>
2024-08-30 13:54:42 +08:00

35 lines
841 B
YAML

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
---
# GMConnector resource for the CodeGen example on Xeon: a router-service
# sequences one step ("Llm", backed by llm-service) whose request is
# forwarded to a downstream "Tgi" step (tgi-service) serving
# meta-llama/CodeLlama-7b-hf.
apiVersion: gmc.opea.io/v1alpha3
kind: GMConnector
metadata:
  labels:
    app.kubernetes.io/name: gmconnector
    app.kubernetes.io/managed-by: kustomize
    gmc/platform: xeon
  name: codegen
  namespace: codegen
spec:
  routerConfig:
    name: router
    serviceName: router-service
  nodes:
    root:
      routerType: Sequence
      steps:
        - name: Llm
          data: $response
          internalService:
            serviceName: llm-service
            config:
              endpoint: /v1/chat/completions
              # NOTE(review): presumably resolved to the tgi-service address
              # by the GMC controller — confirm against the GMC docs.
              TGI_LLM_ENDPOINT: tgi-service
        - name: Tgi
          internalService:
            serviceName: tgi-service
            config:
              MODEL_ID: meta-llama/CodeLlama-7b-hf
              endpoint: /generate
            # Marks this step as a dependency of the Llm step rather than
            # a directly routed node.
            isDownstreamService: true