update codetrans default model (#1015)

Signed-off-by: Xinyao Wang <xinyao.wang@intel.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
XinyaoWa authored on 2024-10-28 09:11:54 +08:00; committed by GitHub
parent 89f4c5fb41
commit a2afce1675
13 changed files with 21 additions and 21 deletions


@@ -77,9 +77,9 @@ Currently we support two ways of deploying Code Translation services on docker:
 By default, the LLM model is set to a default value as listed below:
-| Service | Model |
-| ------- | ----------------------------- |
-| LLM | HuggingFaceH4/mistral-7b-grok |
+| Service | Model |
+| ------- | ---------------------------------- |
+| LLM | mistralai/Mistral-7B-Instruct-v0.3 |
 Change the `LLM_MODEL_ID` in `docker_compose/set_env.sh` for your needs.
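For reference, switching to a different model is a one-line override; a minimal sketch, with a placeholder model ID rather than a recommendation:

```bash
# Override after sourcing docker_compose/set_env.sh (which sets the default);
# "your-org/your-model" is a placeholder for any TGI-servable Hugging Face ID.
export LLM_MODEL_ID="your-org/your-model"
```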


@@ -57,9 +57,9 @@ Then run the command `docker images`, you will have the following Docker Images:
 By default, the LLM model is set to a default value as listed below:
-| Service | Model |
-| ------- | ----------------------------- |
-| LLM | HuggingFaceH4/mistral-7b-grok |
+| Service | Model |
+| ------- | ---------------------------------- |
+| LLM | mistralai/Mistral-7B-Instruct-v0.3 |
 Change the `LLM_MODEL_ID` below for your needs.


@@ -49,9 +49,9 @@ Then run the command `docker images`, you will have the following Docker Images:
 By default, the LLM model is set to a default value as listed below:
-| Service | Model |
-| ------- | ----------------------------- |
-| LLM | HuggingFaceH4/mistral-7b-grok |
+| Service | Model |
+| ------- | ---------------------------------- |
+| LLM | mistralai/Mistral-7B-Instruct-v0.3 |
 Change the `LLM_MODEL_ID` below for your needs.


@@ -4,7 +4,7 @@
 # SPDX-License-Identifier: Apache-2.0
-export LLM_MODEL_ID="HuggingFaceH4/mistral-7b-grok"
+export LLM_MODEL_ID="mistralai/Mistral-7B-Instruct-v0.3"
 export TGI_LLM_ENDPOINT="http://${host_ip}:8008"
 export MEGA_SERVICE_HOST_IP=${host_ip}
 export LLM_SERVICE_HOST_IP=${host_ip}
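For reference, a usage sketch of how these variables feed a deployment; the path is inferred from the test scripts later in this commit, and the compose file name is assumed to be the default:

```bash
export host_ip=$(hostname -I | awk '{print $1}')  # set_env.sh expands ${host_ip}
source docker_compose/set_env.sh                  # picks up the new LLM_MODEL_ID
cd docker_compose/intel/cpu/xeon/                 # path as used by the Xeon test script below
docker compose up -d                              # assumes the default compose file name
```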


@@ -14,7 +14,7 @@ By default, the LLM model is set to a default value as listed below:
 |Service |Model |
 |---------|-------------------------|
-|LLM |HuggingFaceH4/mistral-7b-grok|
+|LLM |mistralai/Mistral-7B-Instruct-v0.3|
 Change the `MODEL_ID` in `codetrans.yaml` for your needs.
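If editing the manifest by hand is undesirable, the same change can be scripted; a sketch using plain sed, with a placeholder model ID:

```bash
# Patch the default model in place; "your-org/your-model" is a placeholder.
sed -i 's|mistralai/Mistral-7B-Instruct-v0.3|your-org/your-model|' codetrans.yaml
```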


@@ -13,7 +13,7 @@ By default, the LLM model is set to a default value as listed below:
 |Service |Model |
 |---------|-------------------------|
-|LLM |HuggingFaceH4/mistral-7b-grok|
+|LLM |mistralai/Mistral-7B-Instruct-v0.3|
 Change the `MODEL_ID` in `codetrans_xeon.yaml` for your needs.


@@ -29,6 +29,6 @@ spec:
   internalService:
     serviceName: tgi-service
     config:
-      MODEL_ID: HuggingFaceH4/mistral-7b-grok
+      MODEL_ID: mistralai/Mistral-7B-Instruct-v0.3
       endpoint: /generate
   isDownstreamService: true


@@ -64,7 +64,7 @@ metadata:
     app.kubernetes.io/version: "2.1.0"
     app.kubernetes.io/managed-by: Helm
 data:
-  MODEL_ID: "HuggingFaceH4/mistral-7b-grok"
+  MODEL_ID: "mistralai/Mistral-7B-Instruct-v0.3"
   PORT: "2080"
   HF_TOKEN: "insert-your-huggingface-token-here"
   http_proxy: ""
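Note that the `HF_TOKEN` placeholder above still needs a real token before the manifest is applied. Once it is, the rendered value can be verified in-cluster; a sketch, with a hypothetical ConfigMap name since this hunk does not show it:

```bash
# ConfigMap name is hypothetical; list real names with `kubectl get configmaps`.
kubectl get configmap codetrans-tgi-config -o jsonpath='{.data.MODEL_ID}'
# Expected output after this change: mistralai/Mistral-7B-Instruct-v0.3
```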


@@ -29,6 +29,6 @@ spec:
   internalService:
     serviceName: tgi-gaudi-svc
     config:
-      MODEL_ID: HuggingFaceH4/mistral-7b-grok
+      MODEL_ID: mistralai/Mistral-7B-Instruct-v0.3
       endpoint: /generate
   isDownstreamService: true


@@ -64,7 +64,7 @@ metadata:
     app.kubernetes.io/version: "2.1.0"
     app.kubernetes.io/managed-by: Helm
 data:
-  MODEL_ID: "HuggingFaceH4/mistral-7b-grok"
+  MODEL_ID: "mistralai/Mistral-7B-Instruct-v0.3"
   PORT: "2080"
   HF_TOKEN: "insert-your-huggingface-token-here"
   http_proxy: ""


@@ -31,7 +31,7 @@ function start_services() {
     export http_proxy=${http_proxy}
     export https_proxy=${http_proxy}
-    export LLM_MODEL_ID="HuggingFaceH4/mistral-7b-grok"
+    export LLM_MODEL_ID="mistralai/Mistral-7B-Instruct-v0.3"
     export TGI_LLM_ENDPOINT="http://${ip_address}:8008"
     export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
     export MEGA_SERVICE_HOST_IP=${ip_address}


@@ -30,7 +30,7 @@ function start_services() {
     cd $WORKPATH/docker_compose/intel/cpu/xeon/
     export http_proxy=${http_proxy}
     export https_proxy=${http_proxy}
-    export LLM_MODEL_ID="HuggingFaceH4/mistral-7b-grok"
+    export LLM_MODEL_ID="mistralai/Mistral-7B-Instruct-v0.3"
     export TGI_LLM_ENDPOINT="http://${ip_address}:8008"
     export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
     export MEGA_SERVICE_HOST_IP=${ip_address}


@@ -71,9 +71,9 @@ This document introduces the supported examples of GenAIExamples. The supported
 [CodeTrans](./CodeTrans/README.md) is an example of chatbot for converting code written in one programming language to another programming language while maintaining the same functionality.
-| Framework | LLM | Serving | HW | Description |
-| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------- | --------------------------------------------------------------- | ----------- | ---------------- |
-| [LangChain](https://www.langchain.com)/[LlamaIndex](https://www.llamaindex.ai) | [HuggingFaceH4/mistral-7b-grok](https://huggingface.co/HuggingFaceH4/mistral-7b-grok) | [TGI](https://github.com/huggingface/text-generation-inference) | Xeon/Gaudi2 | Code Translation |
+| Framework | LLM | Serving | HW | Description |
+| ------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------- | --------------------------------------------------------------- | ----------- | ---------------- |
+| [LangChain](https://www.langchain.com)/[LlamaIndex](https://www.llamaindex.ai) | [mistralai/Mistral-7B-Instruct-v0.3](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3) | [TGI](https://github.com/huggingface/text-generation-inference) | Xeon/Gaudi2 | Code Translation |
 ### DocSum