Change the LLM for CodeGen Example (#206)
Signed-off-by: zehao-intel <zehao.huang@intel.com>
@@ -53,7 +53,7 @@ Since the `docker_compose.yaml` will consume some environment variables, you nee
 ```bash
 export http_proxy=${your_http_proxy}
 export https_proxy=${your_http_proxy}
-export LLM_MODEL_ID="ise-uiuc/Magicoder-S-DS-6.7B"
+export LLM_MODEL_ID="meta-llama/CodeLlama-7b-hf"
 export TGI_LLM_ENDPOINT="http://${host_ip}:8028"
 export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
 export MEGA_SERVICE_HOST_IP=${host_ip}
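
As a quick sanity check after applying this change, the swapped model can be queried directly through TGI's standard `/generate` API. This is a minimal sketch, assuming the TGI container from `docker_compose.yaml` is already running and the variables above are exported:

```bash
# Smoke-test the new model via TGI's /generate endpoint.
# Assumes the TGI service is up at ${TGI_LLM_ENDPOINT}.
curl ${TGI_LLM_ENDPOINT}/generate \
  -X POST \
  -H 'Content-Type: application/json' \
  -d '{"inputs": "def quicksort(arr):", "parameters": {"max_new_tokens": 64}}'
```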
@@ -61,7 +61,7 @@ Since the `docker_compose.yaml` will consume some environment variables, you nee
 ```bash
 export http_proxy=${your_http_proxy}
 export https_proxy=${your_http_proxy}
-export LLM_MODEL_ID="ise-uiuc/Magicoder-S-DS-6.7B"
+export LLM_MODEL_ID="meta-llama/CodeLlama-7b-hf"
 export TGI_LLM_ENDPOINT="http://${host_ip}:8028"
 export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
 export MEGA_SERVICE_HOST_IP=${host_ip}
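
One practical consequence of the model swap: `meta-llama/CodeLlama-7b-hf` is a gated repository on Hugging Face, so the `HUGGINGFACEHUB_API_TOKEN` exported above must belong to an account that has accepted the CodeLlama license. A quick access check, assuming the `huggingface_hub` CLI is installed (these commands are illustrative, not part of this repo):

```bash
# Verify the token can reach the gated CodeLlama repo before starting TGI.
huggingface-cli login --token ${your_hf_api_token}
huggingface-cli download meta-llama/CodeLlama-7b-hf config.json
```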
@@ -93,7 +93,7 @@ All the examples are well-validated on Intel platforms. In addition, these examp
 </tr>
 <tr>
 <td><a href="https://www.langchain.com">LangChain</a></td>
-<td><a href="https://huggingface.co/deepseek-ai/deepseek-coder-33b-instruct">Deepseek-Coder-33B</a></td>
+<td><a href="https://huggingface.co/meta-llama/CodeLlama-7b-hf">meta-llama/CodeLlama-7b-hf</a></td>
 <td><a href="https://github.com/huggingface/text-generation-inference">TGI</a></td>
 <td>Xeon/Gaudi2</td>
 <td>Copilot</td>
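
For trying the new default model outside of `docker_compose.yaml`, a standalone TGI launch would look roughly like the sketch below; the image tag, cache volume, and port mapping are assumptions based on TGI's public usage, not values taken from this repo:

```bash
# Hypothetical standalone TGI launch serving the new default model.
# Port 8028 mirrors TGI_LLM_ENDPOINT above; the container listens on 80.
docker run --rm -p 8028:80 \
  -v $PWD/data:/data \
  -e HUGGING_FACE_HUB_TOKEN=${your_hf_api_token} \
  ghcr.io/huggingface/text-generation-inference:1.4 \
  --model-id meta-llama/CodeLlama-7b-hf
```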