Revert hf_token setting (#226)

Signed-off-by: chensuyue <suyue.chen@intel.com>
Author: chen, suyue
Date: 2024-05-30 23:12:03 +08:00
Committed by: GitHub
Parent commit: d659c04a68
Commit: 7eb402e95b
59 changed files with 92 additions and 92 deletions
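
All of the compose files, Kubernetes manifests, and test scripts touched below go back to reading the Hugging Face access token from `HUGGINGFACEHUB_API_TOKEN` instead of `HF_TOKEN`. A minimal sketch of the expected shell setup before bringing a stack up (the compose file name here is illustrative and varies per example):

```bash
# Sketch only: the variable name matches this revert; the compose file path is
# an assumption and differs between the CodeGen Xeon and Gaudi examples.
export HUGGINGFACEHUB_API_TOKEN=<your-hf-access-token>
docker compose -f docker_compose.yaml up -d
```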

View File

@@ -24,7 +24,7 @@ opea_micro_services:
- SYS_NICE
ipc: host
environment:
-HF_TOKEN: ${HF_TOKEN}
+HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
HABANA_VISIBLE_DEVICES: all
OMPI_MCA_btl_vader_single_copy_mechanism: none
model-id: ${LLM_MODEL_ID}
@@ -35,7 +35,7 @@ opea_micro_services:
endpoint: /v1/chat/completions
environment:
TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
-HF_TOKEN: ${HF_TOKEN}
+HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
ui:
host: ${UI_SERVICE_HOST_IP}
ports:

View File

@@ -86,10 +86,10 @@ docker run -it -e http_proxy=${http_proxy} -e https_proxy=${https_proxy} --net=h
Make sure TGI-Gaudi service is running and also make sure data is populated into Redis. Launch the backend service:
-Please follow this link [huggingface token](https://huggingface.co/docs/hub/security-tokens) to get the access token and export `HF_TOKEN` environment with the token.
+Please follow this link [huggingface token](https://huggingface.co/docs/hub/security-tokens) to get the access token and export `HUGGINGFACEHUB_API_TOKEN` environment with the token.
```bash
-export HF_TOKEN=<token>
+export HUGGINGFACEHUB_API_TOKEN=<token>
nohup python server.py &
```
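
A quick way to confirm the token is actually visible to the shell that launches the server, assuming a POSIX shell (illustrative only):

```bash
# Prints a confirmation only when the token variable is non-empty.
if [ -n "${HUGGINGFACEHUB_API_TOKEN}" ]; then echo "HUGGINGFACEHUB_API_TOKEN is set"; else echo "token is missing"; fi
```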

View File

@@ -63,7 +63,7 @@ function launch_server() {
# Start the Backend Service
docker exec $COPILOT_CONTAINER_NAME \
bash -c "export HF_TOKEN=$HF_TOKEN;nohup python server.py &"
bash -c "export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN;nohup python server.py &"
sleep 1m
}

View File

@@ -55,7 +55,7 @@ export http_proxy=${your_http_proxy}
export https_proxy=${your_http_proxy}
export LLM_MODEL_ID="meta-llama/CodeLlama-7b-hf"
export TGI_LLM_ENDPOINT="http://${host_ip}:8028"
-export HF_TOKEN=${your_hf_api_token}
+export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
export MEGA_SERVICE_HOST_IP=${host_ip}
export LLM_SERVICE_HOST_IP=${host_ip}
export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:6666/v1/codegen"
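
With these variables exported and the services running, the backend can be exercised against `BACKEND_SERVICE_ENDPOINT`. A hedged example, assuming the CodeGen gateway accepts a JSON body with a `messages` field as in other OPEA examples:

```bash
# Assumed request shape; adjust the payload to the gateway's actual schema.
curl ${BACKEND_SERVICE_ENDPOINT} \
  -H "Content-Type: application/json" \
  -d '{"messages": "Write a Python function that adds two numbers."}'
```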

View File

@@ -44,7 +44,7 @@ services:
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
-HF_TOKEN: ${HF_TOKEN}
+HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
LANGCHAIN_PROJECT: "opea-llm-service"

View File

@@ -63,7 +63,7 @@ export http_proxy=${your_http_proxy}
export https_proxy=${your_http_proxy}
export LLM_MODEL_ID="meta-llama/CodeLlama-7b-hf"
export TGI_LLM_ENDPOINT="http://${host_ip}:8028"
-export HF_TOKEN=${your_hf_api_token}
+export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
export MEGA_SERVICE_HOST_IP=${host_ip}
export LLM_SERVICE_HOST_IP=${host_ip}
export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:6666/v1/codegen"

View File

@@ -26,7 +26,7 @@ services:
environment:
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
-HF_TOKEN: ${HF_TOKEN}
+HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
command: --model-id ${LLM_MODEL_ID}
llm:
image: opea/llm-tgi:latest
@@ -40,7 +40,7 @@ services:
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
-HF_TOKEN: ${HF_TOKEN}
+HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
LANGCHAIN_PROJECT: "opea-llm-service"

View File

@@ -2,15 +2,15 @@
> [NOTE]
> The following values must be set before you can deploy:
-> HF_TOKEN
+> HUGGINGFACEHUB_API_TOKEN
> You can also customize the "MODEL_ID" and "model-volume"
## Deploy On Xeon
```
cd GenAIExamples/CodeGen/kubernetes/manifests/xeon
export HF_TOKEN="YourOwnToken"
sed -i "s/insert-your-huggingface-token-here/${HF_TOKEN}/g" codegen.yaml
export HUGGINGFACEHUB_API_TOKEN="YourOwnToken"
sed -i "s/insert-your-huggingface-token-here/${HUGGINGFACEHUB_API_TOKEN}/g" codegen.yaml
kubectl apply -f codegen.yaml
```
@@ -18,8 +18,8 @@ kubectl apply -f codegen.yaml
```
cd GenAIExamples/CodeGen/kubernetes/manifests/gaudi
export HF_TOKEN="YourOwnToken"
sed -i "s/insert-your-huggingface-token-here/${HF_TOKEN}/g" codegen.yaml
export HUGGINGFACEHUB_API_TOKEN="YourOwnToken"
sed -i "s/insert-your-huggingface-token-here/${HUGGINGFACEHUB_API_TOKEN}/g" codegen.yaml
kubectl apply -f codegen.yaml
```
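
After `kubectl apply`, rollout progress can be checked with standard kubectl commands. The deployment name below is an assumption based on the `codegen-tgi` service referenced in the manifests:

```bash
# Wait until the pods report Ready, then inspect the TGI logs.
kubectl get pods
kubectl logs deploy/codegen-tgi --tail=50   # assumed deployment name; adjust to codegen.yaml
```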

View File

@@ -143,7 +143,7 @@ spec:
env:
- name: TGI_LLM_ENDPOINT
value: "http://codegen-tgi:80"
-- name: HF_TOKEN
+- name: HUGGINGFACEHUB_API_TOKEN
value: "insert-your-huggingface-token-here"
- name: http_proxy
value:

View File

@@ -141,7 +141,7 @@ spec:
env:
- name: TGI_LLM_ENDPOINT
value: "http://codegen-tgi:80"
-- name: HF_TOKEN
+- name: HUGGINGFACEHUB_API_TOKEN
value: "insert-your-huggingface-token-here"
- name: http_proxy
value:

View File

@@ -41,7 +41,7 @@ function start_services() {
export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
export TGI_LLM_ENDPOINT="http://${ip_address}:8028"
-export HF_TOKEN=${HF_TOKEN}
+export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export MEGA_SERVICE_HOST_IP=${ip_address}
export LLM_SERVICE_HOST_IP=${ip_address}
export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:6666/v1/codegen"

View File

@@ -29,7 +29,7 @@ function start_services() {
export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
export TGI_LLM_ENDPOINT="http://${ip_address}:8028"
-export HF_TOKEN=${HF_TOKEN}
+export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
export MEGA_SERVICE_HOST_IP=${ip_address}
export LLM_SERVICE_HOST_IP=${ip_address}
export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:6666/v1/codegen"