Standardize name for LLM comps (#1402)

Update all class and file names in the llm comps to follow the standard naming format. Related GenAIComps PR: opea-project/GenAIComps#1162

Signed-off-by: Xinyao Wang <xinyao.wang@intel.com>
Author: XinyaoWa
Date: 2025-01-16 23:10:27 +08:00
Committed by: GitHub
Parent: 5ad24af2ee
Commit: 71e3c57366
12 changed files with 12 additions and 12 deletions
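
For reference, every hunk below makes the same kind of change: the old underscore-style component names are replaced with the new CamelCase names. A minimal before/after sketch of the two variables this commit touches (values taken directly from the hunks below):

```shell
# Old component names (before #1402)
export DocSum_COMPONENT_NAME="OPEADocSum_TGI"
export FAQGen_COMPONENT_NAME="OPEAFAQGen_TGI"

# New standardized names used throughout this commit
export DocSum_COMPONENT_NAME="OpeaDocSumTgi"
export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
```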

@@ -81,7 +81,7 @@ export DOCSUM_HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
 export DOCSUM_LLM_SERVER_PORT="8008"
 export DOCSUM_BACKEND_SERVER_PORT="8888"
 export DOCSUM_FRONTEND_PORT="5173"
-export DocSum_COMPONENT_NAME="OPEADocSum_TGI"
+export DocSum_COMPONENT_NAME="OpeaDocSumTgi"
 ```
 Note: Please replace with `host_ip` with your external IP address, do not use localhost.

@@ -20,4 +20,4 @@ export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8888/v1/docsum"
 export LLM_ENDPOINT_PORT=8008
 export DOCSUM_PORT=9000
 export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
-export DocSum_COMPONENT_NAME="OPEADocSum_TGI"
+export DocSum_COMPONENT_NAME="OpeaDocSumTgi"

@@ -26,7 +26,7 @@ export no_proxy="${no_proxy},${host_ip}"
 export LLM_ENDPOINT_PORT=8008
 export DOCSUM_PORT=9000
 export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
-export DocSum_COMPONENT_NAME="OPEADocSum_TGI"
+export DocSum_COMPONENT_NAME="OpeaDocSumTgi"
 export LOGFLAG=True
 WORKPATH=$(dirname "$PWD")

@@ -32,7 +32,7 @@ export ASR_SERVICE_HOST_IP=${host_ip}
 export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:8888/v1/docsum"
 export DOCSUM_CARD_ID="card1"
 export DOCSUM_RENDER_ID="renderD136"
-export DocSum_COMPONENT_NAME="OPEADocSum_TGI"
+export DocSum_COMPONENT_NAME="OpeaDocSumTgi"
 export LOGFLAG=True
 function build_docker_images() {

@@ -26,7 +26,7 @@ export no_proxy="${no_proxy},${host_ip}"
 export LLM_ENDPOINT_PORT=8008
 export DOCSUM_PORT=9000
 export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
-export DocSum_COMPONENT_NAME="OPEADocSum_TGI"
+export DocSum_COMPONENT_NAME="OpeaDocSumTgi"
 export LOGFLAG=True
 WORKPATH=$(dirname "$PWD")

@@ -34,7 +34,7 @@ export FAQGEN_HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
 export FAQGEN_BACKEND_SERVER_PORT=8888
 export FAGGEN_UI_PORT=5173
 export LLM_ENDPOINT="http://${HOST_IP}:${FAQGEN_TGI_SERVICE_PORT}"
-export FAQGen_COMPONENT_NAME="OPEAFAQGen_TGI"
+export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
 ```
 Note: Please replace with `host_ip` with your external IP address, do not use localhost.

@@ -77,7 +77,7 @@ export https_proxy=${your_http_proxy}
 export host_ip=${your_host_ip}
 export LLM_ENDPOINT_PORT=8008
 export LLM_SERVICE_PORT=9000
-export FAQGen_COMPONENT_NAME="OPEAFAQGen_TGI"
+export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
 export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
 export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
 export MEGA_SERVICE_HOST_IP=${host_ip}

@@ -157,7 +157,7 @@ export https_proxy=${your_http_proxy}
 export host_ip=${your_host_ip}
 export LLM_ENDPOINT_PORT=8008
 export LLM_SERVICE_PORT=9000
-export FAQGen_COMPONENT_NAME="OPEAFAQGen_TGI"
+export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
 export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
 export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
 export MEGA_SERVICE_HOST_IP=${host_ip}

@@ -31,7 +31,7 @@ function start_services() {
 export host_ip=${ip_address}
 export LLM_ENDPOINT_PORT=8008
-export FAQGen_COMPONENT_NAME="OPEAFAQGen_TGI"
+export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
 export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
 export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
 export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}

@@ -28,7 +28,7 @@ export MEGA_SERVICE_HOST_IP=${ip_address}
 export LLM_SERVICE_HOST_IP=${ip_address}
 export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:8888/v1/faqgen"
 export PATH="~/miniconda3/bin:$PATH"
-export FAQGen_COMPONENT_NAME="OPEAFAQGen_TGI"
+export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
 export LOGFLAG=True
 function build_docker_images() {

@@ -31,7 +31,7 @@ function start_services() {
 export host_ip=${ip_address}
 export LLM_ENDPOINT_PORT=8008
-export FAQGen_COMPONENT_NAME="OPEAFAQGen_TGI"
+export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
 export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
 export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
 export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}

@@ -80,7 +80,7 @@ function start_services() {
 export LLM_SERVER_PORT=9009
 export PROMPT_COLLECTION_NAME="prompt"
 export host_ip=${ip_address}
-export FAQGen_COMPONENT_NAME="OPEAFAQGen_TGI"
+export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
 export LOGFLAG=True
 # Start Docker Containers