Update Code and README for GenAIComps Refactor (#1285)
Signed-off-by: lvliang-intel <liang1.lv@intel.com>
Signed-off-by: chensuyue <suyue.chen@intel.com>
Signed-off-by: Xinyao Wang <xinyao.wang@intel.com>
Signed-off-by: letonghan <letong.han@intel.com>
Signed-off-by: ZePan110 <ze.pan@intel.com>
Signed-off-by: WenjiaoYue <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>
@@ -19,7 +19,7 @@ docker build -t opea/whisper-gaudi:latest --build-arg https_proxy=$https_proxy -

 ### 3. Build LLM Image

-Intel Xeon optimized image hosted in huggingface repo will be used for TGI service: ghcr.io/huggingface/tgi-gaudi:2.0.6 (https://github.com/huggingface/tgi-gaudi)
+Intel Gaudi optimized image hosted in huggingface repo will be used for TGI service: ghcr.io/huggingface/tgi-gaudi:2.0.6 (https://github.com/huggingface/tgi-gaudi)

 ### 4. Build TTS Image
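The hunk above only corrects the README wording from Xeon to Gaudi; the LLM step itself needs no local build, since the prebuilt image named in the README is pulled directly. A minimal sketch of that step:

```bash
# Pull the prebuilt TGI Gaudi image referenced in the README; no local build is required.
docker pull ghcr.io/huggingface/tgi-gaudi:2.0.6
```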
@@ -32,7 +32,7 @@ services:
   llm-tgi:
     build:
       context: GenAIComps
-      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
     extends: avatarchatbot
     image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
   speecht5-gaudi:
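The compose hunk above only moves the llm-tgi Dockerfile to its post-refactor location under comps/llms/src/text-generation. As a rough standalone equivalent of that build stanza (a sketch, assuming GenAIComps is cloned alongside the compose file and that the https_proxy build arg from the README applies):

```bash
# Hypothetical manual build matching the updated llm-tgi stanza:
# build context is the GenAIComps checkout, Dockerfile is its new refactored path.
docker build \
  --build-arg https_proxy=$https_proxy \
  -t ${REGISTRY:-opea}/llm-tgi:${TAG:-latest} \
  -f GenAIComps/comps/llms/src/text-generation/Dockerfile \
  GenAIComps
```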
@@ -72,7 +72,6 @@ function start_services() {

    # Start Docker Containers
    docker compose up -d > ${LOG_PATH}/start_services_with_compose.log

    n=0
    until [[ "$n" -ge 200 ]]; do
        docker logs tgi-gaudi-server > $LOG_PATH/tgi_service_start.log
@@ -82,7 +81,6 @@ function start_services() {
        sleep 5s
        n=$((n+1))
    done

    echo "All services are up and running"
    sleep 5s
}
@@ -82,7 +82,6 @@ function start_services() {
        n=$((n+1))
    done
    echo "All services are up and running"
    sleep 5s
}
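For context, the readiness loop these test-script hunks trim has roughly the shape sketched below. The grep pattern used to detect TGI readiness is an assumption (it sits outside the diff context shown), as is LOG_PATH being set earlier in the script.

```bash
# Sketch of the start_services() wait loop (assumes LOG_PATH is exported by the script).
# "Connected" is a placeholder readiness marker; the real script greps the TGI log
# for its own ready message.
n=0
until [[ "$n" -ge 200 ]]; do
    docker logs tgi-gaudi-server > $LOG_PATH/tgi_service_start.log
    if grep -q "Connected" $LOG_PATH/tgi_service_start.log; then
        break
    fi
    sleep 5s
    n=$((n+1))
done
echo "All services are up and running"
```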