fix vllm output in chatqna (#1038)

Signed-off-by: Xinyao Wang <xinyao.wang@intel.com>
Author: XinyaoWa <xinyao.wang@intel.com>
Date: 2024-11-01 09:26:57 +08:00
Committed by: GitHub
Commit: c65d7d40fb (parent 9d124161e0)
3 changed files with 4 additions and 8 deletions

@@ -83,12 +83,6 @@ services:
       dockerfile: comps/llms/text-generation/vllm/langchain/dependency/Dockerfile.intel_hpu
     extends: chatqna
     image: ${REGISTRY:-opea}/llm-vllm-hpu:${TAG:-latest}
-  llm-vllm-ray:
-    build:
-      context: GenAIComps
-      dockerfile: comps/llms/text-generation/vllm/ray/Dockerfile
-    extends: chatqna
-    image: ${REGISTRY:-opea}/llm-vllm-ray:${TAG:-latest}
   llm-vllm-ray-hpu:
     build:
       context: GenAIComps
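
For context on the extends: chatqna lines above: each component image reuses the build settings of a base chatqna service, overriding only its build context, Dockerfile, and image tag. A minimal sketch of that pattern, assuming the base service carries shared proxy build args (the chatqna definition below is illustrative, not copied from this commit):

services:
  chatqna:
    build:
      args:
        # Shared build args inherited by every extending service
        # (illustrative; the exact args are an assumption).
        http_proxy: ${http_proxy}
        https_proxy: ${https_proxy}
        no_proxy: ${no_proxy}
      context: ../
      dockerfile: ./Dockerfile
    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
  llm-vllm-hpu:
    # Inherits the proxy build args from chatqna, overriding only the
    # build context, Dockerfile, and resulting image tag.
    extends: chatqna
    build:
      context: GenAIComps
      dockerfile: comps/llms/text-generation/vllm/langchain/dependency/Dockerfile.intel_hpu
    image: ${REGISTRY:-opea}/llm-vllm-hpu:${TAG:-latest}

With this layout, docker compose build llm-vllm-hpu rebuilds only that image; deleting the llm-vllm-ray entry, as this commit does, simply drops it from the set of buildable targets.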