fix vllm output in chatqna (#1038)
Signed-off-by: Xinyao Wang <xinyao.wang@intel.com>
@@ -83,12 +83,6 @@ services:
       dockerfile: comps/llms/text-generation/vllm/langchain/dependency/Dockerfile.intel_hpu
     extends: chatqna
     image: ${REGISTRY:-opea}/llm-vllm-hpu:${TAG:-latest}
-  llm-vllm-ray:
-    build:
-      context: GenAIComps
-      dockerfile: comps/llms/text-generation/vllm/ray/Dockerfile
-    extends: chatqna
-    image: ${REGISTRY:-opea}/llm-vllm-ray:${TAG:-latest}
   llm-vllm-ray-hpu:
     build:
       context: GenAIComps
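For context, images declared in a compose build file like this one are built by passing service names to docker compose. A minimal usage sketch, assuming this hunk edits ChatQnA's docker_image_build/build.yaml (the file path is an assumption; the service name comes from the hunk above):

    # Assumed location of the build file touched by this commit.
    cd GenAIExamples/ChatQnA/docker_image_build
    # Build only the remaining Gaudi vLLM image; REGISTRY and TAG fall back
    # to opea/latest via the defaults declared in the yaml.
    docker compose -f build.yaml build --no-cache llm-vllm-ray-hpu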