Fix async in chatqna bug (#1589)
Align async with comps; related PR: opea-project/GenAIComps#1300. Signed-off-by: Xinyao Wang <xinyao.wang@intel.com>
@@ -166,10 +166,10 @@ def align_outputs(self, data, cur_node, inputs, runtime_graph, llm_parameters_di
         return next_data

-    def align_generator(self, gen, **kwargs):
+    async def align_generator(self, gen, **kwargs):
         # OpenAI response format
         # b'data:{"id":"","object":"text_completion","created":1725530204,"model":"meta-llama/Meta-Llama-3-8B-Instruct","system_fingerprint":"2.0.1-native","choices":[{"index":0,"delta":{"role":"assistant","content":"?"},"logprobs":null,"finish_reason":null}]}\n\n'
-        for line in gen:
+        async for line in gen:
             line = line.decode("utf-8")
             start = line.find("{")
             end = line.rfind("}") + 1
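Context for the change: when the upstream response is streamed through an async client, `gen` is an async generator, so it can only be consumed with `async for` inside an `async def` method; the plain `for`/`def` version raises a TypeError at runtime. The following is a minimal, self-contained sketch of that pattern, not the actual ChatQnA code; `fake_stream` and the standalone `align_generator` below are hypothetical stand-ins used only to illustrate the `async for` consumption.

import asyncio
import json


async def fake_stream():
    # Hypothetical stand-in for an async streaming LLM response;
    # each item mimics one OpenAI-style SSE chunk as raw bytes.
    chunks = [
        b'data:{"choices":[{"index":0,"delta":{"content":"Hel"},"finish_reason":null}]}\n\n',
        b'data:{"choices":[{"index":0,"delta":{"content":"lo"},"finish_reason":"stop"}]}\n\n',
    ]
    for chunk in chunks:
        yield chunk


async def align_generator(gen):
    # Same pattern as the patched method: consume the async generator
    # with `async for`, slice out the JSON payload, and re-yield it.
    async for line in gen:
        line = line.decode("utf-8")
        start = line.find("{")
        end = line.rfind("}") + 1
        if start == -1 or end <= start:
            continue
        yield json.loads(line[start:end])


async def main():
    async for chunk in align_generator(fake_stream()):
        print(chunk["choices"][0]["delta"].get("content", ""))


asyncio.run(main())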