Refactor example code (#183)

Signed-off-by: lvliang-intel <liang1.lv@intel.com>
Signed-off-by: Yue, Wenjiao <wenjiao.yue@intel.com>
Signed-off-by: chensuyue <suyue.chen@intel.com>
Authored by lvliang-intel on 2024-05-24 13:32:14 +08:00, committed by GitHub
parent b91a9d10af, commit a6b3caf128
73 changed files with 461 additions and 555 deletions

DocSum/docsum.py (new file, 57 lines)

@@ -0,0 +1,57 @@
# Copyright (c) 2024 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import os

from comps import DocSumGateway, MicroService, ServiceOrchestrator, ServiceType

# Service endpoints are configurable through the environment so the example can
# point at microservices running on other hosts; the values below are defaults.
MEGA_SERVICE_HOST_IP = os.getenv("MEGA_SERVICE_HOST_IP", "0.0.0.0")
MEGA_SERVICE_PORT = os.getenv("MEGA_SERVICE_PORT", 8888)
LLM_SERVICE_HOST_IP = os.getenv("LLM_SERVICE_HOST_IP", "0.0.0.0")
LLM_SERVICE_PORT = os.getenv("LLM_SERVICE_PORT", 9000)


class DocSumService:
    def __init__(self, host="0.0.0.0", port=8000):
        self.host = host
        self.port = port
        self.megaservice = ServiceOrchestrator()

    def add_remote_service(self):
        # Register the remote LLM microservice with the orchestrator and expose
        # the composed pipeline through the DocSum gateway.
        llm = MicroService(
            name="llm",
            host=LLM_SERVICE_HOST_IP,
            port=LLM_SERVICE_PORT,
            endpoint="/v1/chat/docsum",
            use_remote_service=True,
            service_type=ServiceType.LLM,
        )
        self.megaservice.add(llm)
        self.gateway = DocSumGateway(megaservice=self.megaservice, host="0.0.0.0", port=self.port)

    async def schedule(self):
        # Run the pipeline once on a sample document and print the results
        # collected from the microservices.
        await self.megaservice.schedule(
            initial_inputs={
                "text": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."
            }
        )
        result_dict = self.megaservice.result_dict
        print(result_dict)


if __name__ == "__main__":
    docsum = DocSumService(host=MEGA_SERVICE_HOST_IP, port=MEGA_SERVICE_PORT)
    docsum.add_remote_service()
    asyncio.run(docsum.schedule())
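
For reference, a minimal usage sketch (not part of this commit) showing how the refactored DocSumService class could be reused to run the same pipeline on a caller-supplied document instead of the hard-coded sample text. It assumes docsum.py is on the import path and that the remote LLM microservice is reachable at LLM_SERVICE_HOST_IP:LLM_SERVICE_PORT; the summarize helper below is hypothetical.

# Hypothetical usage sketch: reuse DocSumService from docsum.py to summarize
# a caller-supplied document. Assumes docsum.py is importable and the remote
# LLM microservice is already running.
import asyncio

from docsum import DocSumService


async def summarize(text: str) -> dict:
    # Route the custom input through the composed megaservice, mirroring what
    # DocSumService.schedule() does for its built-in sample text.
    service = DocSumService(host="0.0.0.0", port=8888)
    service.add_remote_service()
    await service.megaservice.schedule(initial_inputs={"text": text})
    return service.megaservice.result_dict


if __name__ == "__main__":
    print(asyncio.run(summarize("Paste the document to summarize here.")))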