# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import asyncio
import os

from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
from comps.cores.mega.utils import handle_message
from comps.cores.proto.api_protocol import (
    ChatCompletionRequest,
    ChatCompletionResponse,
    ChatCompletionResponseChoice,
    ChatMessage,
    UsageInfo,
)
from comps.cores.proto.docarray import LLMParams
from fastapi import Request
from fastapi.responses import StreamingResponse

MEGA_SERVICE_PORT = int(os.getenv("MEGA_SERVICE_PORT", 7778))
LLM_SERVICE_HOST_IP = os.getenv("LLM_SERVICE_HOST_IP", "0.0.0.0")
LLM_SERVICE_PORT = int(os.getenv("LLM_SERVICE_PORT", 9000))


class CodeGenService:
    def __init__(self, host="0.0.0.0", port=8000):
        self.host = host
        self.port = port
        self.megaservice = ServiceOrchestrator()
        self.endpoint = str(MegaServiceEndpoint.CODE_GEN)

    def add_remote_service(self):
        # Register the remote LLM microservice with the orchestrator.
        llm = MicroService(
            name="llm",
            host=LLM_SERVICE_HOST_IP,
            port=LLM_SERVICE_PORT,
            endpoint="/v1/chat/completions",
            use_remote_service=True,
            service_type=ServiceType.LLM,
        )
        self.megaservice.add(llm)

    async def handle_request(self, request: Request):
        data = await request.json()
        stream_opt = data.get("stream", True)
        chat_request = ChatCompletionRequest.parse_obj(data)
        prompt = handle_message(chat_request.messages)
        parameters = LLMParams(
            max_tokens=chat_request.max_tokens if chat_request.max_tokens else 1024,
            top_k=chat_request.top_k if chat_request.top_k else 10,
            top_p=chat_request.top_p if chat_request.top_p else 0.95,
            temperature=chat_request.temperature if chat_request.temperature else 0.01,
            frequency_penalty=chat_request.frequency_penalty if chat_request.frequency_penalty else 0.0,
            presence_penalty=chat_request.presence_penalty if chat_request.presence_penalty else 0.0,
            repetition_penalty=chat_request.repetition_penalty if chat_request.repetition_penalty else 1.03,
            streaming=stream_opt,
        )
        result_dict, runtime_graph = await self.megaservice.schedule(
            initial_inputs={"query": prompt}, llm_parameters=parameters
        )
        for node, response in result_dict.items():
            # Assume the last microservice in the megaservice is the LLM; if it
            # produced a streaming response, forward the stream to the client.
            if (
                isinstance(response, StreamingResponse)
                and node == list(self.megaservice.services.keys())[-1]
                and self.megaservice.services[node].service_type == ServiceType.LLM
            ):
                return response
        # Non-streaming path: build a ChatCompletionResponse from the last leaf node.
        last_node = runtime_graph.all_leaves()[-1]
        response = result_dict[last_node]["text"]
        choices = []
        usage = UsageInfo()
        choices.append(
            ChatCompletionResponseChoice(
                index=0,
                message=ChatMessage(role="assistant", content=response),
                finish_reason="stop",
            )
        )
        return ChatCompletionResponse(model="codegen", choices=choices, usage=usage)

    def start(self):
        self.service = MicroService(
            self.__class__.__name__,
            service_role=ServiceRoleType.MEGASERVICE,
            host=self.host,
            port=self.port,
            endpoint=self.endpoint,
            input_datatype=ChatCompletionRequest,
            output_datatype=ChatCompletionResponse,
        )
        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
        self.service.start()


if __name__ == "__main__":
    chatqna = CodeGenService(port=MEGA_SERVICE_PORT)
    chatqna.add_remote_service()
    chatqna.start()
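
# Usage sketch (comment only, not executed): assuming MegaServiceEndpoint.CODE_GEN
# resolves to "/v1/codegen" and the megaservice is listening on MEGA_SERVICE_PORT
# (7778 by default), a request can be sent with curl:
#
#   curl http://localhost:7778/v1/codegen \
#     -H "Content-Type: application/json" \
#     -d '{"messages": "Implement a function that computes Fibonacci numbers.", "stream": false}'
#
# With "stream": true (the default in handle_request), the LLM microservice's
# StreamingResponse is returned directly instead of a single ChatCompletionResponse.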