remove examples gateway. (#1250)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
lkk authored 2024-12-14 13:19:51 +08:00, committed by GitHub
parent 2af1ea0f8e
commit e18369ba0d
13 changed files with 128 additions and 71 deletions

View File

@@ -4,7 +4,7 @@
 import asyncio
 import os

-from comps import Gateway, MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceType
+from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
 from comps.cores.proto.api_protocol import AudioChatCompletionRequest, ChatCompletionResponse
 from comps.cores.proto.docarray import LLMParams
 from fastapi import Request
@@ -18,11 +18,12 @@ TTS_SERVICE_HOST_IP = os.getenv("TTS_SERVICE_HOST_IP", "0.0.0.0")
 TTS_SERVICE_PORT = int(os.getenv("TTS_SERVICE_PORT", 9088))


-class AudioQnAService(Gateway):
+class AudioQnAService:
     def __init__(self, host="0.0.0.0", port=8000):
         self.host = host
         self.port = port
         self.megaservice = ServiceOrchestrator()
+        self.endpoint = str(MegaServiceEndpoint.AUDIO_QNA)

     def add_remote_service(self):
         asr = MicroService(
@@ -78,14 +79,17 @@ class AudioQnAService(Gateway):
         return response

     def start(self):
-        super().__init__(
-            megaservice=self.megaservice,
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.AUDIO_QNA),
+            endpoint=self.endpoint,
             input_datatype=AudioChatCompletionRequest,
             output_datatype=ChatCompletionResponse,
         )
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+        self.service.start()


 if __name__ == "__main__":
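Every file in this commit applies the same refactor: the service class no longer subclasses Gateway, the endpoint moves into __init__, and start() composes a MicroService in the megaservice role, registering handle_request as an explicit POST route before starting the server. A minimal sketch of the resulting pattern, assuming only the comps names visible in the diff above (the ExampleService class and the stubbed handler body are illustrative, not part of this commit):

    from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType
    from comps.cores.proto.api_protocol import AudioChatCompletionRequest, ChatCompletionResponse
    from fastapi import Request


    class ExampleService:  # plain class; the Gateway base is gone
        def __init__(self, host="0.0.0.0", port=8000):
            self.host = host
            self.port = port
            self.megaservice = ServiceOrchestrator()
            # The endpoint is stored on the instance instead of being
            # passed to Gateway.__init__.
            self.endpoint = str(MegaServiceEndpoint.AUDIO_QNA)

        async def handle_request(self, request: Request):
            ...  # orchestration elided; see the handle_request bodies in the diffs

        def start(self):
            # The server is an explicitly composed MicroService in the
            # MEGASERVICE role, not behavior inherited from Gateway.
            self.service = MicroService(
                self.__class__.__name__,
                service_role=ServiceRoleType.MEGASERVICE,
                host=self.host,
                port=self.port,
                endpoint=self.endpoint,
                input_datatype=AudioChatCompletionRequest,
                output_datatype=ChatCompletionResponse,
            )
            # Routing becomes explicit: the handler is registered as a POST route.
            self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
            self.service.start()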

View File

@@ -5,7 +5,7 @@ import asyncio
 import base64
 import os

-from comps import Gateway, MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceType
+from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
 from comps.cores.proto.api_protocol import AudioChatCompletionRequest, ChatCompletionResponse
 from comps.cores.proto.docarray import LLMParams
 from fastapi import Request
@@ -54,7 +54,7 @@ def align_outputs(self, data, cur_node, inputs, runtime_graph, llm_parameters_di
     return data


-class AudioQnAService(Gateway):
+class AudioQnAService:
     def __init__(self, host="0.0.0.0", port=8000):
         self.host = host
         self.port = port
@@ -62,6 +62,8 @@ class AudioQnAService(Gateway):
         ServiceOrchestrator.align_outputs = align_outputs
         self.megaservice = ServiceOrchestrator()

+        self.endpoint = str(MegaServiceEndpoint.AUDIO_QNA)
+
     def add_remote_service(self):
         asr = MicroService(
             name="asr",
@@ -118,14 +120,17 @@ class AudioQnAService(Gateway):
         return response

     def start(self):
-        super().__init__(
-            megaservice=self.megaservice,
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.AUDIO_QNA),
+            endpoint=self.endpoint,
             input_datatype=AudioChatCompletionRequest,
             output_datatype=ChatCompletionResponse,
         )
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+        self.service.start()


 if __name__ == "__main__":

View File

@@ -5,7 +5,7 @@ import asyncio
 import os
 import sys

-from comps import Gateway, MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceType
+from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
 from comps.cores.proto.api_protocol import AudioChatCompletionRequest, ChatCompletionResponse
 from comps.cores.proto.docarray import LLMParams
 from fastapi import Request
@@ -29,11 +29,12 @@ def check_env_vars(env_var_list):
     print("All environment variables are set.")


-class AvatarChatbotService(Gateway):
+class AvatarChatbotService:
     def __init__(self, host="0.0.0.0", port=8000):
         self.host = host
         self.port = port
         self.megaservice = ServiceOrchestrator()
+        self.endpoint = str(MegaServiceEndpoint.AVATAR_CHATBOT)

     def add_remote_service(self):
         asr = MicroService(
@@ -97,14 +98,17 @@ class AvatarChatbotService(Gateway):
         return response

     def start(self):
-        super().__init__(
-            megaservice=self.megaservice,
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.AVATAR_CHATBOT),
+            endpoint=self.endpoint,
             input_datatype=AudioChatCompletionRequest,
             output_datatype=ChatCompletionResponse,
         )
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+        self.service.start()


 if __name__ == "__main__":

View File

@@ -4,7 +4,8 @@
 import asyncio
 import os

-from comps import Gateway, MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceType
+from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
+from comps.cores.mega.utils import handle_message
 from comps.cores.proto.api_protocol import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -21,11 +22,12 @@ LLM_SERVICE_HOST_IP = os.getenv("LLM_SERVICE_HOST_IP", "0.0.0.0")
 LLM_SERVICE_PORT = int(os.getenv("LLM_SERVICE_PORT", 9000))


-class CodeGenService(Gateway):
+class CodeGenService:
     def __init__(self, host="0.0.0.0", port=8000):
         self.host = host
         self.port = port
         self.megaservice = ServiceOrchestrator()
+        self.endpoint = str(MegaServiceEndpoint.CODE_GEN)

     def add_remote_service(self):
         llm = MicroService(
@@ -42,7 +44,7 @@ class CodeGenService(Gateway):
         data = await request.json()
         stream_opt = data.get("stream", True)
         chat_request = ChatCompletionRequest.parse_obj(data)
-        prompt = self._handle_message(chat_request.messages)
+        prompt = handle_message(chat_request.messages)
         parameters = LLMParams(
             max_tokens=chat_request.max_tokens if chat_request.max_tokens else 1024,
             top_k=chat_request.top_k if chat_request.top_k else 10,
@@ -78,14 +80,17 @@ class CodeGenService(Gateway):
         return ChatCompletionResponse(model="codegen", choices=choices, usage=usage)

     def start(self):
-        super().__init__(
-            megaservice=self.megaservice,
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.CODE_GEN),
+            endpoint=self.endpoint,
             input_datatype=ChatCompletionRequest,
             output_datatype=ChatCompletionResponse,
         )
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+        self.service.start()


 if __name__ == "__main__":
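This file also switches prompt construction from the inherited self._handle_message to the module-level helper comps.cores.mega.utils.handle_message, so the handler no longer depends on any Gateway state. A sketch of the new call site (the sample payload is illustrative; handle_message is assumed, per the import above, to flatten chat messages into a single prompt string):

    from comps.cores.mega.utils import handle_message
    from comps.cores.proto.api_protocol import ChatCompletionRequest

    data = {"messages": [{"role": "user", "content": "Write a quicksort in Python."}]}
    chat_request = ChatCompletionRequest.parse_obj(data)
    # Free function instead of a Gateway method: no self needed.
    prompt = handle_message(chat_request.messages)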

View File

@@ -4,7 +4,7 @@
 import asyncio
 import os

-from comps import Gateway, MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceType
+from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
 from comps.cores.proto.api_protocol import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -20,11 +20,12 @@ LLM_SERVICE_HOST_IP = os.getenv("LLM_SERVICE_HOST_IP", "0.0.0.0")
 LLM_SERVICE_PORT = int(os.getenv("LLM_SERVICE_PORT", 9000))


-class CodeTransService(Gateway):
+class CodeTransService:
     def __init__(self, host="0.0.0.0", port=8000):
         self.host = host
         self.port = port
         self.megaservice = ServiceOrchestrator()
+        self.endpoint = str(MegaServiceEndpoint.CODE_TRANS)

     def add_remote_service(self):
         llm = MicroService(
@@ -77,14 +78,17 @@ class CodeTransService(Gateway):
         return ChatCompletionResponse(model="codetrans", choices=choices, usage=usage)

     def start(self):
-        super().__init__(
-            megaservice=self.megaservice,
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.CODE_TRANS),
+            endpoint=self.endpoint,
             input_datatype=ChatCompletionRequest,
             output_datatype=ChatCompletionResponse,
         )
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+        self.service.start()


 if __name__ == "__main__":

View File

@@ -6,7 +6,7 @@ import asyncio
 import os
 from typing import Union

-from comps import Gateway, MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceType
+from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
 from comps.cores.proto.api_protocol import ChatCompletionRequest, EmbeddingRequest
 from comps.cores.proto.docarray import LLMParamsDoc, RerankedDoc, RerankerParms, RetrieverParms, TextDoc
 from fastapi import Request
@@ -21,11 +21,12 @@ RERANK_SERVICE_HOST_IP = os.getenv("RERANK_SERVICE_HOST_IP", "0.0.0.0")
 RERANK_SERVICE_PORT = os.getenv("RERANK_SERVICE_PORT", 8000)


-class RetrievalToolService(Gateway):
+class RetrievalToolService:
     def __init__(self, host="0.0.0.0", port=8000):
         self.host = host
         self.port = port
         self.megaservice = ServiceOrchestrator()
+        self.endpoint = str(MegaServiceEndpoint.RETRIEVALTOOL)

     def add_remote_service(self):
         embedding = MicroService(
@@ -116,14 +117,17 @@ class RetrievalToolService(Gateway):
         return response

     def start(self):
-        super().__init__(
-            megaservice=self.megaservice,
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.RETRIEVALTOOL),
+            endpoint=self.endpoint,
             input_datatype=Union[TextDoc, EmbeddingRequest, ChatCompletionRequest],
             output_datatype=Union[RerankedDoc, LLMParamsDoc],
         )
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+        self.service.start()

     def add_remote_service_without_rerank(self):
         embedding = MicroService(
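RetrievalTool is the one megaservice here typed with Union payloads rather than a single request/response model; the MicroService constructor takes those unions directly, so the refactor needs no special casing. A sketch restating the types from the diff above:

    from typing import Union

    from comps.cores.proto.api_protocol import ChatCompletionRequest, EmbeddingRequest
    from comps.cores.proto.docarray import LLMParamsDoc, RerankedDoc, TextDoc

    # The single POST route accepts any of these inputs and returns either output.
    input_datatype = Union[TextDoc, EmbeddingRequest, ChatCompletionRequest]
    output_datatype = Union[RerankedDoc, LLMParamsDoc]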

View File

@@ -9,7 +9,7 @@ MEGA_SERVICE_PORT = int(os.getenv("MEGA_SERVICE_PORT", 16011))
 PIPELINE_SERVICE_HOST_IP = os.getenv("PIPELINE_SERVICE_HOST_IP", "127.0.0.1")
 PIPELINE_SERVICE_PORT = int(os.getenv("PIPELINE_SERVICE_PORT", 16010))

-from comps import Gateway, MegaServiceEndpoint
+from comps import MegaServiceEndpoint, ServiceRoleType
 from comps.cores.proto.api_protocol import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -22,11 +22,12 @@ from fastapi import Request
 from fastapi.responses import StreamingResponse


-class EdgeCraftRagService(Gateway):
+class EdgeCraftRagService:
     def __init__(self, host="0.0.0.0", port=16010):
         self.host = host
         self.port = port
         self.megaservice = ServiceOrchestrator()
+        self.endpoint = str(MegaServiceEndpoint.CHAT_QNA)

     def add_remote_service(self):
         edgecraftrag = MicroService(
@@ -72,14 +73,17 @@ class EdgeCraftRagService(Gateway):
         return ChatCompletionResponse(model="edgecraftrag", choices=choices, usage=usage)

     def start(self):
-        super().__init__(
-            megaservice=self.megaservice,
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.CHAT_QNA),
+            endpoint=self.endpoint,
             input_datatype=ChatCompletionRequest,
             output_datatype=ChatCompletionResponse,
         )
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+        self.service.start()


 if __name__ == "__main__":

View File

@@ -6,7 +6,8 @@ import json
 import os
 import re

-from comps import Gateway, MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceType
+from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
+from comps.cores.mega.utils import handle_message
 from comps.cores.proto.api_protocol import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -127,7 +128,7 @@ def align_generator(self, gen, **kwargs):
     yield "data: [DONE]\n\n"


-class GraphRAGService(Gateway):
+class GraphRAGService:
     def __init__(self, host="0.0.0.0", port=8000):
         self.host = host
         self.port = port
@@ -135,6 +136,7 @@ class GraphRAGService(Gateway):
         ServiceOrchestrator.align_outputs = align_outputs
         ServiceOrchestrator.align_generator = align_generator
         self.megaservice = ServiceOrchestrator()
+        self.endpoint = str(MegaServiceEndpoint.GRAPH_RAG)

     def add_remote_service(self):
         retriever = MicroService(
@@ -180,7 +182,7 @@ class GraphRAGService(Gateway):
             raise ValueError(f"Unknown request type: {data}")
         if chat_request is None:
             raise ValueError(f"Unknown request type: {data}")
-        prompt = self._handle_message(chat_request.messages)
+        prompt = handle_message(chat_request.messages)
         parameters = LLMParams(
             max_tokens=chat_request.max_tokens if chat_request.max_tokens else 1024,
             top_k=chat_request.top_k if chat_request.top_k else 10,
@@ -223,14 +225,17 @@ class GraphRAGService(Gateway):
         return ChatCompletionResponse(model="chatqna", choices=choices, usage=usage)

     def start(self):
-        super().__init__(
-            megaservice=self.megaservice,
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.GRAPH_RAG),
+            endpoint=self.endpoint,
             input_datatype=ChatCompletionRequest,
             output_datatype=ChatCompletionResponse,
         )
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+        self.service.start()


 if __name__ == "__main__":

View File

@@ -7,7 +7,7 @@ import os
 from io import BytesIO

 import requests
-from comps import Gateway, MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceType
+from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
 from comps.cores.proto.api_protocol import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -29,7 +29,7 @@ LVM_SERVICE_HOST_IP = os.getenv("LVM_SERVICE_HOST_IP", "0.0.0.0")
 LVM_SERVICE_PORT = int(os.getenv("LVM_SERVICE_PORT", 9399))


-class MultimodalQnAService(Gateway):
+class MultimodalQnAService:
     asr_port = int(os.getenv("ASR_SERVICE_PORT", 3001))
     asr_endpoint = os.getenv("ASR_SERVICE_ENDPOINT", "http://0.0.0.0:{}/v1/audio/transcriptions".format(asr_port))

@@ -38,6 +38,7 @@ class MultimodalQnAService(Gateway):
         self.port = port
         self.lvm_megaservice = ServiceOrchestrator()
         self.megaservice = ServiceOrchestrator()
+        self.endpoint = str(MegaServiceEndpoint.MULTIMODAL_QNA)

     def add_remote_service(self):
         mm_embedding = MicroService(
@@ -74,7 +75,6 @@ class MultimodalQnAService(Gateway):
         # for lvm megaservice
         self.lvm_megaservice.add(lvm)

-    # this overrides _handle_message method of Gateway
     def _handle_message(self, messages):
         images = []
         audios = []
@@ -303,14 +303,17 @@ class MultimodalQnAService(Gateway):
         return ChatCompletionResponse(model="multimodalqna", choices=choices, usage=usage)

     def start(self):
-        super().__init__(
-            megaservice=self.megaservice,
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.MULTIMODAL_QNA),
+            endpoint=self.endpoint,
             input_datatype=ChatCompletionRequest,
             output_datatype=ChatCompletionResponse,
         )
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+        self.service.start()


 if __name__ == "__main__":
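MultimodalQnA keeps a local _handle_message rather than switching to the shared comps.cores.mega.utils.handle_message helper: as the retained body shows, it must also collect images and audio from the messages. With the Gateway base gone, only the now-obsolete "overrides" comment is removed; the method itself is unchanged and is simply an ordinary method of the class.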

View File

@@ -3,7 +3,8 @@

 import os

-from comps import Gateway, MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceType
+from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
+from comps.cores.mega.utils import handle_message
 from comps.cores.proto.api_protocol import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -26,11 +27,12 @@ LLM_SERVICE_HOST_IP = os.getenv("LLM_SERVICE_HOST_IP", "0.0.0.0")
 LLM_SERVICE_PORT = int(os.getenv("LLM_SERVICE_PORT", 9000))


-class SearchQnAService(Gateway):
+class SearchQnAService:
     def __init__(self, host="0.0.0.0", port=8000):
         self.host = host
         self.port = port
         self.megaservice = ServiceOrchestrator()
+        self.endpoint = str(MegaServiceEndpoint.SEARCH_QNA)

     def add_remote_service(self):
         embedding = MicroService(
@@ -74,7 +76,7 @@ class SearchQnAService(Gateway):
         data = await request.json()
         stream_opt = data.get("stream", True)
         chat_request = ChatCompletionRequest.parse_obj(data)
-        prompt = self._handle_message(chat_request.messages)
+        prompt = handle_message(chat_request.messages)
         parameters = LLMParams(
             max_tokens=chat_request.max_tokens if chat_request.max_tokens else 1024,
             top_k=chat_request.top_k if chat_request.top_k else 10,
@@ -110,14 +112,17 @@ class SearchQnAService(Gateway):
         return ChatCompletionResponse(model="searchqna", choices=choices, usage=usage)

     def start(self):
-        super().__init__(
-            megaservice=self.megaservice,
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.SEARCH_QNA),
+            endpoint=self.endpoint,
             input_datatype=ChatCompletionRequest,
             output_datatype=ChatCompletionResponse,
         )
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+        self.service.start()


 if __name__ == "__main__":

View File

@@ -15,7 +15,7 @@

 import asyncio
 import os

-from comps import Gateway, MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceType
+from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
 from comps.cores.proto.api_protocol import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -31,11 +31,12 @@ LLM_SERVICE_HOST_IP = os.getenv("LLM_SERVICE_HOST_IP", "0.0.0.0")
 LLM_SERVICE_PORT = int(os.getenv("LLM_SERVICE_PORT", 9000))


-class TranslationService(Gateway):
+class TranslationService:
     def __init__(self, host="0.0.0.0", port=8000):
         self.host = host
         self.port = port
         self.megaservice = ServiceOrchestrator()
+        self.endpoint = str(MegaServiceEndpoint.TRANSLATION)

     def add_remote_service(self):
         llm = MicroService(
@@ -87,14 +88,17 @@ class TranslationService(Gateway):
         return ChatCompletionResponse(model="translation", choices=choices, usage=usage)

     def start(self):
-        super().__init__(
-            megaservice=self.megaservice,
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.TRANSLATION),
+            endpoint=self.endpoint,
             input_datatype=ChatCompletionRequest,
             output_datatype=ChatCompletionResponse,
         )
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+        self.service.start()


 if __name__ == "__main__":

View File

@@ -3,7 +3,8 @@

 import os

-from comps import Gateway, MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceType
+from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
+from comps.cores.mega.utils import handle_message
 from comps.cores.proto.api_protocol import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -26,11 +27,12 @@ LVM_SERVICE_HOST_IP = os.getenv("LVM_SERVICE_HOST_IP", "0.0.0.0")
 LVM_SERVICE_PORT = int(os.getenv("LVM_SERVICE_PORT", 9000))


-class VideoQnAService(Gateway):
+class VideoQnAService:
     def __init__(self, host="0.0.0.0", port=8888):
         self.host = host
         self.port = port
         self.megaservice = ServiceOrchestrator()
+        self.endpoint = str(MegaServiceEndpoint.VIDEO_RAG_QNA)

     def add_remote_service(self):
         embedding = MicroService(
@@ -74,7 +76,7 @@ class VideoQnAService(Gateway):
         data = await request.json()
         stream_opt = data.get("stream", False)
         chat_request = ChatCompletionRequest.parse_obj(data)
-        prompt = self._handle_message(chat_request.messages)
+        prompt = handle_message(chat_request.messages)
         parameters = LLMParams(
             max_new_tokens=chat_request.max_tokens if chat_request.max_tokens else 1024,
             top_k=chat_request.top_k if chat_request.top_k else 10,
@@ -110,14 +112,17 @@ class VideoQnAService(Gateway):
         return ChatCompletionResponse(model="videoqna", choices=choices, usage=usage)

     def start(self):
-        super().__init__(
-            megaservice=self.megaservice,
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.VIDEO_RAG_QNA),
+            endpoint=self.endpoint,
             input_datatype=ChatCompletionRequest,
             output_datatype=ChatCompletionResponse,
         )
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+        self.service.start()


 if __name__ == "__main__":

View File

@@ -3,7 +3,8 @@

 import os

-from comps import Gateway, MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceType
+from comps import MegaServiceEndpoint, MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
+from comps.cores.mega.utils import handle_message
 from comps.cores.proto.api_protocol import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -20,11 +21,12 @@ LVM_SERVICE_HOST_IP = os.getenv("LVM_SERVICE_HOST_IP", "0.0.0.0")
 LVM_SERVICE_PORT = int(os.getenv("LLM_SERVICE_PORT", 9399))


-class VisualQnAService(Gateway):
+class VisualQnAService:
     def __init__(self, host="0.0.0.0", port=8000):
         self.host = host
         self.port = port
         self.megaservice = ServiceOrchestrator()
+        self.endpoint = str(MegaServiceEndpoint.VISUAL_QNA)

     def add_remote_service(self):
         llm = MicroService(
@@ -41,7 +43,7 @@ class VisualQnAService(Gateway):
         data = await request.json()
         stream_opt = data.get("stream", False)
         chat_request = ChatCompletionRequest.parse_obj(data)
-        prompt, images = self._handle_message(chat_request.messages)
+        prompt, images = handle_message(chat_request.messages)
         parameters = LLMParams(
             max_new_tokens=chat_request.max_tokens if chat_request.max_tokens else 1024,
             top_k=chat_request.top_k if chat_request.top_k else 10,
@@ -77,14 +79,17 @@ class VisualQnAService(Gateway):
         return ChatCompletionResponse(model="visualqna", choices=choices, usage=usage)

     def start(self):
-        super().__init__(
-            megaservice=self.megaservice,
+        self.service = MicroService(
+            self.__class__.__name__,
+            service_role=ServiceRoleType.MEGASERVICE,
             host=self.host,
             port=self.port,
-            endpoint=str(MegaServiceEndpoint.VISUAL_QNA),
+            endpoint=self.endpoint,
             input_datatype=ChatCompletionRequest,
             output_datatype=ChatCompletionResponse,
         )
+        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
+        self.service.start()


 if __name__ == "__main__":
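Each file's __main__ block is truncated in the view above and appears unchanged by this commit. For completeness, the shape it takes against the refactored classes, sketched for the VisualQnA case (a hypothetical reconstruction, not part of the diff):

    if __name__ == "__main__":
        visualqna = VisualQnAService(host="0.0.0.0", port=8000)
        visualqna.add_remote_service()  # wire up the remote microservices
        visualqna.start()               # compose the MicroService and serve the POST route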