Files
GenAIExamples/EdgeCraftRAG/edgecraftrag/server.py
Zhu Yongbo c9088eb824 Add EdgeCraftRag as a GenAIExample (#1072)
Signed-off-by: ZePan110 <ze.pan@intel.com>
Signed-off-by: chensuyue <suyue.chen@intel.com>
Signed-off-by: Zhu, Yongbo <yongbo.zhu@intel.com>
Signed-off-by: Wang, Xigui <xigui.wang@intel.com>
Co-authored-by: ZePan110 <ze.pan@intel.com>
Co-authored-by: chen, suyue <suyue.chen@intel.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: xiguiw <111278656+xiguiw@users.noreply.github.com>
Co-authored-by: lvliang-intel <liang1.lv@intel.com>
2024-11-08 21:07:24 +08:00

28 lines
770 B
Python

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import uvicorn
from edgecraftrag.api.v1.chatqna import chatqna_app
from edgecraftrag.api.v1.data import data_app
from edgecraftrag.api.v1.model import model_app
from edgecraftrag.api.v1.pipeline import pipeline_app
from fastapi import FastAPI
from llama_index.core.settings import Settings
# Single FastAPI instance that serves every sub-application's endpoints.
app = FastAPI()

# Fold the routes of each feature-specific sub-app (data, model, pipeline,
# chatqna) into the main app's router so all endpoints share one server.
sub_apps = [data_app, model_app, pipeline_app, chatqna_app]
for sub_app in sub_apps:
    app.router.routes.extend(sub_app.routes)
if __name__ == "__main__":
    # NOTE(review): presumably prevents llama-index from resolving a default
    # LLM at import time (models are supplied by the pipeline) — confirm.
    Settings.llm = None

    # Bind address/port are environment-configurable with local defaults.
    bind_host = os.getenv("PIPELINE_SERVICE_HOST_IP", "0.0.0.0")
    bind_port = int(os.getenv("PIPELINE_SERVICE_PORT", 16010))
    uvicorn.run(app, host=bind_host, port=bind_port)