- Added requirements file
- Modified server.py a bit

commit 7c3baefb0f (parent 4ea7d1516b)
2025-09-01 14:04:27 -06:00
5 changed files with 51 additions and 17 deletions

Dockerfile

@@ -1,18 +1,37 @@
-# 1. Start from a base image with Python installed
-FROM python:3.13.7
-# 2. Set the working directory in the container
+# Build:
+#   docker build -t myapp .
+# Run:
+#   docker run -p 8000:8000 myapp
+
+# Use Python 3.13.7 base image
+FROM python:3.13.7-slim
+
+# Set working directory
 WORKDIR /app
-# 3. Copy requirements file and install dependencies
-COPY requirements.txt .
-RUN pip install --no-cache-dir -r requirements.txt
+
+# Install dependencies for opentelemetry + ngrok
+RUN pip install --no-cache-dir \
+    opentelemetry-distro \
+    opentelemetry-instrumentation \
+    opentelemetry-exporter-otlp \
+    pyngrok
+
-# 4. Copy the rest of your apps code
+# Copy project files
 COPY . .
-# 5. Expose port (optional, for web apps)
+
+# Install Python dependencies from requirements.txt
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Download model at build time
+RUN python downloadModel.py
+
+# Expose FastAPI/Flask/HTTP server port
 EXPOSE 8000
-# 6. Default command to run the app
-CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
+
+# Copy entrypoint script
+COPY entrypoint.sh /entrypoint.sh
+RUN chmod +x /entrypoint.sh
+
+# Default command: run script
+CMD ["/entrypoint.sh"]

entrypoint.sh

@@ -0,0 +1,9 @@
+#!/bin/sh
+# This script exists so ngrok can run inside the Docker container
+# Start the server with OpenTelemetry instrumentation in the background
+opentelemetry-instrument python server.py &
+# Run ngrok in the foreground to expose port 8000 on the reserved domain
+ngrok http --url=pegasus-working-bison.ngrok-free.app 8000
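Since the image also installs pyngrok and server.py carries commented-out pyngrok imports, the same tunnel could instead be opened from Python rather than the ngrok CLI. A sketch, assuming pyngrok with an ngrok v3 agent that accepts domain as a tunnel option, and an auth token already configured (e.g. via NGROK_AUTHTOKEN):

# Hypothetical pyngrok equivalent of the entrypoint's CLI call; not part of this commit.
from pyngrok import ngrok
import uvicorn

# Open the tunnel on the same reserved domain the entrypoint uses
tunnel = ngrok.connect(8000, domain="pegasus-working-bison.ngrok-free.app")
print(f"public URL: {tunnel.public_url}")

# Run the server in the foreground; the tunnel lives in this process
uvicorn.run("server:app", host="0.0.0.0", port=8000)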

requirements.txt

@@ -0,0 +1,8 @@
+transformers
+torch
+# For CUDA support: pip install torch torchvision torchaudio
+pillow
+qdrant_client
+fastapi
+uvicorn
+python-multipart
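The CUDA note above only pays off if torch can actually see a GPU at runtime; a throwaway check (not part of the commit):

import torch

# Prints the installed torch version and whether a CUDA device is visible
print(torch.__version__, "CUDA available:", torch.cuda.is_available())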

server.py

@@ -9,6 +9,10 @@ import logging
 from fastapi import FastAPI, Request
 import uvicorn
 from fastapi.middleware.cors import CORSMiddleware
+import uvicorn
+import multiprocessing
+from fastapi import FastAPI, File, UploadFile
+from fastapi.responses import JSONResponse

 #from pyngrok import ngrok, conf
@@ -125,9 +129,6 @@ async def predict(request: Request):
     except Exception as e:
         logging.error(f"Error in /predict: {e}")
         return {"status": False, "error": str(e)}
-from fastapi import FastAPI, File, UploadFile
-from fastapi.responses import JSONResponse

 @app.post("/predictfile")
 async def predict_file(file: UploadFile = File(...)):
@@ -148,9 +149,7 @@ async def predict_file(file: UploadFile = File(...)):
         return JSONResponse(content={"status": False, "error": str(e)})
-#from pyngrok import ngrokS
-import uvicorn
-import multiprocessing

 def run_server():
     uvicorn.run("server:app", host="0.0.0.0", port=8000)
@@ -164,6 +163,5 @@ if __name__ == "__main__":
     #p = multiprocessing.Process(target=run_server)
     #p.start()
     #p.join()
-    import uvicorn
     uvicorn.run("server:app", host="0.0.0.0", port=8000)
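The diff shows the /predictfile route signature but elides its body. For context, a sketch of the typical shape of such a handler, matching the error format visible in the diff; predict_image is a stand-in name, not a function from this repo:

# Hypothetical shape of the /predictfile handler; the real body is elided above.
from io import BytesIO

from fastapi import FastAPI, File, UploadFile
from fastapi.responses import JSONResponse
from PIL import Image

app = FastAPI()

def predict_image(image):
    # Placeholder for the real model call; returns something JSON-serializable
    return {"width": image.width, "height": image.height}

@app.post("/predictfile")
async def predict_file(file: UploadFile = File(...)):
    try:
        # Read the upload into memory and decode it as an RGB image
        image = Image.open(BytesIO(await file.read())).convert("RGB")
        result = predict_image(image)
        return JSONResponse(content={"status": True, "result": result})
    except Exception as e:
        return JSONResponse(content={"status": False, "error": str(e)})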