- Added requirements file
- Consolidated imports in server.py
@@ -1,18 +1,37 @@
-# 1. Start from a base image with Python installed
-FROM python:3.13.7
+# Build:
+# docker build -t myapp .
 
-# 2. Set the working directory in the container
+# Run:
+# docker run -p 8000:8000 myapp
+
+# Use Python 3.13.7 base image
+FROM python:3.13.7-slim
+
+# Set working directory
 WORKDIR /app
 
-# 3. Copy requirements file and install dependencies
-COPY requirements.txt .
-RUN pip install --no-cache-dir -r requirements.txt
+# Install dependencies for opentelemetry + ngrok
+RUN pip install --no-cache-dir \
+    opentelemetry-distro \
+    opentelemetry-instrumentation \
+    opentelemetry-exporter-otlp \
+    pyngrok
 
-# 4. Copy the rest of your app’s code
+# Copy project files
 COPY . .
 
-# 5. Expose port (optional, for web apps)
+# Install Python dependencies from requirements.txt
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Download model at build time
+RUN python downloadModel.py
+
+# Expose FastAPI/Flask/HTTP server port
 EXPOSE 8000
 
-# 6. Default command to run the app
-CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
+# Copy entrypoint script
+COPY entrypoint.sh /entrypoint.sh
+RUN chmod +x /entrypoint.sh
+
+# Default command: run script
+CMD ["/entrypoint.sh"]
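One side effect of the new ordering: the RUN pip install -r requirements.txt step now comes after COPY . ., so every code change invalidates the dependency layer and reinstalls all packages (the old Dockerfile copied requirements.txt first for exactly this reason). A possible reordering, using only steps already present in the diff above, that lets the dependency layers cache independently of code changes:

FROM python:3.13.7-slim

WORKDIR /app

# Dependency layers first: rebuilt only when requirements.txt changes
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
RUN pip install --no-cache-dir \
    opentelemetry-distro \
    opentelemetry-instrumentation \
    opentelemetry-exporter-otlp \
    pyngrok

# Code and model last: rebuilt on code changes
COPY . .
RUN python downloadModel.py

EXPOSE 8000

COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
CMD ["/entrypoint.sh"]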
Binary file not shown.
python_server/entrypoint.sh (Normal file, 9 lines)
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+# This script exists to run ngrok from inside the Docker container
+
+# Start server with OpenTelemetry in background
+opentelemetry-instrument python server.py &
+
+# Run ngrok to expose port 8000
+ngrok http --url=pegasus-working-bison.ngrok-free.app 8000
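A caveat on this script: the Dockerfile installs pyngrok (the Python wrapper) via pip but never installs the standalone ngrok agent, so the bare ngrok command above may not exist on PATH inside the container. pyngrok downloads and manages the agent binary itself, so one option is to open the tunnel from Python. A minimal sketch, reusing the reserved domain from this script and assuming an NGROK_AUTHTOKEN environment variable (the variable name and overall layout are illustrative, not taken from the repo):

import os
import subprocess

from pyngrok import ngrok

# Authenticate the agent (the ngrok v3 agent also honors NGROK_AUTHTOKEN)
ngrok.set_auth_token(os.environ["NGROK_AUTHTOKEN"])

# Expose port 8000 on the reserved domain; pyngrok fetches the agent
# binary on first use, so no separate ngrok install step is required
tunnel = ngrok.connect(8000, domain="pegasus-working-bison.ngrok-free.app")
print(f"public URL: {tunnel.public_url}")

# Run the instrumented server in the foreground so the container lives
# and dies with the server rather than with the tunnel process
subprocess.run(["opentelemetry-instrument", "python", "server.py"], check=True)

As written, entrypoint.sh also inverts the usual supervision: the server runs in the background and the container's lifetime is tied to ngrok, so a crashed server leaves a tunnel pointing at nothing.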
python_server/requirements.txt (Normal file, 8 lines)
@@ -0,0 +1,8 @@
+transformers
+torch
+# With CUDA support: pip install torch torchvision torchaudio
+pillow
+qdrant_client
+fastapi
+uvicorn
+python-multipart
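The list is unpinned, and the OpenTelemetry and ngrok packages are installed in a separate RUN step in the Dockerfile rather than listed here. If they are meant to be project dependencies, appending them (names taken from the Dockerfile above) would let a plain pip install -r requirements.txt reproduce the image's environment:

opentelemetry-distro
opentelemetry-instrumentation
opentelemetry-exporter-otlp
pyngrok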
@@ -9,6 +9,10 @@ import logging
 from fastapi import FastAPI, Request
 import uvicorn
 from fastapi.middleware.cors import CORSMiddleware
+import uvicorn
+import multiprocessing
+from fastapi import FastAPI, File, UploadFile
+from fastapi.responses import JSONResponse
 
 #from pyngrok import ngrok, conf
 
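Two small observations on this hunk: the added import uvicorn duplicates the one two lines above (harmless, since Python caches imports, but one can go), and CORSMiddleware is imported yet no add_middleware call appears anywhere in the diff. If CORS is actually meant to be enabled, the standard FastAPI wiring looks like this sketch (the allow-lists are placeholders, not values from this repo):

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# Register the middleware; without this call the import has no effect
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],   # tighten to the real frontend origin(s)
    allow_methods=["*"],
    allow_headers=["*"],
)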
@@ -125,9 +129,6 @@ async def predict(request: Request):
     except Exception as e:
         logging.error(f"Error in /predict: {e}")
         return {"status": False, "error": str(e)}
 
-from fastapi import FastAPI, File, UploadFile
-from fastapi.responses import JSONResponse
-
 @app.post("/predictfile")
 async def predict_file(file: UploadFile = File(...)):
@@ -148,9 +149,7 @@ async def predict_file(file: UploadFile = File(...)):
         return JSONResponse(content={"status": False, "error": str(e)})
 
+
 #from pyngrok import ngrokS
-import uvicorn
-import multiprocessing
-
 
 def run_server():
     uvicorn.run("server:app", host="0.0.0.0", port=8000)
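With the imports consolidated at the top of the file, run_server() and the commented pyngrok line are left as dead code: nothing in the visible diff calls run_server() except the commented-out multiprocessing lines below, and the __main__ block starts uvicorn directly. Based only on the lines visible in this commit, the tail of server.py could shrink to:

if __name__ == "__main__":
    uvicorn.run("server:app", host="0.0.0.0", port=8000)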
@@ -164,6 +163,5 @@ if __name__ == "__main__":
     #p = multiprocessing.Process(target=run_server)
     #p.start()
     #p.join()
-    import uvicorn
     uvicorn.run("server:app", host="0.0.0.0", port=8000)
 