Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -1,40 +1,78 @@
from fastapi import FastAPI, HTTPException, Header
+import pandas as pd
+import os
+import joblib
+import time
+import requests
from huggingface_hub import hf_hub_download
from prometheus_client import Counter, Histogram, Gauge, generate_latest

+print("🚀 Starting FastAPI application...")

+app = FastAPI(title="Loan Approval API", version="1.0")
+
+# Environment variables
+API_KEY = os.getenv("API_KEY", "test-key-123")
HF_MODEL_REPO = os.getenv("HF_MODEL_REPO")
PROM_PUSHGATEWAY = os.getenv("PROM_PUSHGATEWAY")

+print(f"API_KEY: {'Set' if API_KEY else 'Not set'}")
+print(f"HF_MODEL_REPO: {HF_MODEL_REPO}")
+
# Prometheus metrics
REQS = Counter("pred_requests_total", "Total prediction requests")
LAT = Histogram("pred_request_latency_seconds", "Request latency")
LATEST = Gauge("latest_prediction", "Last predicted value")

+# Global variables
+model = None
+encoders = {}
+scaler = None
+feature_columns = []
+categorical_columns = []
+boolean_columns = []
+loaded = False
+
+# Load model on startup
+if not HF_MODEL_REPO:
+    print("⚠️ WARNING: HF_MODEL_REPO not set. Using mock mode.")
    loaded = False
+else:
+    try:
+        print(f" Downloading model from {HF_MODEL_REPO}...")
+
+        m = hf_hub_download(repo_id=HF_MODEL_REPO, filename="best_model.joblib")
+        e = hf_hub_download(repo_id=HF_MODEL_REPO, filename="models/encoders.joblib")
+        s = hf_hub_download(repo_id=HF_MODEL_REPO, filename="models/scaler.joblib")
+        f = hf_hub_download(repo_id=HF_MODEL_REPO, filename="models/feature_columns.joblib")
+        c = hf_hub_download(repo_id=HF_MODEL_REPO, filename="models/categorical_columns.joblib")
+        b = hf_hub_download(repo_id=HF_MODEL_REPO, filename="models/boolean_columns.joblib")
+
+        print(" Loading artifacts...")
+        model = joblib.load(m)
+        encoders = joblib.load(e)
+        scaler = joblib.load(s)
+        feature_columns = joblib.load(f)
+        categorical_columns = joblib.load(c)
+        boolean_columns = joblib.load(b)
+        loaded = True
+
+        print(" Model loaded successfully!")
+        print(f" Features: {len(feature_columns)}")
+    except Exception as ex:
+        print(f" Model load error: {ex}")
+        loaded = False
+
+print(" FastAPI app initialized")


@app.get("/")
def health():
+    return {
+        "status": "ok",
+        "model_loaded": loaded,
+        "features": feature_columns if loaded else []
+    }


@app.post("/predict")

@@ -45,26 +83,56 @@ def predict(payload: dict, x_api_key: str = Header(None)):
    if not loaded:
        raise HTTPException(status_code=503, detail="Model not loaded")

+    try:
+        df = pd.DataFrame([payload])
+
+        for col in feature_columns:
+            if col not in df.columns:
+                df[col] = 0
+
+        for col in boolean_columns:
+            if col in df.columns:
+                if df[col].dtype == bool:
+                    df[col] = df[col].astype(int)
+                elif df[col].dtype == 'object':
+                    df[col] = df[col].map({
+                        'True': 1, 'true': 1, True: 1, 1: 1,
+                        'False': 0, 'false': 0, False: 0, 0: 0
+                    }).fillna(0).astype(int)
+
+        for col in categorical_columns:
+            if col in df.columns and col in encoders:
+                try:
+                    df[col] = encoders[col].transform(df[col])
+                except:
+                    df[col] = 0
+
+        df = df[feature_columns]
+        df_scaled = scaler.transform(df)
+
+        start = time.time()
+        pred = model.predict(df_scaled)[0]
+        latency = time.time() - start
+
+        LAT.observe(latency)
+        REQS.inc()
+        LATEST.set(pred)
+
+        if PROM_PUSHGATEWAY:
+            try:
+                requests.post(f"{PROM_PUSHGATEWAY}/metrics/job/loan_model", data=generate_latest(), timeout=2)
+            except:
+                pass
+
+        return {
+            "prediction": int(pred),
+            "prediction_label": "Approved" if pred == 1 else "Rejected",
+            "latency_seconds": round(latency, 4)
+        }
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))


@app.get("/metrics")
def metrics():
+    return generate_latest()
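
Below is a small client sketch for smoke-testing the updated endpoints. It is not part of the commit: the base URL, the x-api-key value, and the payload keys are placeholder assumptions (the real feature names come from models/feature_columns.joblib, and any x-api-key validation presumably sits in lines 79-82 of app.py, which this diff does not show), so adapt them to the actual Space.

import requests

BASE_URL = "http://localhost:7860"       # placeholder; use the Space's own URL
HEADERS = {"x-api-key": "test-key-123"}  # matches the API_KEY default above

# Health check: reports whether the model artifacts were loaded.
print(requests.get(f"{BASE_URL}/", timeout=10).json())

# Prediction: keys should match the training feature columns; the endpoint
# fills any missing columns with 0 before encoding, scaling, and predicting.
sample = {"person_age": 30, "person_income": 50000, "loan_amnt": 10000}  # placeholder features
resp = requests.post(f"{BASE_URL}/predict", json=sample, headers=HEADERS, timeout=10)
print(resp.status_code, resp.json())

# Prometheus metrics exposed by the app (also pushed to PROM_PUSHGATEWAY when it is set).
print(requests.get(f"{BASE_URL}/metrics", timeout=10).text[:200])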