Damanger committed on
Commit 1444ed5 · 1 Parent(s): bde80ce

Initial commit: FastAPI plates API

Files changed (4)
  1. Dockerfile +17 -0
  2. README copy.md +9 -0
  3. app.py +206 -0
  4. requirements.txt +9 -0
Dockerfile ADDED
@@ -0,0 +1,17 @@
+ FROM python:3.11-slim
+
+ ENV PYTHONUNBUFFERED=1 \
+     HF_HOME=/data/.huggingface
+
+ # Minimal system dependencies
+ RUN apt-get update && apt-get install -y --no-install-recommends \
+     git wget ca-certificates && \
+     rm -rf /var/lib/apt/lists/*
+
+ WORKDIR /app
+ COPY requirements.txt .
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ COPY . .
+ EXPOSE 7860
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
README copy.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ title: Plates FastAPI
+ emoji: 🚗
+ colorFrom: indigo
+ colorTo: green
+ sdk: docker
+ app_port: 7860
+ pinned: false
+ ---
app.py ADDED
@@ -0,0 +1,206 @@
+ import os, io, base64, urllib.request, ssl, time, json
+ from typing import Optional, List
+ import numpy as np, cv2, torch
+ from ultralytics import YOLO
+ import easyocr
+
+ from fastapi import FastAPI, HTTPException, File, UploadFile
+ from fastapi.middleware.cors import CORSMiddleware
+ from pydantic import BaseModel, Field
+ from huggingface_hub import hf_hub_download
+
+ # --- YOLO model from the Hub (cached under HF_HOME if configured) ---
+ WEIGHTS = hf_hub_download(
+     repo_id="keremberke/yolov8n-license-plate",
+     filename="yolov8n-license-plate.pt"
+ )  # returns a local path
+ yolo = YOLO(WEIGHTS)
+
+ # EasyOCR with GPU if available
+ reader = easyocr.Reader(['en'], gpu=torch.cuda.is_available())
+
+ ALLOW = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+
+ def preprocess_for_ocr(plate_bgr):
+     img = plate_bgr.copy()
+     h, w = img.shape[:2]
+     if max(h, w) < 160:
+         img = cv2.resize(img, (w*2, h*2), interpolation=cv2.INTER_CUBIC)
+     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+     gray = cv2.bilateralFilter(gray, 7, 50, 50)
+     th = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
+                                cv2.THRESH_BINARY, 31, 5)
+     return th
+
+ def ocr_plate(plate_bgr):  # OCR both the thresholded and the raw crop; keep the best candidate
+     img = preprocess_for_ocr(plate_bgr)
+     out1 = reader.readtext(img, detail=1, allowlist=ALLOW)
+     out2 = reader.readtext(plate_bgr, detail=1, allowlist=ALLOW)
+     cands = []
+     for out in (out1, out2):
+         for _, text, score in out:
+             t = "".join([c for c in text.upper() if c in ALLOW])
+             if len(t) >= 4:
+                 cands.append((t, float(score)))
+     if not cands:
+         return "", 0.0
+     cands.sort(key=lambda x: (x[1], len(x[0])), reverse=True)
+     return cands[0]
+
+ def draw_box_text(img, xyxy, text, color=(0, 255, 0)):
+     x1, y1, x2, y2 = [int(v) for v in xyxy]
+     cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
+     if text:
+         tsize = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)[0]
+         cv2.rectangle(img, (x1, y1 - tsize[1] - 6), (x1 + tsize[0] + 4, y1), color, -1)
+         cv2.putText(img, text, (x1 + 2, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2, cv2.LINE_AA)
+
+ def detect_plates_bgr(bgr, conf=0.25, iou=0.45):
+     res = yolo.predict(bgr, conf=conf, iou=iou, verbose=False)[0]
+     boxes = res.boxes.xyxy.cpu().numpy() if res.boxes is not None else np.empty((0, 4))
+     confs = res.boxes.conf.cpu().numpy() if res.boxes is not None else np.empty((0,))
+     return boxes, confs
+
+ def run_on_image_bgr(bgr, conf=0.25, iou=0.45, with_ocr=True, annotate=True):
+     h, w = bgr.shape[:2]
+     vis = bgr.copy()
+     t0 = time.time()
+     boxes, confs = detect_plates_bgr(bgr, conf, iou)
+     detections = []
+     for xyxy, c in zip(boxes, confs):
+         x1, y1, x2, y2 = [int(v) for v in xyxy]
+         crop = bgr[max(0, y1):max(0, y2), max(0, x1):max(0, x2)]
+         txt, s = ("", 0.0)
+         if with_ocr and crop.size:
+             txt, s = ocr_plate(crop)
+         if annotate:
+             label = f"{txt or 'plate'} {c:.2f}"
+             draw_box_text(vis, xyxy, label)
+         detections.append({
+             "box_xyxy": [x1, y1, x2, y2],
+             "det_conf": float(c),
+             "ocr_text": txt,
+             "ocr_conf": float(s),
+         })
+     dt_ms = int((time.time() - t0) * 1000)
+     return vis, detections, (w, h), dt_ms
+
+ def bgr_to_jpeg_base64(bgr):
+     ok, buf = cv2.imencode(".jpg", bgr, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
+     if not ok:
+         return None
+     return base64.b64encode(buf.tobytes()).decode("ascii")
+
+ def load_image_from_url(url: str):
+     ssl._create_default_https_context = ssl._create_unverified_context  # NOTE: disables TLS certificate verification process-wide
+     data = urllib.request.urlopen(url).read()
+     arr = np.frombuffer(data, np.uint8)
+     bgr = cv2.imdecode(arr, cv2.IMREAD_COLOR)
+     if bgr is None:
+         raise ValueError("Could not decode the image from the URL.")
+     return bgr
+
+ def load_image_from_b64(b64_or_data_url: str):
+     s = b64_or_data_url
+     if s.startswith("data:"):
+         s = s.split(",", 1)[1]
+     raw = base64.b64decode(s)
+     arr = np.frombuffer(raw, np.uint8)
+     bgr = cv2.imdecode(arr, cv2.IMREAD_COLOR)
+     if bgr is None:
+         raise ValueError("Could not decode the image from base64.")
+     return bgr
+
+ # --- FastAPI ---
+ app = FastAPI(title="Plates API (HF Space)")
+
+ ALLOWED = [
+     "http://localhost:5173", "http://127.0.0.1:5173",
+     "https://www.omar-cruz.com", "https://omar-cruz.com",
+ ]
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=ALLOWED,
+     allow_origin_regex=r"^https?://([a-z0-9-]+\.)*hf\.space$",
+     allow_credentials=False,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ class Detection(BaseModel):
+     box_xyxy: List[int]
+     det_conf: float
+     ocr_text: str = ""
+     ocr_conf: float = 0.0
+
+ class DetectResponse(BaseModel):
+     detections: List[Detection]
+     count: int
+     width: int
+     height: int
+     time_ms: int
+     annotated_image_b64: Optional[str] = None
+
+ class DetectRequest(BaseModel):
+     image_url: Optional[str] = None
+     image_b64: Optional[str] = None
+     conf: float = Field(0.25, ge=0.05, le=0.95)
+     iou: float = Field(0.45, ge=0.1, le=0.9)
+     ocr: bool = True
+     return_image: bool = False
+
+ @app.get("/")
+ def health():
+     return {
+         "status": "ok",
+         "service": "plates-api",
+         "model": os.path.basename(WEIGHTS),
+         "ocr_gpu": torch.cuda.is_available(),
+         "allow_origins": ALLOWED,
+     }
+
+ @app.post("/detect", response_model=DetectResponse)
+ def detect(req: DetectRequest):
+     try:
+         if not req.image_url and not req.image_b64:
+             raise HTTPException(400, "Provide 'image_url' or 'image_b64'.")
+         bgr = load_image_from_url(req.image_url) if req.image_url else load_image_from_b64(req.image_b64)
+         vis, dets, (w, h), dt_ms = run_on_image_bgr(
+             bgr, conf=req.conf, iou=req.iou, with_ocr=req.ocr, annotate=req.return_image
+         )
+         b64 = bgr_to_jpeg_base64(vis) if req.return_image else None
+         return DetectResponse(
+             detections=dets, count=len(dets), width=w, height=h, time_ms=dt_ms,
+             annotated_image_b64=b64
+         )
+     except HTTPException:
+         raise
+     except Exception as e:
+         raise HTTPException(500, f"Error processing the image: {e}")
+
+ @app.post("/detect_upload", response_model=DetectResponse)
+ async def detect_upload(
+     image: UploadFile = File(...),
+     conf: float = 0.25,
+     iou: float = 0.45,
+     ocr: bool = True,
+     return_image: bool = False,
+ ):
+     try:
+         data = await image.read()
+         arr = np.frombuffer(data, np.uint8)
+         bgr = cv2.imdecode(arr, cv2.IMREAD_COLOR)
+         if bgr is None:
+             raise HTTPException(400, "Could not decode the uploaded file.")
+         vis, dets, (w, h), dt_ms = run_on_image_bgr(
+             bgr, conf=conf, iou=iou, with_ocr=ocr, annotate=return_image
+         )
+         b64 = bgr_to_jpeg_base64(vis) if return_image else None
+         return DetectResponse(
+             detections=dets, count=len(dets), width=w, height=h, time_ms=dt_ms,
+             annotated_image_b64=b64
+         )
+     except HTTPException:
+         raise
+     except Exception as e:
+         raise HTTPException(500, f"Error processing the image: {e}")
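
The `/detect` endpoint above takes a JSON body with either `image_url` or `image_b64`, plus the optional `conf`, `iou`, `ocr`, and `return_image` fields from DetectRequest. A minimal stdlib client sketch, assuming a placeholder Space URL (not the real deployment):

# client_example.py — hypothetical client for POST /detect (sketch, not part of the commit)
import base64, json, urllib.request

BASE = "https://example-plates-api.hf.space"  # placeholder: substitute the actual Space URL

payload = {
    "image_url": "https://example.com/car.jpg",  # placeholder image URL
    "conf": 0.25,
    "iou": 0.45,
    "ocr": True,
    "return_image": True,
}
req = urllib.request.Request(
    BASE + "/detect",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    out = json.load(resp)

print(out["count"], "plate(s) in", out["time_ms"], "ms")
for d in out["detections"]:
    print(d["box_xyxy"], d["ocr_text"], d["ocr_conf"])
if out.get("annotated_image_b64"):
    # annotated_image_b64 is a base64-encoded JPEG produced by bgr_to_jpeg_base64
    with open("annotated.jpg", "wb") as f:
        f.write(base64.b64decode(out["annotated_image_b64"]))
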
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ fastapi
+ uvicorn[standard]
+ pydantic==2.*
+ python-multipart
+ ultralytics
+ easyocr
+ opencv-python-headless==4.10.0.84
+ huggingface_hub
+ torch  # HF will install a CPU/GPU build depending on the Space hardware
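
For the multipart endpoint, `/detect_upload` receives the file under the form field `image`, while `conf`, `iou`, `ocr`, and `return_image` are plain scalar parameters (query parameters in FastAPI). A sketch of a client call, assuming the third-party `requests` package on the client side and the same placeholder URL:

# upload_example.py — hypothetical client for POST /detect_upload (sketch)
import requests

BASE = "https://example-plates-api.hf.space"  # placeholder: substitute the actual Space URL

with open("car.jpg", "rb") as f:  # placeholder local image file
    r = requests.post(
        BASE + "/detect_upload",
        files={"image": ("car.jpg", f, "image/jpeg")},  # form field name matches the endpoint
        params={"conf": 0.3, "return_image": "false"},  # scalar params travel as query string
        timeout=120,
    )
r.raise_for_status()
data = r.json()
print([d["ocr_text"] for d in data["detections"]])
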