NovaIZ committed
Commit aa2b8a5 · 1 Parent(s): 61db0cf
Files changed (1)
  1. app.py +72 -0
app.py ADDED
@@ -0,0 +1,72 @@
+ import gradio as gr
+ import numpy as np
+ from PIL import Image
+ import cv2
+ from ultralytics import YOLO
+
+ # Load the model (ensure the path to the weights file is correct)
+ model_path = "Car-Logos/train15/weights/best.pt"
+ detection_model = YOLO(model_path)
+
+ def predict_image(pil_image):
+     """Process an image and return the annotated image."""
+     # Convert the PIL image (RGB) to a NumPy array in BGR order for OpenCV
+     frame = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
+
+     # Run the YOLO model
+     results = detection_model.predict(frame, conf=0.5, iou=0.6)
+
+     # Draw the detections on the frame
+     annotated_frame = results[0].plot()
+
+     # Flip BGR back to RGB before converting to a PIL image
+     out_pil_image = Image.fromarray(annotated_frame[..., ::-1])
+     return out_pil_image
+
+ def predict_video(video_path):
+     """Process a video and return the path to the annotated output video."""
+     cap = cv2.VideoCapture(video_path)
+     fps = cap.get(cv2.CAP_PROP_FPS) or 30  # fall back to 30 FPS if the source rate is unknown
+     output_frames = []
+
+     while cap.isOpened():
+         ret, frame = cap.read()
+         if not ret:
+             break
+         results = detection_model.predict(frame, conf=0.5, iou=0.6)
+         annotated_frame = results[0].plot()
+         output_frames.append(annotated_frame)
+
+     cap.release()
+
+     if output_frames:
+         height, width, _ = output_frames[0].shape
+         out_path = "output_video.mp4"
+         out = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
+         for frame in output_frames:
+             out.write(frame)
+         out.release()
+         return out_path
+     # Return None so the gr.File output stays empty when no frames were read
+     return None
+
+ def create_gradio_interface():
+     with gr.Blocks() as demo:
+         with gr.Tab("Upload a Honda or Toyota Logo Image"):
+             gr.Markdown("### Upload a Honda or Toyota Logo for Object Detection")
+             image_input = gr.Image(type="pil", label="Input Image")
+             image_output = gr.Image(type="pil", label="Annotated Image")
+             image_button = gr.Button("Process Image")
+             image_button.click(fn=predict_image, inputs=image_input, outputs=image_output)
+
+         with gr.Tab("Video Upload"):
+             gr.Markdown("### Upload a Video for Object Detection")
+             video_input = gr.Video(label="Input Video")
+             video_output = gr.File(label="Annotated Video")
+             video_button = gr.Button("Process Video")
+             video_button.click(fn=predict_video, inputs=video_input, outputs=video_output)
+
+     demo.launch()
+
+
+ if __name__ == "__main__":
+     create_gradio_interface()
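For a quick check outside the Gradio UI, predict_image can be called directly once the definitions above are loaded (for example, in an interactive session after running the imports and functions from app.py). This is a minimal sketch under assumptions: the test image name test_logo.jpg is hypothetical, and the weights path referenced above must exist on disk.

from PIL import Image

# Hypothetical test image: replace with any Honda or Toyota logo photo on disk.
test_image = Image.open("test_logo.jpg")

# Runs detection with the same thresholds as the app (conf=0.5, iou=0.6)
# and saves the annotated result for inspection.
annotated = predict_image(test_image)
annotated.save("test_logo_annotated.jpg")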