File size: 2,403 Bytes
aa2b8a5
 
 
 
 
 
d1eb7e5
aa2b8a5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
import cv2
import gradio as gr
import numpy as np
from PIL import Image
from ultralytics import YOLO

# Load model (ensure the path to the weights file is correct)
# NOTE(review): loaded once at import time and shared by both predict
# functions below; "car_logos.pt" is resolved relative to the CWD.
model_path = "car_logos.pt"
detection_model = YOLO(model_path)

def predict_image(pil_image):
    """Run logo detection on a single image.

    Args:
        pil_image: Input image as a PIL ``Image`` (RGB).

    Returns:
        A PIL ``Image`` with detection boxes drawn on it.
    """
    # OpenCV and the model's plot() work in BGR order, so convert
    # from PIL's RGB. (Requires numpy, imported at module level.)
    frame = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)

    # Thresholds mirror the video pipeline for consistent results.
    results = detection_model.predict(frame, conf=0.5, iou=0.6)

    # plot() returns an annotated BGR array; flip channels back to RGB.
    annotated_frame = results[0].plot()
    return Image.fromarray(annotated_frame[..., ::-1])

def predict_video(video_path):
    """Run logo detection on every frame of a video.

    Args:
        video_path: Filesystem path to the input video.

    Returns:
        Path to the annotated output video ("output_video.mp4"), or a
        message string when no frames could be read.
    """
    cap = cv2.VideoCapture(video_path)
    # Preserve the source frame rate instead of hard-coding 30 fps,
    # which would distort playback speed; cv2 reports 0 when the
    # container has no FPS metadata, so fall back to 30 then.
    fps = cap.get(cv2.CAP_PROP_FPS) or 30
    out_path = "output_video.mp4"
    writer = None
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            results = detection_model.predict(frame, conf=0.5, iou=0.6)
            annotated_frame = results[0].plot()
            if writer is None:
                # Create the writer lazily, once the frame size is known.
                height, width, _ = annotated_frame.shape
                writer = cv2.VideoWriter(
                    out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height)
                )
            # Write each frame immediately instead of buffering the whole
            # video in memory (the original accumulated every frame in a
            # list, risking OOM on long inputs).
            writer.write(annotated_frame)
    finally:
        # Release handles even if inference raises mid-video.
        cap.release()
        if writer is not None:
            writer.release()

    if writer is None:
        return "No frames processed."
    return out_path

def create_gradio_interface():
    """Build the Gradio UI (image and video tabs) and launch the server."""
    with gr.Blocks() as demo:
        with gr.Tab("Image Upload"):
            # Fixed garbled label text from the original
            # ("Upload and Honda or Toyota Logo image").
            gr.Markdown("### Upload a Honda or Toyota logo image for object detection")
            image_input = gr.Image(type="pil", label="Input Image")
            image_output = gr.Image(type="pil", label="Annotated Image")
            image_button = gr.Button("Process Image")
            image_button.click(fn=predict_image, inputs=image_input, outputs=image_output)

        with gr.Tab("Video Upload"):
            gr.Markdown("### Upload a Video for Object Detection")
            video_input = gr.Video(label="Input Video")
            video_output = gr.File(label="Annotated Video")
            video_button = gr.Button("Process Video")
            video_button.click(fn=predict_video, inputs=video_input, outputs=video_output)

    # Launch after the layout context closes: launching inside the
    # ``with`` block works, but conflates UI construction with serving.
    demo.launch()


create_gradio_interface()