itsomk committed on
Commit 9748112 · verified · 1 Parent(s): 7a4e327

Create app.py

Files changed (1)
  1. app.py +160 -0
app.py ADDED
@@ -0,0 +1,160 @@
+ import gradio as gr
+ import torch
+ from torchvision import models, transforms
+ from safetensors.torch import load_file
+ from huggingface_hub import hf_hub_download
+ from PIL import Image
+ import numpy as np
+ from skimage.transform import resize
+ from pytorch_grad_cam import GradCAM
+ from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
+ from pytorch_grad_cam.utils.image import show_cam_on_image
+
+ # Constants
+ REPO_ID = "itsomk/chexpert-densenet121"
+ FILENAME = "pytorch_model.safetensors"
+
+ # Model Definition
+ class DenseNet121_CheXpert(torch.nn.Module):
+     def __init__(self, num_labels=14, pretrained=None):
+         super().__init__()
+         self.densenet = models.densenet121(weights=pretrained)
+         num_features = self.densenet.classifier.in_features
+         self.densenet.classifier = torch.nn.Linear(num_features, num_labels)
+
+     def forward(self, x):
+         return self.densenet(x)
+
+ # Labels
+ LABELS = [
+     "No Finding", "Enlarged Cardiomediastinum", "Cardiomegaly", "Lung Opacity",
+     "Lung Lesion", "Edema", "Consolidation", "Pneumonia", "Atelectasis",
+     "Pneumothorax", "Pleural Effusion", "Pleural Other", "Fracture", "Support Devices"
+ ]
+
+ # Preprocessing
+ preprocess = transforms.Compose([
+     transforms.Resize((224, 224)),
+     transforms.ToTensor(),
+     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ ])
+
+ # Load model
+ print("Loading model...")
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ local_path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)
+ state = load_file(local_path)
+ model = DenseNet121_CheXpert(num_labels=14, pretrained=None)
+ model.load_state_dict(state, strict=False)
+ model.to(device)
+ model.eval()
+ print(f"Model loaded successfully on {device}")
+
+ def predict(image, threshold):
+     """Generate predictions and Grad-CAM visualizations"""
+     if image is None:
+         return None, None, "Please upload an X-ray image"
+
+     try:
+         # Convert to PIL Image
+         if isinstance(image, np.ndarray):
+             img = Image.fromarray(image).convert("RGB")
+         else:
+             img = image.convert("RGB")
+
+         # Preprocess
+         img_tensor = preprocess(img).unsqueeze(0).to(device)
+         rgb_img = np.array(img.resize((224, 224)), dtype=np.float32) / 255.0
+
+         # Get predictions
+         with torch.no_grad():
+             logits = model(img_tensor)
+             probs = torch.sigmoid(logits).squeeze().cpu().numpy()
+
+         # Setup Grad-CAM
+         target_layer = model.densenet.features.denseblock4
+         cam = GradCAM(model=model, target_layers=[target_layer])
+
+         # Generate visualizations for conditions above threshold
+         gradcam_images = []
+         detected_conditions = []
+
+         for i, prob in enumerate(probs):
+             if prob > threshold:
+                 label = LABELS[i]
+                 targets = [ClassifierOutputTarget(i)]
+                 grayscale_cam = cam(input_tensor=img_tensor, targets=targets)
+                 grayscale_cam = grayscale_cam[0, :]
+
+                 resized_rgb_img = resize(rgb_img, grayscale_cam.shape, anti_aliasing=True)
+                 cam_image = show_cam_on_image(resized_rgb_img, grayscale_cam, use_rgb=True)
+
+                 gradcam_images.append(cam_image)
+                 detected_conditions.append(f"**{label}**: {prob:.4f}")
+
+         # Create summary text
+         all_predictions = "\n".join([f"{LABELS[i]}: {prob:.4f}" for i, prob in enumerate(probs)])
+
+         if detected_conditions:
+             summary = f"## Detected Conditions (>{threshold}):\n" + "\n".join(detected_conditions)
+             summary += f"\n\n## All Predictions:\n{all_predictions}"
+             # Return first Grad-CAM image and original image
+             return gradcam_images[0], img, summary
+         else:
+             summary = f"No conditions detected above threshold {threshold}\n\n## All Predictions:\n{all_predictions}"
+             return None, img, summary
+
+     except Exception as e:
+         return None, None, f"Error: {str(e)}"
+
+ # Create Gradio interface
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
+     gr.Markdown(
+         """
+         # 🩻 X-Ray Grad-CAM Visualization
+
+         Upload a chest X-ray image to analyze potential conditions using DenseNet121 with Grad-CAM visualization.
+
+         **Model**: [itsomk/chexpert-densenet121](https://huggingface.co/itsomk/chexpert-densenet121)
+         """
+     )
+
+     with gr.Row():
+         with gr.Column():
+             input_image = gr.Image(label="Upload X-Ray Image", type="pil")
+             threshold = gr.Slider(
+                 minimum=0.0,
+                 maximum=1.0,
+                 value=0.5,
+                 step=0.05,
+                 label="Prediction Threshold"
+             )
+             analyze_btn = gr.Button("🔍 Analyze X-Ray", variant="primary", size="lg")
+
+         with gr.Column():
+             output_gradcam = gr.Image(label="Grad-CAM Visualization")
+             output_image = gr.Image(label="Original Image")
+
+     with gr.Row():
+         output_text = gr.Markdown(label="Analysis Results")
+
+     # Examples
+     gr.Markdown("### 📋 Instructions:")
+     gr.Markdown(
+         """
+         1. Upload a chest X-ray image (JPG, PNG)
+         2. Adjust the prediction threshold if needed (default: 0.5)
+         3. Click 'Analyze X-Ray' to see results
+         4. View detected conditions with Grad-CAM heatmaps
+         """
+     )
+
+     # Connect components
+     analyze_btn.click(
+         fn=predict,
+         inputs=[input_image, threshold],
+         outputs=[output_gradcam, output_image, output_text]
+     )
+
+ if __name__ == "__main__":
+     demo.launch()
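
For a quick local sanity check, the predict() function defined above can also be called directly, without launching the Gradio UI. The snippet below is a minimal sketch and not part of this commit: it assumes the app.py code above has already been executed in the same Python session, and "chest_xray.png" / "gradcam_overlay.png" are hypothetical local file paths used only for illustration.

# Sketch only (not part of app.py): headless call to predict().
# Assumes app.py above has been run in this session; "chest_xray.png" is a placeholder path.
from PIL import Image

xray = Image.open("chest_xray.png")
cam_overlay, original, summary = predict(xray, threshold=0.5)

print(summary)  # markdown text with per-label sigmoid probabilities
if cam_overlay is not None:
    # cam_overlay is the uint8 RGB array returned by show_cam_on_image
    Image.fromarray(cam_overlay).save("gradcam_overlay.png")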