Spaces: Runtime error
Update app.py
Part 1 improved
app.py
CHANGED
@@ -1,30 +1,91 @@
import gradio as gr
-from transformers import pipeline
-
-# 1. Load the model
-model_ia = pipeline("image-classification", model="google/cxr-foundation")
-
-# 2. Prediction function
-def procesar_radiografia(image):
-    """
-    Receives an X-ray image and returns a label.
-    """
-    predictions = model_ia(image)
-
-    return predictions
-
-# 3. Create the Gradio interface
-interfaz = gr.Interface(
-    fn=procesar_radiografia,
-    inputs=gr.Image(type="pil"),
+from huggingface_hub import snapshot_download
+import tensorflow as tf
+import numpy as np
+from PIL import Image
+import io
+import png
+
+# --- Part 1: Load the embeddings model ---
+
+# Local directory for the downloaded model files
+MODEL_DIR = "./cxr_foundation_models"
+
+# Download the model files
+snapshot_download(repo_id="google/cxr-foundation", local_dir=MODEL_DIR,
+                  allow_patterns=['elixr-c-v2-pooled/*', 'pax-elixr-b-text/*'])
+
+# Load the saved TensorFlow models
+elixrc_model = tf.saved_model.load(f"{MODEL_DIR}/elixr-c-v2-pooled")
+qformer_model = tf.saved_model.load(f"{MODEL_DIR}/pax-elixr-b-text")
+
+# Helper function to process images
+def png_to_tfexample(image_array: np.ndarray) -> tf.train.Example:
+    image = image_array.astype(np.float32)
+    image -= image.min()
+    if image_array.dtype == np.uint8:
+        pixel_array = image.astype(np.uint8)
+        bitdepth = 8
+    else:
+        max_val = image.max()
+        if max_val > 0:
+            image *= 65535 / max_val
+        pixel_array = image.astype(np.uint16)
+        bitdepth = 16
+    if pixel_array.ndim != 2:
+        raise ValueError(f'Array must be 2-D. Actual dimensions: {pixel_array.ndim}')
+    output = io.BytesIO()
+    png.Writer(
+        width=pixel_array.shape[1],
+        height=pixel_array.shape[0],
+        greyscale=True,
+        bitdepth=bitdepth
+    ).write(output, pixel_array.tolist())
+    png_bytes = output.getvalue()
+    example = tf.train.Example()
+    features = example.features.feature
+    features['image/encoded'].bytes_list.value.append(png_bytes)
+    features['image/format'].bytes_list.value.append(b'png')
+    return example
+
+# --- Part 2: Application Logic with a Demo Classifier ---
+
+def procesar_radiografia(imagen: Image.Image):
+    # Step 1: Generate the embedding
+    img_array = np.array(imagen.convert('L'))
+    elixrc_infer = elixrc_model.signatures['serving_default']
+    elixrc_output = elixrc_infer(input_example=tf.constant([png_to_tfexample(img_array).SerializeToString()]))
+    elixrc_embedding = elixrc_output['feature_maps_0'].numpy()
+
+    qformer_input = {
+        'image_feature': elixrc_embedding.tolist(),
+        'ids': np.zeros((1, 1, 128), dtype=np.int32).tolist(),
+        'paddings': np.zeros((1, 1, 128), dtype=np.float32).tolist(),
+    }
+    qformer_infer = qformer_model.signatures['serving_default']
+    qformer_output = qformer_infer(**qformer_input)
+    elixrb_embeddings = qformer_output['all_contrastive_img_emb']
+
+    # Step 2: Simulate a classification based on the embedding
+    # In a real project, your classifier code would go here.
+    # For now, we simulate a result.
+    etiquetas = {
+        "Normal": 0.8,
+        "Pneumonia": 0.15,
+        "Cardiomegaly": 0.05
+    }
+
+    # Return the result as a label mapping
+    return etiquetas
+
+# Create the Gradio interface
+interfaz = gr.Interface(
+    fn=procesar_radiografia,
+    inputs=gr.Image(type="pil"),
    outputs="label",
-    title="Chest X-ray Analysis Assistant",
-    description="Upload a chest X-ray and the AI model will provide a preliminary classification."
+    title="Chest X-ray Analysis Assistant (Demo)",
+    description="Upload a chest X-ray and the AI model will provide a preliminary classification. **Note: This is a demo tool, not a medical diagnosis.**"
)

-#
+# Launch the interface
interfaz.launch()
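
Note: the `etiquetas` dict in the new code is only a simulated result. Below is a minimal sketch of one way a real classifier head could sit on top of the ELIXR embedding. It assumes you have already extracted embeddings for a labeled training set; the class names, layer sizes, and the `train_x`/`train_y` arrays are illustrative placeholders, not part of this commit.

import numpy as np
import tensorflow as tf

CLASS_NAMES = ["Normal", "Pneumonia", "Cardiomegaly"]  # illustrative labels

def build_head(input_dim: int) -> tf.keras.Model:
    # Small dense head on top of the frozen embedding; sizes are illustrative, not tuned.
    return tf.keras.Sequential([
        tf.keras.layers.Input(shape=(input_dim,)),
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.Dense(len(CLASS_NAMES), activation="softmax"),
    ])

# train_x: (N, D) array of flattened ELIXR embeddings, train_y: (N,) integer labels (placeholders)
# head = build_head(train_x.shape[1])
# head.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
# head.fit(train_x, train_y, epochs=20, batch_size=16)

def classify_embedding(head: tf.keras.Model, elixrb_embeddings) -> dict:
    # Flatten whatever shape the QFormer returns into a single feature vector.
    features = np.ravel(np.asarray(elixrb_embeddings))[None, :]
    probs = head.predict(features, verbose=0)[0]
    return {name: float(p) for name, p in zip(CLASS_NAMES, probs)}

Inside `procesar_radiografia`, the simulated dict would then be replaced by `return classify_embedding(head, elixrb_embeddings)`. Gradio's "label" output already renders a dict of class names to confidences, so the return format stays the same.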
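
On the Space's "Runtime error" status: the error log is not shown here, but two things are worth checking for this version of app.py. First, every new import needs its package declared in the Space's requirements.txt (the `png` module comes from the `pypng` package). The list below is a guess based on the imports, not taken from the repo:

gradio
huggingface_hub
tensorflow
numpy
Pillow
pypng

Second, if access to google/cxr-foundation is gated, `snapshot_download` will fail on the Space unless the model's terms have been accepted and a Hugging Face token is available to the Space, for example as a secret passed via the `token` argument or the `HF_TOKEN` environment variable.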