File size: 5,334 Bytes
e0f0969
 
 
 
 
 
 
1851abd
e0f0969
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d62d426
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
import os  # For filesystem operations
import shutil  # For directory cleanup
import zipfile  # For extracting model archives
import pathlib  # For path manipulations
import pandas  # For tabular data handling
import gradio  # For interactive UI
import huggingface_hub  # For downloading model assets
import autogluon.tabular  # For loading and running AutoGluon predictors


# Settings (selected model: tabular flowers dataset predictor)
# NOTE(review): "autolguon" in the repo id looks like a typo for "autogluon",
# but it must match the actual Hugging Face repo name exactly — do not "fix" it.
MODEL_REPO_ID = "its-zion-18/flowers-tabular-autolguon-predictor"
ZIP_FILENAME  = "autogluon_predictor_dir.zip"
CACHE_DIR = pathlib.Path("hf_assets")         # local download cache
EXTRACT_DIR = CACHE_DIR / "predictor_native"  # where the zip is unpacked

# Input feature column names, matching the original training dataset.
# The target column ("color") is intentionally NOT listed here: it is the
# value the model predicts, so it must not be sent as an input feature.
FEATURE_COLS = [
    "flower_diameter_cm",
    "petal_length_cm",
    "petal_width_cm",
    "petal_count",
    "stem_height_cm",
]
TARGET_COL = "color"

# Integer class index -> human-readable flower color label.
OUTCOME_LABELS = {
    0: "Pink",
    1: "Red",
    2: "White",
    3: "Orange",
    4: "Yellow",
    5: "Purple",
}


# Download & load the native predictor
def _prepare_predictor_dir() -> str:
    """Download the zipped AutoGluon predictor from the Hub and unpack it.

    Returns the directory path the predictor should be loaded from. When
    the archive contains exactly one top-level directory, that directory
    is treated as the predictor root; otherwise the extraction directory
    itself is returned.
    """
    CACHE_DIR.mkdir(parents=True, exist_ok=True)

    # Fetch the archive into the local cache as a real file (no symlinks).
    zip_path = huggingface_hub.hf_hub_download(
        repo_id=MODEL_REPO_ID,
        filename=ZIP_FILENAME,
        repo_type="model",
        local_dir=str(CACHE_DIR),
        local_dir_use_symlinks=False,
    )

    # Start from a clean extraction directory on every run.
    if EXTRACT_DIR.exists():
        shutil.rmtree(EXTRACT_DIR)
    EXTRACT_DIR.mkdir(parents=True, exist_ok=True)

    with zipfile.ZipFile(zip_path, "r") as archive:
        archive.extractall(str(EXTRACT_DIR))

    # Unwrap a single nested directory if the zip was built that way.
    entries = list(EXTRACT_DIR.iterdir())
    if len(entries) == 1 and entries[0].is_dir():
        return str(entries[0])
    return str(EXTRACT_DIR)

# Download/extract the model archive once at import time, then load it.
PREDICTOR_DIR = _prepare_predictor_dir()
# require_py_version_match=False: allow loading a predictor that was saved
# under a different Python minor version (presumably the artifact was built
# on another interpreter version — confirm against the repo's metadata).
PREDICTOR = autogluon.tabular.TabularPredictor.load(PREDICTOR_DIR, require_py_version_match=False)

# A mapping utility to make it easier to encode the variables
def _human_label(c):
    """Map a raw predicted class value to its human-readable color name.

    Accepts integer class indices, numeric strings, or direct dict keys;
    falls back to ``str(c)`` when no mapping applies.
    """
    # First interpretation: the value is (convertible to) an integer index.
    try:
        idx = int(c)
    except Exception:
        idx = None
    if idx is not None and idx in OUTCOME_LABELS:
        return OUTCOME_LABELS[idx]
    # Second interpretation: the raw value itself is a mapping key.
    return OUTCOME_LABELS[c] if c in OUTCOME_LABELS else str(c)

# Takes all of our features and returns class probabilities for the UI.
def do_predict(flower_diameter_cm, petal_length_cm, petal_width_cm, petal_count, stem_height_cm):
    """Predict the flower color for one set of measurements.

    Returns a ``{label: probability}`` dict sorted by descending
    probability, suitable for a ``gradio.Label`` component. When the
    predictor cannot produce class probabilities, falls back to a single
    full-confidence entry for the hard prediction instead of returning
    ``None``.
    """
    row = {
        FEATURE_COLS[0]: float(flower_diameter_cm),
        FEATURE_COLS[1]: float(petal_length_cm),
        FEATURE_COLS[2]: float(petal_width_cm),
        FEATURE_COLS[3]: int(petal_count),
        FEATURE_COLS[4]: float(stem_height_cm),
    }
    X = pandas.DataFrame([row], columns=FEATURE_COLS)

    try:
        proba = PREDICTOR.predict_proba(X)
        if isinstance(proba, pandas.Series):
            # Some predictors return a Series; normalize to a 1-row frame.
            proba = proba.to_frame().T
    except Exception:
        proba = None

    if proba is None:
        # No probabilities available: report the hard prediction at 100%.
        raw_pred = PREDICTOR.predict(X).iloc[0]
        return {_human_label(raw_pred): 1.0}

    # Merge probabilities that map to the same human label (defensive:
    # distinct raw classes could share a display name).
    merged = {}
    for cls, val in proba.iloc[0].items():
        label = _human_label(cls)
        merged[label] = merged.get(label, 0.0) + float(val)

    # Most probable class first, as gradio.Label expects.
    return dict(sorted(merged.items(), key=lambda kv: kv[1], reverse=True))

# Representative examples: one row per example, values in the same order as
# the input sliders (flower_diameter_cm, petal_length_cm, petal_width_cm,
# petal_count, stem_height_cm).
EXAMPLES = [
    [3.4, 1.3, 1, 7, 68.7],
    [5.7, 3.2, 0.9, 5, 21.2],
    [6.9, 3.8, 0.6, 17, 64.2],
    [5.6, 1, 1.5, 7, 73.4],
    [7.7, 1.5, 1.8, 13, 73.8],
]

# Gradio UI
with gradio.Blocks() as demo:
    # Provide an introduction
    gradio.Markdown("# What color would your flower be?")
    gradio.Markdown("""
    This is a simple app that builds on the datasets and models
    we've been making in class to answer what color flowers you may come across
    given the dimensions of the petals.
    To use the interface, make selections using the interface elements shown below.
    """)

    # Numeric inputs; labels reuse the training feature names so the mapping
    # from UI control to model column is obvious.
    with gradio.Row():
        flower_diameter_cm = gradio.Slider(0, 10, step=0.1, value=5.0, label=FEATURE_COLS[0])
        petal_length_cm = gradio.Slider(0, 5, step=0.1, value=2.5, label=FEATURE_COLS[1])
        petal_width_cm = gradio.Slider(0, 2, step=0.1, value=1.0, label=FEATURE_COLS[2])

    with gradio.Row():
        # petal_count is an integer feature (do_predict casts it with int()),
        # so step=1 keeps the slider from offering fractional counts that
        # would be silently truncated.
        petal_count = gradio.Slider(0, 30, step=1, value=15, label=FEATURE_COLS[3])
        stem_height_cm = gradio.Slider(0, 80, step=0.5, value=40.0, label=FEATURE_COLS[4])

    # Live class-probability display (top 5 of the 6 colors).
    proba_pretty = gradio.Label(num_top_classes=5, label="Class probabilities")

    # Re-run the prediction whenever any slider changes.
    inputs = [flower_diameter_cm, petal_length_cm, petal_width_cm, petal_count, stem_height_cm]
    for comp in inputs:
        comp.change(fn=do_predict, inputs=inputs, outputs=[proba_pretty])


    gradio.Examples(
        examples=EXAMPLES,
        inputs=inputs,
        label="Representative examples",
        examples_per_page=5,
        cache_examples=False,
    )

if __name__ == "__main__":
    # Launch the Gradio server only when run as a script (not on import).
    demo.launch()