import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import gradio as gr
import requests
from bs4 import BeautifulSoup
import io
import os
import zipfile
from PIL import Image
import tempfile
import sys
# --------------------------------------------------------------------
# PART 1: YOUR EXISTING (TINY) DATA & PLOTS
# --------------------------------------------------------------------
data_full = [
    ['CultriX/Qwen2.5-14B-SLERPv7', 'https://huggingface.co/CultriX/Qwen2.5-14B-SLERPv7', 0.7205, 0.8272, 0.7541, 0.6581, 0.5, 0.729],
    ['djuna/Q2.5-Veltha-14B-0.5', 'https://huggingface.co/djuna/Q2.5-Veltha-14B-0.5', 0.7492, 0.8386, 0.7305, 0.598, 0.43, 0.7817],
    ['CultriX/Qwen2.5-14B-FinalMerge', 'https://huggingface.co/CultriX/Qwen2.5-14B-FinalMerge', 0.7248, 0.8277, 0.7113, 0.7052, 0.57, 0.7001],
    ['CultriX/Qwen2.5-14B-MultiCultyv2', 'https://huggingface.co/CultriX/Qwen2.5-14B-MultiCultyv2', 0.7295, 0.8359, 0.7363, 0.5767, 0.44, 0.7316],
    ['CultriX/Qwen2.5-14B-Brocav7', 'https://huggingface.co/CultriX/Qwen2.5-14B-Brocav7', 0.7445, 0.8353, 0.7508, 0.6292, 0.46, 0.7629],
    ['CultriX/Qwen2.5-14B-Broca', 'https://huggingface.co/CultriX/Qwen2.5-14B-Broca', 0.7456, 0.8352, 0.748, 0.6034, 0.44, 0.7716],
    ['CultriX/Qwen2.5-14B-Brocav3', 'https://huggingface.co/CultriX/Qwen2.5-14B-Brocav3', 0.7395, 0.8388, 0.7393, 0.6405, 0.47, 0.7659],
    ['CultriX/Qwen2.5-14B-Brocav4', 'https://huggingface.co/CultriX/Qwen2.5-14B-Brocav4', 0.7432, 0.8377, 0.7444, 0.6277, 0.48, 0.758],
    ['CultriX/Qwen2.5-14B-Brocav2', 'https://huggingface.co/CultriX/Qwen2.5-14B-Brocav2', 0.7492, 0.8302, 0.7508, 0.6377, 0.51, 0.7478],
    ['CultriX/Qwen2.5-14B-Brocav5', 'https://huggingface.co/CultriX/Qwen2.5-14B-Brocav5', 0.7445, 0.8313, 0.7547, 0.6376, 0.5, 0.7304],
    ['CultriX/Qwen2.5-14B-Brocav6', 'https://huggingface.co/CultriX/Qwen2.5-14B-Brocav6', 0.7179, 0.8354, 0.7531, 0.6378, 0.49, 0.7524],
    ['CultriX/Qwenfinity-2.5-14B', 'https://huggingface.co/CultriX/Qwenfinity-2.5-14B', 0.7347, 0.8254, 0.7279, 0.7267, 0.56, 0.697],
    ['CultriX/Qwen2.5-14B-Emergedv2', 'https://huggingface.co/CultriX/Qwen2.5-14B-Emergedv2', 0.7137, 0.8335, 0.7363, 0.5836, 0.44, 0.7344],
    ['CultriX/Qwen2.5-14B-Unity', 'https://huggingface.co/CultriX/Qwen2.5-14B-Unity', 0.7063, 0.8343, 0.7423, 0.682, 0.57, 0.7498],
    ['CultriX/Qwen2.5-14B-MultiCultyv3', 'https://huggingface.co/CultriX/Qwen2.5-14B-MultiCultyv3', 0.7132, 0.8216, 0.7395, 0.6792, 0.55, 0.712],
    ['CultriX/Qwen2.5-14B-Emergedv3', 'https://huggingface.co/CultriX/Qwen2.5-14B-Emergedv3', 0.7436, 0.8312, 0.7519, 0.6585, 0.55, 0.7068],
    ['CultriX/SeQwence-14Bv1', 'https://huggingface.co/CultriX/SeQwence-14Bv1', 0.7278, 0.841, 0.7541, 0.6816, 0.52, 0.7539],
    ['CultriX/Qwen2.5-14B-Wernickev2', 'https://huggingface.co/CultriX/Qwen2.5-14B-Wernickev2', 0.7391, 0.8168, 0.7273, 0.622, 0.45, 0.7572],
    ['CultriX/Qwen2.5-14B-Wernickev3', 'https://huggingface.co/CultriX/Qwen2.5-14B-Wernickev3', 0.7357, 0.8148, 0.7245, 0.7023, 0.55, 0.7869],
    ['CultriX/Qwen2.5-14B-Wernickev4', 'https://huggingface.co/CultriX/Qwen2.5-14B-Wernickev4', 0.7355, 0.829, 0.7497, 0.6306, 0.48, 0.7635],
    ['CultriX/SeQwential-14B-v1', 'https://huggingface.co/CultriX/SeQwential-14B-v1', 0.7355, 0.8205, 0.7549, 0.6367, 0.48, 0.7626],
    ['CultriX/Qwen2.5-14B-Wernickev5', 'https://huggingface.co/CultriX/Qwen2.5-14B-Wernickev5', 0.7224, 0.8272, 0.7541, 0.679, 0.51, 0.7578],
    ['CultriX/Qwen2.5-14B-Wernickev6', 'https://huggingface.co/CultriX/Qwen2.5-14B-Wernickev6', 0.6994, 0.7549, 0.5816, 0.6991, 0.58, 0.7267],
    ['CultriX/Qwen2.5-14B-Wernickev7', 'https://huggingface.co/CultriX/Qwen2.5-14B-Wernickev7', 0.7147, 0.7599, 0.6097, 0.7056, 0.57, 0.7164],
    ['CultriX/Qwen2.5-14B-FinalMerge-tmp2', 'https://huggingface.co/CultriX/Qwen2.5-14B-FinalMerge-tmp2', 0.7255, 0.8192, 0.7535, 0.6671, 0.5, 0.7612],
    ['CultriX/Qwen2.5-14B-BrocaV8', 'https://huggingface.co/CultriX/Qwen2.5-14B-BrocaV8', 0.7415, 0.8396, 0.7334, 0.5785, 0.43, 0.7646],
]
columns = ["Model Configuration", "Model Link", "tinyArc", "tinyHellaswag",
           "tinyMMLU", "tinyTruthfulQA", "tinyTruthfulQA_mc1", "tinyWinogrande"]
df_full = pd.DataFrame(data_full, columns=columns)
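
# Sanity check (illustrative, an assumption about the hardcoded table above:
# 26 models, 2 metadata columns, 6 score columns). Update if rows are added.
assert df_full.shape == (26, 8), "unexpected shape for the tiny-benchmark table"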
def plot_average_scores():
    # Work on a copy so the derived "Average Score" column never leaks into
    # df_full and skews the other plots (melt, idxmax, heatmap).
    df_avg = df_full.copy()
    df_avg["Average Score"] = df_avg[columns[2:]].mean(axis=1)
    df_avg_sorted = df_avg.sort_values(by="Average Score", ascending=False)

    plt.figure(figsize=(14, 10))
    plt.barh(df_avg_sorted["Model Configuration"], df_avg_sorted["Average Score"])
    plt.title("Average Performance of Models Across Tasks", fontsize=16)
    plt.xlabel("Average Score", fontsize=14)
    plt.ylabel("Model Configuration", fontsize=14)
    plt.gca().invert_yaxis()
    plt.grid(axis='x', linestyle='--', alpha=0.7)
    plt.tight_layout()

    # Render straight into a PIL image (no base64 round trip needed).
    img_buffer = io.BytesIO()
    plt.savefig(img_buffer, format='png')
    plt.close()
    img_buffer.seek(0)
    pil_image = Image.open(img_buffer)

    temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    pil_image.save(temp_image_file.name)
    return pil_image, temp_image_file.name
def plot_task_performance():
    # Melt only the six benchmark columns, keeping model name/link as ids.
    df_full_melted = df_full.melt(id_vars=["Model Configuration", "Model Link"],
                                  value_vars=columns[2:],
                                  var_name="Task", value_name="Score")

    plt.figure(figsize=(16, 12))
    for model in df_full["Model Configuration"]:
        model_data = df_full_melted[df_full_melted["Model Configuration"] == model]
        plt.plot(model_data["Task"], model_data["Score"], marker="o", label=model)
    plt.title("Performance of All Models Across Tasks", fontsize=16)
    plt.xlabel("Task", fontsize=14)
    plt.ylabel("Score", fontsize=14)
    plt.xticks(rotation=45)
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=9)
    plt.grid(axis='y', linestyle='--', alpha=0.7)
    plt.tight_layout()

    img_buffer = io.BytesIO()
    plt.savefig(img_buffer, format='png')
    plt.close()
    img_buffer.seek(0)
    pil_image = Image.open(img_buffer)

    temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    pil_image.save(temp_image_file.name)
    return pil_image, temp_image_file.name
def plot_task_specific_top_models():
    # idxmax() returns row *indices*, not names, so map them back to the
    # "Model Configuration" column before tabulating.
    scores = df_full[columns[2:]]
    top_idx = scores.idxmax()
    results = pd.DataFrame({
        "Task": columns[2:],
        "Top Model": df_full.loc[top_idx, "Model Configuration"].values,
        "Score": scores.max().values,
    })

    plt.figure(figsize=(14, 8))
    plt.bar(results["Task"], results["Score"])
    plt.title("Task-Specific Top Models", fontsize=16)
    plt.xlabel("Task", fontsize=14)
    plt.ylabel("Score", fontsize=14)
    plt.grid(axis="y", linestyle="--", alpha=0.7)
    plt.tight_layout()

    img_buffer = io.BytesIO()
    plt.savefig(img_buffer, format='png')
    plt.close()
    img_buffer.seek(0)
    pil_image = Image.open(img_buffer)

    temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    pil_image.save(temp_image_file.name)
    return pil_image, temp_image_file.name
def plot_heatmap():
    plt.figure(figsize=(14, 10))
    # Select the score columns by name so any derived columns are excluded.
    sns.heatmap(df_full[columns[2:]], annot=True, cmap="YlGnBu",
                xticklabels=columns[2:], yticklabels=df_full["Model Configuration"])
    plt.title("Performance Heatmap", fontsize=16)
    plt.tight_layout()

    img_buffer = io.BytesIO()
    plt.savefig(img_buffer, format='png')
    plt.close()
    img_buffer.seek(0)
    pil_image = Image.open(img_buffer)

    temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    pil_image.save(temp_image_file.name)
    return pil_image, temp_image_file.name
def scrape_mergekit_config(model_name):
    model_link = df_full.loc[df_full["Model Configuration"] == model_name, "Model Link"].values[0]
    try:
        response = requests.get(model_link, timeout=15)
    except requests.RequestException:
        return f"Failed to fetch model page for {model_name}. Please check the link."
    if response.status_code != 200:
        return f"Failed to fetch model page for {model_name}. Please check the link."
    soup = BeautifulSoup(response.text, "html.parser")
    yaml_config = soup.find("pre")  # MergeKit YAML is usually rendered in <pre> tags
    if yaml_config:
        return yaml_config.text.strip()
    return f"No YAML configuration found for {model_name}."
def download_yaml(yaml_content, model_name):
    if "No YAML configuration found" in yaml_content or "Failed to fetch model page" in yaml_content:
        return None
    # gr.File expects a file path, not raw bytes, so write the YAML to a
    # temp file and return its path.
    file_path = os.path.join(tempfile.gettempdir(), f"{model_name.replace('/', '_')}_config.yaml")
    with open(file_path, "w") as f:
        f.write(yaml_content)
    return file_path
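
# Usage sketch (not wired into the UI; the model name is just one example
# from the table above). Left commented out because it hits the network:
#   yaml_text = scrape_mergekit_config("CultriX/Qwen2.5-14B-Broca")
#   path = download_yaml(yaml_text, "CultriX/Qwen2.5-14B-Broca")
#   # path is either None or a temp-file path suitable for gr.File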
def scrape_model_page(model_url):
    try:
        response = requests.get(model_url, timeout=15)
        if response.status_code != 200:
            return f"Error: Unable to fetch the page (Status Code: {response.status_code})"
        soup = BeautifulSoup(response.text, "html.parser")
        yaml_config = soup.find("pre")
        yaml_text = yaml_config.text.strip() if yaml_config else "No YAML configuration found."
        metadata_section = soup.find("div", class_="metadata")
        metadata_text = metadata_section.text.strip() if metadata_section else "No metadata found."
        return f"**YAML Configuration:**\n{yaml_text}\n\n**Metadata:**\n{metadata_text}"
    except Exception as e:
        return f"Error: {str(e)}"

def display_scraped_model_data(model_url):
    return scrape_model_page(model_url)
def download_all_data():
    csv_buffer = io.StringIO()
    df_full.to_csv(csv_buffer, index=False)
    csv_data = csv_buffer.getvalue().encode('utf-8')

    plot_dict = {
        "average_performance": plot_average_scores(),
        "task_performance": plot_task_performance(),
        "top_models": plot_task_specific_top_models(),
        "heatmap": plot_heatmap(),
    }

    # gr.File needs a real path, so build the zip on disk instead of
    # returning an in-memory buffer. Use logical names for the PNGs rather
    # than the random temp-file paths.
    zip_path = os.path.join(tempfile.gettempdir(), "analysis_data.zip")
    with zipfile.ZipFile(zip_path, 'w') as zf:
        zf.writestr("model_scores.csv", csv_data)
        for name, (pil_image, _temp_path) in plot_dict.items():
            image_bytes = io.BytesIO()
            pil_image.save(image_bytes, format='PNG')
            zf.writestr(f"{name}.png", image_bytes.getvalue())
        # Also try scraping each model for a YAML config
        for model_name in df_full["Model Configuration"].to_list():
            yaml_content = scrape_mergekit_config(model_name)
            if ("No YAML configuration found" not in yaml_content) and ("Failed to fetch model page" not in yaml_content):
                zf.writestr(f"{model_name.replace('/', '_')}_config.yaml", yaml_content.encode())
    return zip_path
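
# Export sketch: building the zip outside Gradio. Left commented out because
# it scrapes every model page over the network:
#   zip_path = download_all_data()
#   print(f"Wrote {zip_path}")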
# --------------------------------------------------------------------
# PART 2: FULL "DATA START" SNIPPET (RANKS 44-105) + Parser
# --------------------------------------------------------------------
benchmark_data = [
    # The entire dataset from your "DATA START", ranks 44..105
    # (the code you posted with the "known_config" and scraping logic).
    {
        "rank": 44,
        "name": "sometimesanotion/Qwen2.5-14B-Vimarckoso-v3",
        "scores": {
            "average": 40.10,
            "IFEval": 72.57,
            "BBH": 48.58,
            "MATH": 34.44,
            "GPQA": 17.34,
            "MUSR": 19.39,
            "MMLU-PRO": 48.26
        },
        "hf_url": "https://huggingface.co/sometimesanotion/Qwen2.5-14B-Vimarckoso-v3",
        "known_config": {
            "models": [
                {"model": "CultriX/SeQwence-14Bv1"},
                {"model": "allknowingroger/Qwenslerp5-14B"}
            ],
            "merge_method": "slerp",
            "base_model": "CultriX/SeQwence-14Bv1",
            "dtype": "bfloat16",
            "parameters": {
                "t": [0, 0.5, 1, 0.5, 0]
            }
        }
    },
    # ... rest of the snippet ...
    # (Exactly copy/paste your big block from rank=44 to rank=105)
]
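
# Optional sketch: rather than pasting ranks 44..105 inline, the same records
# could be loaded from a JSON file with the schema shown above. The filename
# "benchmark_data.json" is an assumption, not something the app ships with;
# this is a no-op when the file is absent.
import json
if os.path.exists("benchmark_data.json"):
    with open("benchmark_data.json") as f:
        benchmark_data = json.load(f)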
def snippet_scrape_model_page(url):
    """
    Same as scrape_model_page, kept separate for clarity. Returns the
    formatted string that scrape_model_page produces.
    """
    return scrape_model_page(url)
def snippet_print_benchmark_and_config_info(model_info):
    """
    Prints an overview for each model (the "DATA START" logic): either the
    known MergeKit config, or a scraping fallback.
    """
    print(f"---\nModel Rank: {model_info['rank']}")
    print(f"Model Name: {model_info['name']}")
    print(f"Model's average score across benchmarks in %: {model_info['scores']['average']}")
    print(f"Model's score on the IFEval benchmark in %: {model_info['scores']['IFEval']}")
    print(f"Model's score on the BBH benchmark in %: {model_info['scores']['BBH']}")
    print(f"Model's score on the MATH benchmark in %: {model_info['scores']['MATH']}")
    print(f"Model's score on the GPQA benchmark in %: {model_info['scores']['GPQA']}")
    print(f"Model's score on the MUSR benchmark in %: {model_info['scores']['MUSR']}")
    print(f"Model's score on the MMLU-PRO benchmark in %: {model_info['scores']['MMLU-PRO']}")

    # If there's a known_config, print it as YAML and stop here.
    if model_info["known_config"] is not None:
        print("###")
        print("models:")
        for m in model_info["known_config"]["models"]:
            print(f"  - model: {m['model']}")
        print(f"merge_method: {model_info['known_config']['merge_method']}")
        print(f"base_model: {model_info['known_config']['base_model']}")
        print(f"dtype: {model_info['known_config']['dtype']}")
        print("parameters:")
        print(f"  t: {model_info['known_config']['parameters']['t']}")
        print("###")
        return

    # Otherwise, scrape. snippet_scrape_model_page returns a formatted
    # string, so inspect that string for an error or a missing YAML block.
    scraped = snippet_scrape_model_page(model_info["hf_url"])
    if "Error:" in scraped:
        print("(No MergeKit configuration found or an error occurred.)\n")
        return
    if "No YAML configuration found." in scraped:
        print("(No MergeKit configuration found.)\n")
        # Print a standalone helper script the user can run themselves.
        print("You can try the following Python script to scrape the model page:\n")
        print("#" * 70)
        print(f'''import requests
from bs4 import BeautifulSoup

def scrape_model_page(model_url):
    try:
        response = requests.get(model_url)
        if response.status_code != 200:
            return f"Error: Unable to fetch the page (Status Code: {{response.status_code}})"
        soup = BeautifulSoup(response.text, "html.parser")
        yaml_config = soup.find("pre")
        yaml_text = yaml_config.text.strip() if yaml_config else "No YAML configuration found."
        metadata_section = soup.find("div", class_="metadata")
        metadata_text = metadata_section.text.strip() if metadata_section else "No metadata found."
        return {{
            "yaml_configuration": yaml_text,
            "metadata": metadata_text
        }}
    except Exception as e:
        return f"Error: {{str(e)}}"

if __name__ == "__main__":
    model_url = "{model_info['hf_url']}"
    result = scrape_model_page(model_url)
    print(result)''')
        print("#" * 70)
        return
    print("###")
    print(scraped)
    print("###")
def run_non_tiny_benchmarks():
    """
    Captures the stdout from printing each model in benchmark_data
    (ranks 44 to 105), returning a single string for Gradio to display.
    """
    old_stdout = sys.stdout
    buffer = io.StringIO()
    sys.stdout = buffer
    try:
        for model in benchmark_data:
            snippet_print_benchmark_and_config_info(model)
    finally:
        # Restore stdout even if a model entry raises.
        sys.stdout = old_stdout
    return buffer.getvalue()
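
# Equivalent sketch using contextlib instead of swapping sys.stdout by hand;
# shown for reference only and not wired into the UI:
#   from contextlib import redirect_stdout
#   def run_non_tiny_benchmarks_alt():
#       buffer = io.StringIO()
#       with redirect_stdout(buffer):
#           for model in benchmark_data:
#               snippet_print_benchmark_and_config_info(model)
#       return buffer.getvalue()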
# --------------------------------------------------------------------
# PART 3: GRADIO APP (Your existing UI plus the "Parse Non-Tiny" button)
# --------------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# Comprehensive Model Performance Analysis with Hugging Face Links")

    # The existing UI
    with gr.Row():
        btn1 = gr.Button("Show Average Performance")
        img1 = gr.Image(type="pil", label="Average Performance Plot")
        img1_download = gr.File(label="Download Average Performance")
        btn1.click(plot_average_scores, outputs=[img1, img1_download])

    with gr.Row():
        btn2 = gr.Button("Show Task Performance")
        img2 = gr.Image(type="pil", label="Task Performance Plot")
        img2_download = gr.File(label="Download Task Performance")
        btn2.click(plot_task_performance, outputs=[img2, img2_download])

    with gr.Row():
        btn3 = gr.Button("Task-Specific Top Models")
        img3 = gr.Image(type="pil", label="Task-Specific Top Models Plot")
        img3_download = gr.File(label="Download Top Models")
        btn3.click(plot_task_specific_top_models, outputs=[img3, img3_download])

    with gr.Row():
        btn4 = gr.Button("Plot Performance Heatmap")
        heatmap_img = gr.Image(type="pil", label="Performance Heatmap")
        heatmap_download = gr.File(label="Download Heatmap")
        btn4.click(plot_heatmap, outputs=[heatmap_img, heatmap_download])

    with gr.Row():
        model_selector = gr.Dropdown(choices=df_full["Model Configuration"].tolist(), label="Select a Model")
        with gr.Column():
            scrape_btn = gr.Button("Scrape MergeKit Configuration")
            yaml_output = gr.Textbox(lines=10, placeholder="YAML Configuration will appear here.")
            scrape_btn.click(scrape_mergekit_config, inputs=model_selector, outputs=yaml_output)
        with gr.Column():
            save_yaml_btn = gr.Button("Save MergeKit Configuration")
            yaml_download = gr.File(label="Download MergeKit Configuration")
            save_yaml_btn.click(download_yaml, inputs=[yaml_output, model_selector], outputs=yaml_download)

    with gr.Row():
        download_all_btn = gr.Button("Download Everything")
        all_downloads = gr.File(label="Download All Data")
        download_all_btn.click(download_all_data, outputs=all_downloads)

    # Live scraping feature
    gr.Markdown("## Live Scraping Features")
    with gr.Row():
        url_input = gr.Textbox(label="Enter Hugging Face Model URL", placeholder="https://huggingface.co/<model>")
        live_scrape_btn = gr.Button("Scrape Model Page")
        live_scrape_output = gr.Textbox(label="Scraped Data", lines=15)
        live_scrape_btn.click(display_scraped_model_data, inputs=url_input, outputs=live_scrape_output)

    # NEW: Non-Tiny Benchmarks button
    gr.Markdown("## Non-Tiny Benchmark Parser (Ranks 44-105)")
    with gr.Row():
        parse_non_tiny_btn = gr.Button("Parse Non-Tiny Benchmarks")
        parse_non_tiny_output = gr.Textbox(label="Non-Tiny Benchmark Output", lines=30)
        parse_non_tiny_btn.click(fn=run_non_tiny_benchmarks, outputs=parse_non_tiny_output)

demo.launch()