| _leaderboard<br>stringclasses 1 value | _developer<br>stringclasses 559 values | _model<br>stringlengths 9-102 | _uuid<br>stringlengths 36-36 | schema_version<br>stringclasses 1 value | evaluation_id<br>stringlengths 35-133 | retrieved_timestamp<br>stringlengths 13-18 | source_data<br>stringclasses 1 value | evaluation_source_name<br>stringclasses 1 value | evaluation_source_type<br>stringclasses 1 value | source_organization_name<br>stringclasses 1 value | source_organization_url<br>null | source_organization_logo_url<br>null | evaluator_relationship<br>stringclasses 1 value | model_name<br>stringlengths 4-102 | model_id<br>stringlengths 9-102 | model_developer<br>stringclasses 559 values | model_inference_platform<br>stringclasses 1 value | evaluation_results<br>stringlengths 1.35k-1.41k | additional_details<br>stringclasses 660 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| HF Open LLM v2 | stabilityai | stabilityai/stablelm-2-12b | 21f9d0a5-3ed3-40de-a233-a45f68d669e0 | 0.0.1 | hfopenllm_v2/stabilityai_stablelm-2-12b/1762652580.536407 | 1762652580.536408 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | stabilityai/stablelm-2-12b | stabilityai/stablelm-2-12b | stabilityai | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1569214129620518}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4508654171114765}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04305135951661632}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2785234899328859}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44788541666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3071808510638298}}] | {"precision": "bfloat16", "architecture": "StableLmForCausalLM", "params_billions": 12.143} |
| HF Open LLM v2 | stabilityai | stabilityai/stablelm-3b-4e1t | 3280f4cf-dbb7-46ad-a64c-d4e3c4a58e50 | 0.0.1 | hfopenllm_v2/stabilityai_stablelm-3b-4e1t/1762652580.5377111 | 1762652580.537712 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | stabilityai/stablelm-3b-4e1t | stabilityai/stablelm-3b-4e1t | stabilityai | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.22031986240951784}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3504211415826912}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.010574018126888218}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23741610738255034}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37778124999999996}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1668882978723404}}] | {"precision": "bfloat16", "architecture": "StableLmForCausalLM", "params_billions": 2.795} |
| HF Open LLM v2 | stabilityai | stabilityai/stablelm-zephyr-3b | 94960f86-3898-4add-8590-8abeff66a987 | 0.0.1 | hfopenllm_v2/stabilityai_stablelm-zephyr-3b/1762652580.537945 | 1762652580.5379462 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | stabilityai/stablelm-zephyr-3b | stabilityai/stablelm-zephyr-3b | stabilityai | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36832271705740766}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3866361442837871}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04305135951661632}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23909395973154363}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4183020833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17677859042553193}}] | {"precision": "bfloat16", "architecture": "StableLmForCausalLM", "params_billions": 2.795} |
| HF Open LLM v2 | stabilityai | stabilityai/stablelm-2-zephyr-1_6b | 96179bdf-3e1a-47ee-9fc2-ac0b23307556 | 0.0.1 | hfopenllm_v2/stabilityai_stablelm-2-zephyr-1_6b/1762652580.537471 | 1762652580.537472 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | stabilityai/stablelm-2-zephyr-1_6b | stabilityai/stablelm-2-zephyr-1_6b | stabilityai | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32793100085550786}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3351608706280727}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.03323262839879154}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24328859060402686}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3511458333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17137632978723405}}] | {"precision": "float16", "architecture": "StableLmForCausalLM", "params_billions": 1.645} |
| HF Open LLM v2 | stabilityai | stabilityai/stablelm-2-1_6b-chat | 552dc523-3082-4980-a533-ad5d48f1260a | 0.0.1 | hfopenllm_v2/stabilityai_stablelm-2-1_6b-chat/1762652580.5372329 | 1762652580.5372338 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | stabilityai/stablelm-2-1_6b-chat | stabilityai/stablelm-2-1_6b-chat | stabilityai | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30599919325168334}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3390172395486522}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.024924471299093656}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24748322147651006}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35796875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16215093085106383}}] | {"precision": "bfloat16", "architecture": "StableLmForCausalLM", "params_billions": 1.645} |
| HF Open LLM v2 | stabilityai | stabilityai/stablelm-2-1_6b | 78db2373-3fcf-468b-8c87-21db03b2fdda | 0.0.1 | hfopenllm_v2/stabilityai_stablelm-2-1_6b/1762652580.5369868 | 1762652580.536989 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | stabilityai/stablelm-2-1_6b | stabilityai/stablelm-2-1_6b | stabilityai | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11570521771122844}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.338457720511071}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0075528700906344415}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2483221476510067}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38819791666666664}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1463597074468085}}] | {"precision": "float16", "architecture": "StableLmForCausalLM", "params_billions": 1.645} |
| HF Open LLM v2 | stabilityai | stabilityai/stablelm-2-12b-chat | 22aad948-bcc7-4f8f-bb42-a839e3d1be96 | 0.0.1 | hfopenllm_v2/stabilityai_stablelm-2-12b-chat/1762652580.536706 | 1762652580.5367072 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | stabilityai/stablelm-2-12b-chat | stabilityai/stablelm-2-12b-chat | stabilityai | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4081647805600252}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4672024731282805}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.05362537764350453}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26677852348993286}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3914270833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2734375}}] | {"precision": "bfloat16", "architecture": "StableLmForCausalLM", "params_billions": 12.143} |
| HF Open LLM v2 | Orion-zhen | Orion-zhen/Qwen2.5-7B-Instruct-Uncensored | 141239bb-c7e3-4c38-b289-12cd59f592d2 | 0.0.1 | hfopenllm_v2/Orion-zhen_Qwen2.5-7B-Instruct-Uncensored/1762652579.808624 | 1762652579.808625 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | Orion-zhen/Qwen2.5-7B-Instruct-Uncensored | Orion-zhen/Qwen2.5-7B-Instruct-Uncensored | Orion-zhen | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7204317876567508}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5473918652157296}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4773413897280967}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3028523489932886}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43613541666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4426529255319149}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616} |
| HF Open LLM v2 | ToastyPigeon | ToastyPigeon/Sto-vo-kor-12B | 1c795b39-a382-4315-8b6b-626423b9ccfe | 0.0.1 | hfopenllm_v2/ToastyPigeon_Sto-vo-kor-12B/1762652579.920128 | 1762652579.920129 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | ToastyPigeon/Sto-vo-kor-12B | ToastyPigeon/Sto-vo-kor-12B | ToastyPigeon | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5501225636865739}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5064617128925814}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.10876132930513595}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3053691275167785}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39384375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33976063829787234}}] | {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 12.248} |
| HF Open LLM v2 | fluently-lm | fluently-lm/Llama-TI-8B-Instruct | 47960f3f-b39c-4641-8a94-fb70f9a6a53f | 0.0.1 | hfopenllm_v2/fluently-lm_Llama-TI-8B-Instruct/1762652580.156872 | 1762652580.156876 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | fluently-lm/Llama-TI-8B-Instruct | fluently-lm/Llama-TI-8B-Instruct | fluently-lm | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7716392505219485}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5252143041749421}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23036253776435045}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2953020134228188}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38134375000000004}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37258976063829785}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
| HF Open LLM v2 | fluently-lm | fluently-lm/FluentlyLM-Prinum | 950d2518-7245-4ed4-9b16-91f944aa8f15 | 0.0.1 | hfopenllm_v2/fluently-lm_FluentlyLM-Prinum/1762652580.156252 | 1762652580.1562529 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | fluently-lm/FluentlyLM-Prinum | fluently-lm/FluentlyLM-Prinum | fluently-lm | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.809033364805383}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7143813967889198}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5400302114803626}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38674496644295303}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44714583333333335}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5807845744680851}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 32.764} |
| HF Open LLM v2 | rootxhacker | rootxhacker/Apollo_v2-32B | 2a3e824e-8fb2-41ac-b548-30ea18ecdceb | 0.0.1 | hfopenllm_v2/rootxhacker_Apollo_v2-32B/1762652580.500606 | 1762652580.500606 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | rootxhacker/Apollo_v2-32B | rootxhacker/Apollo_v2-32B | rootxhacker | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4280486885907171}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7072274795963693}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42749244712990936}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3783557046979866}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4993854166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5869348404255319}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 32.764} |
| HF Open LLM v2 | rootxhacker | rootxhacker/Apollo-70B | 14421b7b-6f4d-4b4f-91e1-27a9c0919498 | 0.0.1 | hfopenllm_v2/rootxhacker_Apollo-70B/1762652580.500333 | 1762652580.500333 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | rootxhacker/Apollo-70B | rootxhacker/Apollo-70B | rootxhacker | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5098560707810831}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6804215148524603}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5611782477341389}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45721476510067116}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4947708333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5279255319148937}}] | {"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 70.554} |
| HF Open LLM v2 | rootxhacker | rootxhacker/apollo-7B | ce364468-f5ef-4a29-8026-89e455fa4350 | 0.0.1 | hfopenllm_v2/rootxhacker_apollo-7B/1762652580.500841 | 1762652580.500842 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | rootxhacker/apollo-7B | rootxhacker/apollo-7B | rootxhacker | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29533304964161755}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3636262699883149}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0256797583081571}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2785234899328859}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41312499999999996}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17478390957446807}}] | {"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616} |
| HF Open LLM v2 | OpenBuddy | OpenBuddy/openbuddy-zero-3b-v21.2-32k | 9d135662-43d6-4b05-90cb-5d2c856b0b89 | 0.0.1 | hfopenllm_v2/OpenBuddy_openbuddy-zero-3b-v21.2-32k/1762652579.8057752 | 1762652579.8057752 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | OpenBuddy/openbuddy-zero-3b-v21.2-32k | OpenBuddy/openbuddy-zero-3b-v21.2-32k | OpenBuddy | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3802377691192702}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3934791831798414}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0188821752265861}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2600671140939597}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3566354166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.20337433510638298}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 4.769} |
| HF Open LLM v2 | OpenBuddy | OpenBuddy/openbuddy-qwq-32b-v24.2-200k | 24684939-5eb8-40b1-99dd-1ebe693680fc | 0.0.1 | hfopenllm_v2/OpenBuddy_openbuddy-qwq-32b-v24.2-200k/1762652579.8051221 | 1762652579.8051221 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | OpenBuddy/openbuddy-qwq-32b-v24.2-200k | OpenBuddy/openbuddy-qwq-32b-v24.2-200k | OpenBuddy | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5969837808126881}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6771537576509328}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3776435045317221}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3766778523489933}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47179166666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5446309840425532}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 32.764} |
| HF Open LLM v2 | OpenBuddy | OpenBuddy/openbuddy-falcon3-10b-v24.2-131k | 19bba814-812c-49c2-acf1-9d056fd7d62d | 0.0.1 | hfopenllm_v2/OpenBuddy_openbuddy-falcon3-10b-v24.2-131k/1762652579.800029 | 1762652579.80003 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | OpenBuddy/openbuddy-falcon3-10b-v24.2-131k | OpenBuddy/openbuddy-falcon3-10b-v24.2-131k | OpenBuddy | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5086315420861093}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6003725722032135}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21299093655589124}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29949664429530204}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41864583333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3833942819148936}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 10.34} |
| HF Open LLM v2 | OpenBuddy | OpenBuddy/openbuddy-mixtral-7bx8-v18.1-32k | 247ee47c-e441-4020-97e3-14e3ed8d22c9 | 0.0.1 | hfopenllm_v2/OpenBuddy_openbuddy-mixtral-7bx8-v18.1-32k/1762652579.803262 | 1762652579.803263 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | OpenBuddy/openbuddy-mixtral-7bx8-v18.1-32k | OpenBuddy/openbuddy-mixtral-7bx8-v18.1-32k | OpenBuddy | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.549347952322061}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46561770563515265}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.10800604229607251}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30453020134228187}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3830520833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38040226063829785}}] | {"precision": "bfloat16", "architecture": "MixtralForCausalLM", "params_billions": 46.741} |
| HF Open LLM v2 | OpenBuddy | OpenBuddy/openbuddy-nemotron-70b-v23.1-131k | e4e4d8f4-7e49-4b08-8a08-97e4e2c28616 | 0.0.1 | hfopenllm_v2/OpenBuddy_openbuddy-nemotron-70b-v23.1-131k/1762652579.803536 | 1762652579.803537 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | OpenBuddy/openbuddy-nemotron-70b-v23.1-131k | OpenBuddy/openbuddy-nemotron-70b-v23.1-131k | OpenBuddy | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7555275557742346}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6749472828128272}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32099697885196377}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36325503355704697}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45375000000000004}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5174534574468085}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554} |
| HF Open LLM v2 | OpenBuddy | OpenBuddy/openbuddy-yi1.5-34b-v21.3-32k | f6a36220-0b31-4b0d-9262-7e0e508e64db | 0.0.1 | hfopenllm_v2/OpenBuddy_openbuddy-yi1.5-34b-v21.3-32k/1762652579.8053398 | 1762652579.805341 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | OpenBuddy/openbuddy-yi1.5-34b-v21.3-32k | OpenBuddy/openbuddy-yi1.5-34b-v21.3-32k | OpenBuddy | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5420041046645123}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6162574860411373}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1782477341389728}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.348993288590604}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44394791666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4599401595744681}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 34.407} |
| HF Open LLM v2 | OpenBuddy | OpenBuddy/openbuddy-zero-14b-v22.3-32k | 0e288116-902d-4fef-9020-a3a4dc80e698 | 0.0.1 | hfopenllm_v2/OpenBuddy_openbuddy-zero-14b-v22.3-32k/1762652579.805548 | 1762652579.8055491 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | OpenBuddy/openbuddy-zero-14b-v22.3-32k | OpenBuddy/openbuddy-zero-14b-v22.3-32k | OpenBuddy | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37529200299649373}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4859759816473639}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09365558912386707}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3070469798657718}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41660416666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3187333776595745}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 14.022} |
| HF Open LLM v2 | OpenBuddy | OpenBuddy/openbuddy-qwq-32b-v24.1-200k | a2b990cd-e692-44fc-8b39-ac91eab85cef | 0.0.1 | hfopenllm_v2/OpenBuddy_openbuddy-qwq-32b-v24.1-200k/1762652579.804893 | 1762652579.804894 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | OpenBuddy/openbuddy-qwq-32b-v24.1-200k | OpenBuddy/openbuddy-qwq-32b-v24.1-200k | OpenBuddy | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.593661484860171}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6798496773637743}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37386706948640486}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3808724832214765}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.484875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5490359042553191}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 32.764} |
| HF Open LLM v2 | OpenBuddy | OpenBuddy/openbuddy-nemotron-70b-v23.2-131k | b34ca7d7-6049-4f4f-a2e3-db736009fa4d | 0.0.1 | hfopenllm_v2/OpenBuddy_openbuddy-nemotron-70b-v23.2-131k/1762652579.803802 | 1762652579.803806 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | OpenBuddy/openbuddy-nemotron-70b-v23.2-131k | OpenBuddy/openbuddy-nemotron-70b-v23.2-131k | OpenBuddy | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7226547782107031}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6704805157570325}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3157099697885196}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3598993288590604}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46959375000000003}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5120511968085106}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554} |
| HF Open LLM v2 | OpenBuddy | OpenBuddy/openbuddy-zero-56b-v21.2-32k | 7636a893-1404-4257-9778-653f3cfb601b | 0.0.1 | hfopenllm_v2/OpenBuddy_openbuddy-zero-56b-v21.2-32k/1762652579.8059928 | 1762652579.805994 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | OpenBuddy/openbuddy-zero-56b-v21.2-32k | OpenBuddy/openbuddy-zero-56b-v21.2-32k | OpenBuddy | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5057092957796425}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6128345897750148}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16238670694864046}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3179530201342282}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4305208333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43991023936170215}}] | {"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 56.707} |
| HF Open LLM v2 | Sharathhebbar24 | Sharathhebbar24/SSH_355M | 9ff82d83-2a89-48d8-8ad0-91637a77bc76 | 0.0.1 | hfopenllm_v2/Sharathhebbar24_SSH_355M/1762652579.8797262 | 1762652579.8797271 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | Sharathhebbar24/SSH_355M | Sharathhebbar24/SSH_355M | Sharathhebbar24 | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1423589409433636}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30985907344593705}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.00906344410876133}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25838926174496646}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41775}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11760305851063829}}] | {"precision": "float16", "architecture": "GPT2LMHeadModel", "params_billions": 0.355} |
| HF Open LLM v2 | suayptalha | suayptalha/Lix-14B-v0.1 | f4866eb3-28b0-416b-92c7-764d38905686 | 0.0.1 | hfopenllm_v2/suayptalha_Lix-14B-v0.1/1762652580.5443048 | 1762652580.5443058 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | suayptalha/Lix-14B-v0.1 | suayptalha/Lix-14B-v0.1 | suayptalha | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7813313120298586}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6607910825152539}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5294561933534743}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3699664429530201}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43378125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5314162234042553}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766} |
| HF Open LLM v2 | suayptalha | suayptalha/Clarus-7B-v0.2 | c85bdaec-43e5-4507-a615-89549901e392 | 0.0.1 | hfopenllm_v2/suayptalha_Clarus-7B-v0.2/1762652580.542793 | 1762652580.542794 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | suayptalha/Clarus-7B-v0.2 | suayptalha/Clarus-7B-v0.2 | suayptalha | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7679423928509688}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5490057426751466}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48564954682779454}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30201342281879195}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44165625000000003}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4399933510638298}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
| HF Open LLM v2 | suayptalha | suayptalha/Lamarckvergence-14B | 2c918f65-3565-41f6-a9c2-d042608bc592 | 0.0.1 | hfopenllm_v2/suayptalha_Lamarckvergence-14B/1762652580.544092 | 1762652580.544093 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | suayptalha/Lamarckvergence-14B | suayptalha/Lamarckvergence-14B | suayptalha | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7655941790006073}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.651698573892736}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5400302114803626}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36325503355704697}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44215625000000003}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5283410904255319}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766} |
| HF Open LLM v2 | suayptalha | suayptalha/Rombos-2.5-T.E-8.1 | fa7a31f9-9c10-4f5f-a06f-e628363a726a | 0.0.1 | hfopenllm_v2/suayptalha_Rombos-2.5-T.E-8.1/1762652580.544959 | 1762652580.544959 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | suayptalha/Rombos-2.5-T.E-8.1 | suayptalha/Rombos-2.5-T.E-8.1 | suayptalha | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6925047762159957}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5514641249478369}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49244712990936557}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.311241610738255}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41663541666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4445644946808511}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616} |
HF Open LLM v2 | suayptalha | suayptalha/Maestro-10B | b302d40a-64bd-4cdd-b5fb-3a9c1dbf1406 | 0.0.1 | hfopenllm_v2/suayptalha_Maestro-10B/1762652580.5447612 | 1762652580.5447621 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | suayptalha/Maestro-10B | suayptalha/Maestro-10B | suayptalha | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7767601076255447}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5746090622656775}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.19108761329305135}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33305369127516776}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43972916666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42179188829787234}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 10.306}
|
HF Open LLM v2 | suayptalha | suayptalha/HomerCreativeAnvita-Mix-Qw7B | 7bb9a15a-ece4-4fb7-b0ae-dc8cf69efb6b | 0.0.1 | hfopenllm_v2/suayptalha_HomerCreativeAnvita-Mix-Qw7B/1762652580.543669 | 1762652580.54367 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | suayptalha/HomerCreativeAnvita-Mix-Qw7B | suayptalha/HomerCreativeAnvita-Mix-Qw7B | suayptalha | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7807816593305763}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5564653181490319}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3610271903323263}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3145973154362416}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44159375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4444813829787234}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2 | suayptalha | suayptalha/Falcon3-Jessi-v0.4-7B-Slerp | 9a9cb5f7-e95a-46c5-90ed-42152fc0a617 | 0.0.1 | hfopenllm_v2/suayptalha_Falcon3-Jessi-v0.4-7B-Slerp/1762652580.543463 | 1762652580.543463 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | suayptalha/Falcon3-Jessi-v0.4-7B-Slerp | suayptalha/Falcon3-Jessi-v0.4-7B-Slerp | suayptalha | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7676176988169169}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5590927389495824}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39652567975830816}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31208053691275167}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48121875000000003}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.406000664893617}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 7.456}
|
HF Open LLM v2 | suayptalha | suayptalha/Clarus-7B-v0.3 | 21d1f676-4a7d-4305-b248-4a72d7ce0121 | 0.0.1 | hfopenllm_v2/suayptalha_Clarus-7B-v0.3/1762652580.543006 | 1762652580.543007 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | suayptalha/Clarus-7B-v0.3 | suayptalha/Clarus-7B-v0.3 | suayptalha | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7509064836855099}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5525985716155296}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4879154078549849}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31208053691275167}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44022916666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4384973404255319}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2 | suayptalha | suayptalha/Clarus-7B-v0.1 | b1070a2a-7694-472d-84a4-f20f4cfe1b88 | 0.0.1 | hfopenllm_v2/suayptalha_Clarus-7B-v0.1/1762652580.542475 | 1762652580.5424771 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | suayptalha/Clarus-7B-v0.1 | suayptalha/Clarus-7B-v0.1 | suayptalha | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7454110648634512}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5496611433440965}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49244712990936557}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3070469798657718}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44295833333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4387466755319149}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2 | necva | necva/replica-IEPile | 86a45185-8753-4cd0-818f-63a62f03423f | 0.0.1 | hfopenllm_v2/necva_replica-IEPile/1762652580.389119 | 1762652580.38912 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | necva/replica-IEPile | necva/replica-IEPile | necva | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4677910167245132}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4778579652970231}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.12386706948640483}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3062080536912752}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3997604166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3560505319148936}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 4.65}
|
HF Open LLM v2 | DavidAU | DavidAU/L3-SMB-Instruct-12.2B-F32 | 970cfd49-b72c-4cf5-af05-1ecfc57c94d8 | 0.0.1 | hfopenllm_v2/DavidAU_L3-SMB-Instruct-12.2B-F32/1762652579.541919 | 1762652579.54192 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DavidAU/L3-SMB-Instruct-12.2B-F32 | DavidAU/L3-SMB-Instruct-12.2B-F32 | DavidAU | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4303215468290802}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4786412360346213}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04682779456193353}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28187919463087246}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40872916666666664}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3312001329787234}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 12.174}
|
HF Open LLM v2 | DavidAU | DavidAU/L3.1-Dark-Planet-SpinFire-Uncensored-8B | 0982d599-57c7-4eeb-bd47-844879bb79a5 | 0.0.1 | hfopenllm_v2/DavidAU_L3.1-Dark-Planet-SpinFire-Uncensored-8B/1762652579.542578 | 1762652579.542578 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DavidAU/L3.1-Dark-Planet-SpinFire-Uncensored-8B | DavidAU/L3.1-Dark-Planet-SpinFire-Uncensored-8B | DavidAU | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7042702252246262}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5260910165037093}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09290030211480363}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27936241610738255}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.354125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3670212765957447}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2 | DavidAU | DavidAU/L3-Stheno-Maid-Blackroot-Grand-HORROR-16B | 9dbf220a-cbe9-40da-814f-951205c3abbe | 0.0.1 | hfopenllm_v2/DavidAU_L3-Stheno-Maid-Blackroot-Grand-HORROR-16B/1762652579.542142 | 1762652579.5421429 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DavidAU/L3-Stheno-Maid-Blackroot-Grand-HORROR-16B | DavidAU/L3-Stheno-Maid-Blackroot-Grand-HORROR-16B | DavidAU | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34389309254998957}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4736328900737677}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.02190332326283988}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2709731543624161}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40311458333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3570478723404255}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 16.537}
|
HF Open LLM v2 | DavidAU | DavidAU/L3-Dark-Planet-8B | f5c2a2cc-392e-4337-aad9-72d65ba87aab | 0.0.1 | hfopenllm_v2/DavidAU_L3-Dark-Planet-8B/1762652579.5412621 | 1762652579.541263 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DavidAU/L3-Dark-Planet-8B | DavidAU/L3-Dark-Planet-8B | DavidAU | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4134108609600305}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5084081453197787}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0823262839879154}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30033557046979864}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36159375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37367021276595747}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2 | DavidAU | DavidAU/L3-Lumimaid-12.2B-v0.1-OAS-Instruct | a8fe768d-f988-4fba-be80-2f5cc22dfd9d | 0.0.1 | hfopenllm_v2/DavidAU_L3-Lumimaid-12.2B-v0.1-OAS-Instruct/1762652579.541698 | 1762652579.5416992 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DavidAU/L3-Lumimaid-12.2B-v0.1-OAS-Instruct | DavidAU/L3-Lumimaid-12.2B-v0.1-OAS-Instruct | DavidAU | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3924032677739509}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46930207579694677}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04607250755287009}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27684563758389263}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41942708333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31416223404255317}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 12.174}
|
HF Open LLM v2 | DavidAU | DavidAU/L3-DARKEST-PLANET-16.5B | 2c317db5-86fa-41fd-8f1e-3cf08ba91cde | 0.0.1 | hfopenllm_v2/DavidAU_L3-DARKEST-PLANET-16.5B/1762652579.540939 | 1762652579.54094 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DavidAU/L3-DARKEST-PLANET-16.5B | DavidAU/L3-DARKEST-PLANET-16.5B | DavidAU | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6230623634179533}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5230436906708896}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.08987915407854985}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2953020134228188}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3753645833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.363031914893617}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 16.537}
|
HF Open LLM v2 | DavidAU | DavidAU/L3.1-MOE-2X8B-Deepseek-DeepHermes-e32-uncensored-abliterated-13.7B | a7df9a84-fa29-4c8e-8413-4542b5eafb63 | 0.0.1 | hfopenllm_v2/DavidAU_L3.1-MOE-2X8B-Deepseek-DeepHermes-e32-uncensored-abliterated-13.7B/1762652579.542795 | 1762652579.5427961 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DavidAU/L3.1-MOE-2X8B-Deepseek-DeepHermes-e32-uncensored-abliterated-13.7B | DavidAU/L3.1-MOE-2X8B-Deepseek-DeepHermes-e32-uncensored-abliterated-13.7B | DavidAU | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3345257250761313}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4420822344441435}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26057401812688824}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.313758389261745}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37486458333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2892287234042553}}]
|
{"precision": "bfloat16", "architecture": "MixtralForCausalLM", "params_billions": 13.668}
|
HF Open LLM v2 | DavidAU | DavidAU/L3-Stheno-v3.2-12.2B-Instruct | 51566db6-56e4-40bd-a248-6c968f2b83e8 | 0.0.1 | hfopenllm_v2/DavidAU_L3-Stheno-v3.2-12.2B-Instruct/1762652579.542359 | 1762652579.54236 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DavidAU/L3-Stheno-v3.2-12.2B-Instruct | DavidAU/L3-Stheno-v3.2-12.2B-Instruct | DavidAU | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4027945850343755}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4845980190500647}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.05060422960725076}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2751677852348993}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41025}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3345246010638298}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 12.174}
|
HF Open LLM v2 | DavidAU | DavidAU/L3-Jamet-12.2B-MK.V-Blackroot-Instruct | 85a1ef3f-7d68-4324-876d-b52cfa71317d | 0.0.1 | hfopenllm_v2/DavidAU_L3-Jamet-12.2B-MK.V-Blackroot-Instruct/1762652579.541475 | 1762652579.541475 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DavidAU/L3-Jamet-12.2B-MK.V-Blackroot-Instruct | DavidAU/L3-Jamet-12.2B-MK.V-Blackroot-Instruct | DavidAU | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3961998608137519}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4765717717789398}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04078549848942598}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2785234899328859}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40196875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3291223404255319}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 12.174}
|
HF Open LLM v2 | ladydaina | ladydaina/ECE-FDF | 737cda34-7dea-4c68-b6a3-5b10066f9241 | 0.0.1 | hfopenllm_v2/ladydaina_ECE-FDF/1762652580.311657 | 1762652580.311657 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | ladydaina/ECE-FDF | ladydaina/ECE-FDF | ladydaina | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3728440537773109}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5150177593278346}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.08157099697885196}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2827181208053691}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45039583333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30069813829787234}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2 | dreamgen | dreamgen/WizardLM-2-7B | 5ed2650d-d76f-49d6-915b-ac551129913e | 0.0.1 | hfopenllm_v2/dreamgen_WizardLM-2-7B/1762652580.1345458 | 1762652580.134547 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | dreamgen/WizardLM-2-7B | dreamgen/WizardLM-2-7B | dreamgen | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45829842595424586}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34867856163972016}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.03323262839879154}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28691275167785235}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39409374999999996}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2660405585106383}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2 | Sourjayon | Sourjayon/DeepSeek-R1-8b-Sify | 55a6c2c7-d29e-43a2-abd6-435117967a5d | 0.0.1 | hfopenllm_v2/Sourjayon_DeepSeek-R1-8b-Sify/1762652579.89035 | 1762652579.890351 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | Sourjayon/DeepSeek-R1-8b-Sify | Sourjayon/DeepSeek-R1-8b-Sify | Sourjayon | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3679481553389451}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33793580116642347}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24471299093655588}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2525167785234899}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3303125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.19805518617021275}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2 | Sourjayon | Sourjayon/DeepSeek-R1-ForumNXT | 101d8dec-2e39-47d1-b76d-d91d6562feff | 0.0.1 | hfopenllm_v2/Sourjayon_DeepSeek-R1-ForumNXT/1762652579.890614 | 1762652579.890615 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | Sourjayon/DeepSeek-R1-ForumNXT | Sourjayon/DeepSeek-R1-ForumNXT | Sourjayon | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26028714920854445}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3310198487331462}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25755287009063443}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27432885906040266}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3392395833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16481050531914893}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.777}
|
HF Open LLM v2 | LeroyDyer | LeroyDyer/CheckPoint_R1 | 7eba2aef-5c97-4526-92a8-d62bd5b59b6f | 0.0.1 | hfopenllm_v2/LeroyDyer_CheckPoint_R1/1762652579.715039 | 1762652579.71504 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | LeroyDyer/CheckPoint_R1 | LeroyDyer/CheckPoint_R1 | LeroyDyer | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17278376928771216}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4225419506658359}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04305135951661632}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27432885906040266}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4031458333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.22049534574468085}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2 | LeroyDyer | LeroyDyer/_Spydaz_Web_AI_AGI_R1_Teacher_Coder | 64c0088b-f9e7-4a9a-b449-3e1b514370ff | 0.0.1 | hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_Teacher_Coder/1762652579.7256 | 1762652579.725601 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | LeroyDyer/_Spydaz_Web_AI_AGI_R1_Teacher_Coder | LeroyDyer/_Spydaz_Web_AI_AGI_R1_Teacher_Coder | LeroyDyer | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5081572449988254}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47965526444811907}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0649546827794562}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2911073825503356}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4338125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28449135638297873}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2 | LeroyDyer | LeroyDyer/_Spydaz_Web_AI_12 | b4b57280-49db-4a07-929f-dbe2f222250c | 0.0.1 | hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_12/1762652579.722054 | 1762652579.722055 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | LeroyDyer/_Spydaz_Web_AI_12 | LeroyDyer/_Spydaz_Web_AI_12 | LeroyDyer | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2764985793250797}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31633960292107943}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.013595166163141994}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2684563758389262}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35815624999999995}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11369680851063829}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2 | LeroyDyer | LeroyDyer/LCARS_TOP_SCORE | 04631aa2-f1fd-4aea-ba88-53b474c71fe8 | 0.0.1 | hfopenllm_v2/LeroyDyer_LCARS_TOP_SCORE/1762652579.716028 | 1762652579.716029 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | LeroyDyer/LCARS_TOP_SCORE | LeroyDyer/LCARS_TOP_SCORE | LeroyDyer | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43706587410293574}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5127371051825098}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06722054380664652}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2860738255033557}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42928125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3031083776595745}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2 | LeroyDyer | LeroyDyer/_Spydaz_Web_AI_ChatQA | 4e72d3b7-4ebb-470d-8f86-66d6cb28095f | 0.0.1 | hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_ChatQA/1762652579.727107 | 1762652579.727108 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | LeroyDyer/_Spydaz_Web_AI_ChatQA | LeroyDyer/_Spydaz_Web_AI_ChatQA | LeroyDyer | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1414591062824417}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32359493837413505}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.009818731117824773}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26593959731543626}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3447291666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14752327127659576}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2 | LeroyDyer | LeroyDyer/LCARS_AI_StarTrek_Computer | a3e19823-43ac-44ac-9dee-960a98139fa8 | 0.0.1 | hfopenllm_v2/LeroyDyer_LCARS_AI_StarTrek_Computer/1762652579.7157388 | 1762652579.715741 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | LeroyDyer/LCARS_AI_StarTrek_Computer | LeroyDyer/LCARS_AI_StarTrek_Computer | LeroyDyer | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35825609383103496}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4446191188748297}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04078549848942598}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2676174496644295}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3950208333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24584441489361702}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2 | LeroyDyer | LeroyDyer/SpydazWeb_AI_HumanAI_009_CHAT | a6d3b7b1-8834-4b74-8849-6d80381c46f5 | 0.0.1 | hfopenllm_v2/LeroyDyer_SpydazWeb_AI_HumanAI_009_CHAT/1762652579.718692 | 1762652579.718693 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | LeroyDyer/SpydazWeb_AI_HumanAI_009_CHAT | LeroyDyer/SpydazWeb_AI_HumanAI_009_CHAT | LeroyDyer | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2973310815303395}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3306728717792965}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.01661631419939577}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28104026845637586}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1432845744680851}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2 | LeroyDyer | LeroyDyer/_Spydaz_Web_AI_Top_Teacher_ | a4beba0f-b860-4d7d-b1c3-0f569ba59171 | 0.0.1 | hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_Top_Teacher_/1762652579.728002 | 1762652579.728004 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | LeroyDyer/_Spydaz_Web_AI_Top_Teacher_ | LeroyDyer/_Spydaz_Web_AI_Top_Teacher_ | LeroyDyer | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44038817005545283}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48909617780536035}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11555891238670694}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27768456375838924}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4366041666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3149933510638298}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2 | LeroyDyer | LeroyDyer/_Spydaz_Web_AI_BIBLE_002 | 060f29d1-8b1d-4651-808d-b1419bd76cd9 | 0.0.1 | hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_BIBLE_002/1762652579.72666 | 1762652579.7266612 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | LeroyDyer/_Spydaz_Web_AI_BIBLE_002 | LeroyDyer/_Spydaz_Web_AI_BIBLE_002 | LeroyDyer | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21949538336059432}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3289070186514165}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.017371601208459216}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28439597315436244}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34069791666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.13680186170212766}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2 | LeroyDyer | LeroyDyer/_Spydaz_Web_AI_AGI_R1_002 | 86e8ff02-0dd2-4023-ab18-359d24a8a4fd | 0.0.1 | hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_002/1762652579.7226508 | 1762652579.7226508 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | LeroyDyer/_Spydaz_Web_AI_AGI_R1_002 | LeroyDyer/_Spydaz_Web_AI_AGI_R1_002 | LeroyDyer | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5306885729863429}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4682582050072746}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0581570996978852}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2684563758389262}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42546875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28939494680851063}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2 | LeroyDyer | LeroyDyer/_Spydaz_Web_AI_AGI_R1_OmG_002 | 8201723e-92fb-4207-afa8-df7db794c889 | 0.0.1 | hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_OmG_002/1762652579.7245262 | 1762652579.7245262 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | LeroyDyer/_Spydaz_Web_AI_AGI_R1_OmG_002 | LeroyDyer/_Spydaz_Web_AI_AGI_R1_OmG_002 | LeroyDyer | unknown |
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.546150879665953}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4655028607746287}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04984894259818731}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2785234899328859}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45108333333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28665226063829785}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_Math_Teacher
|
24fa44cb-86d9-4e67-be8f-42f7fc574d52
|
0.0.1
|
hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_Math_Teacher/1762652579.7241092
|
1762652579.7241101
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_Math_Teacher
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_Math_Teacher
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5772250960784053}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4805094960871836}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.054380664652567974}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2860738255033557}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5222395833333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2956283244680851}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_OmG_001
|
b13652e3-43f1-4670-94f7-1a0bbf622f33
|
0.0.1
|
hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_OmG_001/1762652579.72431
|
1762652579.724311
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_OmG_001
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_OmG_001
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5817963004827191}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4907982146977475}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.05060422960725076}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30033557046979864}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4486041666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29055851063829785}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/SpydazWeb_AI_CyberTron_Ultra_7b
|
e8b992b8-9f0a-4bfb-ab53-3b07ca1ca117
|
0.0.1
|
hfopenllm_v2/LeroyDyer_SpydazWeb_AI_CyberTron_Ultra_7b/1762652579.71707
|
1762652579.717071
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/SpydazWeb_AI_CyberTron_Ultra_7b
|
LeroyDyer/SpydazWeb_AI_CyberTron_Ultra_7b
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.15557276914143361}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48107736108561827}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.013595166163141994}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29278523489932884}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41362499999999996}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2865691489361702}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/LCARS_AI_001
|
f6b84bde-67aa-4c50-a46e-1f80605037de
|
0.0.1
|
hfopenllm_v2/LeroyDyer_LCARS_AI_001/1762652579.7152472
|
1762652579.715248
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/LCARS_AI_001
|
LeroyDyer/LCARS_AI_001
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31094495937445976}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42578875825590146}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.023413897280966767}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2634228187919463}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43836458333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2670378989361702}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/SpydazWeb_AI_HumanAI_012_INSTRUCT_MX
|
9cc77018-d090-4202-bcf5-d0031097b84e
|
0.0.1
|
hfopenllm_v2/LeroyDyer_SpydazWeb_AI_HumanAI_012_INSTRUCT_MX/1762652579.7204201
|
1762652579.720421
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/SpydazWeb_AI_HumanAI_012_INSTRUCT_MX
|
LeroyDyer/SpydazWeb_AI_HumanAI_012_INSTRUCT_MX
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3065987136353764}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3158421938604874}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.015105740181268883}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2911073825503356}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34438541666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11070478723404255}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/CheckPoint_A
|
771366a5-e227-4ff8-b60f-744020994bec
|
0.0.1
|
hfopenllm_v2/LeroyDyer_CheckPoint_A/1762652579.714355
|
1762652579.714355
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/CheckPoint_A
|
LeroyDyer/CheckPoint_A
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45127927233074905}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4747699745968042}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.05891238670694864}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2835570469798658}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4230833333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28798204787234044}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/SpydazWebAI_Human_AGI_001
|
a4c9a905-1a7c-406a-ab38-6a5e71ed0bf5
|
0.0.1
|
hfopenllm_v2/LeroyDyer_SpydazWebAI_Human_AGI_001/1762652579.716855
|
1762652579.716856
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/SpydazWebAI_Human_AGI_001
|
LeroyDyer/SpydazWebAI_Human_AGI_001
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31181930610779396}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3433421938604874}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.019637462235649546}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2986577181208054}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39939583333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14261968085106383}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/SpydazWeb_AI_HumanAI_RP
|
a4a38b96-036f-40db-8a0b-024a36f004f5
|
0.0.1
|
hfopenllm_v2/LeroyDyer_SpydazWeb_AI_HumanAI_RP/1762652579.721039
|
1762652579.7210398
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/SpydazWeb_AI_HumanAI_RP
|
LeroyDyer/SpydazWeb_AI_HumanAI_RP
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2541168543907942}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33230179059744286}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.01283987915407855}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2751677852348993}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3882604166666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1323969414893617}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/SpydazWeb_HumanAI_M3
|
d5dd0be3-e7a7-4636-b513-3c1d5532807f
|
0.0.1
|
hfopenllm_v2/LeroyDyer_SpydazWeb_HumanAI_M3/1762652579.721856
|
1762652579.721857
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/SpydazWeb_HumanAI_M3
|
LeroyDyer/SpydazWeb_HumanAI_M3
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1578711153073844}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31272572546166244}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.00906344410876133}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2709731543624161}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3914270833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11486037234042554}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/SpydazWeb_AI_HumanAI_011_INSTRUCT
|
bc7bf4d0-45e9-4b37-8e5f-edc92fb1bd66
|
0.0.1
|
hfopenllm_v2/LeroyDyer_SpydazWeb_AI_HumanAI_011_INSTRUCT/1762652579.719242
|
1762652579.719243
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/SpydazWeb_AI_HumanAI_011_INSTRUCT
|
LeroyDyer/SpydazWeb_AI_HumanAI_011_INSTRUCT
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3148667757106699}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3522609512356862}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.014350453172205438}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27936241610738255}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3831458333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.15949135638297873}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/CheckPoint_B
|
4e44fd55-9538-4065-8763-5d1c3d00be5d
|
0.0.1
|
hfopenllm_v2/LeroyDyer_CheckPoint_B/1762652579.7146208
|
1762652579.714622
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/CheckPoint_B
|
LeroyDyer/CheckPoint_B
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4439852923576111}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47799475378324896}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07175226586102719}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2902684563758389}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38984375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29072473404255317}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_OmG_Coder
|
e166fa17-c285-466e-ab2e-1eb106ebd271
|
0.0.1
|
hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_OmG_Coder/1762652579.724742
|
1762652579.724742
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_OmG_Coder
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_OmG_Coder
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4923702442851634}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46376531085099754}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.054380664652567974}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27348993288590606}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5624583333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28897938829787234}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_MasterCoder
|
85ce2909-a5f9-413a-8719-cd0a66874535
|
0.0.1
|
hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_MasterCoder/1762652579.723048
|
1762652579.723048
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_MasterCoder
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_MasterCoder
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.414259719765777}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4689417813020516}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06117824773413897}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.276006711409396}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47197916666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27194148936170215}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_Math_001
|
8a7df636-f1bb-4a74-bb7f-8a412edf6bd1
|
0.0.1
|
hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_Math_001/1762652579.723258
|
1762652579.723258
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_Math_001
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_Math_001
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4571492528712705}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48178882135920675}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06948640483383686}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27684563758389263}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47784375000000007}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2681183510638298}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_X1
|
7c72e837-92fd-4f3b-9c4f-205ffc93ac70
|
0.0.1
|
hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_X1/1762652579.7260191
|
1762652579.72602
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_X1
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_X1
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.427323944910615}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47589342126093026}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.05664652567975831}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2600671140939597}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4231770833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2890625}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_Top_Student
|
d652c8f6-d5b4-482f-91c7-5eb9529765c1
|
0.0.1
|
hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_Top_Student/1762652579.725811
|
1762652579.725811
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_Top_Student
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_Top_Student
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6039530667517742}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49877449828070924}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07250755287009064}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2726510067114094}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5397916666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30244348404255317}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/SpydazWeb_AI_HumanAI_011_INSTRUCT_ML_r1
|
10d76569-edca-47db-abf2-1d0fd73df198
|
0.0.1
|
hfopenllm_v2/LeroyDyer_SpydazWeb_AI_HumanAI_011_INSTRUCT_ML_r1/1762652579.7198021
|
1762652579.7198029
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/SpydazWeb_AI_HumanAI_011_INSTRUCT_ML_r1
|
LeroyDyer/SpydazWeb_AI_HumanAI_011_INSTRUCT_ML_r1
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4049677079039171}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48583341042911066}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.05513595166163142}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29278523489932884}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3921354166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2956283244680851}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/SpydazWeb_HumanAI_M1
|
ee856df0-01ea-4f06-9323-951144c9e82f
|
0.0.1
|
hfopenllm_v2/LeroyDyer_SpydazWeb_HumanAI_M1/1762652579.721453
|
1762652579.721453
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/SpydazWeb_HumanAI_M1
|
LeroyDyer/SpydazWeb_HumanAI_M1
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3582062261466243}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35632705798398107}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.024924471299093656}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2676174496644295}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36711458333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1663065159574468}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/SpydazWeb_AI_HumanAI_TextVision
|
558a0ed7-a667-421e-bbab-094b46274239
|
0.0.1
|
hfopenllm_v2/LeroyDyer_SpydazWeb_AI_HumanAI_TextVision/1762652579.7212439
|
1762652579.7212448
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/SpydazWeb_AI_HumanAI_TextVision
|
LeroyDyer/SpydazWeb_AI_HumanAI_TextVision
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3062740196013245}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33536617928965984}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.014350453172205438}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29194630872483224}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39384375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.13871343085106383}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/SpydazWeb_AI_HumanAI_007
|
3143a635-10da-4cb5-9c2f-eae2988d9e60
|
0.0.1
|
hfopenllm_v2/LeroyDyer_SpydazWeb_AI_HumanAI_007/1762652579.718461
|
1762652579.718461
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/SpydazWeb_AI_HumanAI_007
|
LeroyDyer/SpydazWeb_AI_HumanAI_007
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3351751131442351}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3415665794743605}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.022658610271903322}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28859060402684567}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40962499999999996}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.13522273936170212}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/_Spydaz_Web_AI_ChatML_002
|
07981f28-b019-42f8-b14b-44ab73ebaa0a
|
0.0.1
|
hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_ChatML_002/1762652579.7268748
|
1762652579.726876
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/_Spydaz_Web_AI_ChatML_002
|
LeroyDyer/_Spydaz_Web_AI_ChatML_002
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24122772022677608}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3106383598957094}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.011329305135951661}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2575503355704698}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3623125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.10945811170212766}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/SpydazWeb_HumanAI_M2
|
4ea0436d-6ec9-40db-af56-2f7f1b0317df
|
0.0.1
|
hfopenllm_v2/LeroyDyer_SpydazWeb_HumanAI_M2/1762652579.7216609
|
1762652579.721662
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/SpydazWeb_HumanAI_M2
|
LeroyDyer/SpydazWeb_HumanAI_M2
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3750171766468526}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39308772552915555}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.028700906344410877}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27936241610738255}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3751458333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2010472074468085}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_MUSR
|
285688d5-c7ad-437b-a54c-9e6108d85267
|
0.0.1
|
hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_MUSR/1762652579.722848
|
1762652579.7228491
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_MUSR
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_MUSR
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.478606763387811}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4671769411194033}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06042296072507553}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28439597315436244}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48689583333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2828291223404255}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/SpydazWeb_AI_HumanAI_006
|
cdbebbea-4749-472b-8cec-5da5ffa96d65
|
0.0.1
|
hfopenllm_v2/LeroyDyer_SpydazWeb_AI_HumanAI_006/1762652579.718229
|
1762652579.71823
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/SpydazWeb_AI_HumanAI_006
|
LeroyDyer/SpydazWeb_AI_HumanAI_006
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14300832901146734}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3301800420981355}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.010574018126888218}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2802013422818792}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3567916666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11353058510638298}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/_Spydaz_Web_AI_AGI_RP_R1
|
fd4405cf-9849-4606-a01c-a20459198853
|
0.0.1
|
hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_RP_R1/1762652579.726439
|
1762652579.72644
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/_Spydaz_Web_AI_AGI_RP_R1
|
LeroyDyer/_Spydaz_Web_AI_AGI_RP_R1
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5426036250482054}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4701061648636955}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06042296072507553}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26929530201342283}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42013541666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28939494680851063}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/Mixtral_AI_SwahiliTron_7b
|
4f5fadb6-5fad-4b82-a027-1d4f497dc476
|
0.0.1
|
hfopenllm_v2/LeroyDyer_Mixtral_AI_SwahiliTron_7b/1762652579.716297
|
1762652579.716299
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/Mixtral_AI_SwahiliTron_7b
|
LeroyDyer/Mixtral_AI_SwahiliTron_7b
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1533996462718919}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3055092453201354}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.013595166163141994}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2651006711409396}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34203125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.12076130319148937}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_Math_Student
|
89f92d24-19c1-4021-819d-9c7ed717046c
|
0.0.1
|
hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_Math_Student/1762652579.723874
|
1762652579.723874
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_Math_Student
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_Math_Student
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5735781060918363}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48808115770970123}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0513595166163142}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2902684563758389}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.50975}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.292719414893617}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
LeroyDyer
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_Math_003
|
79336acd-d465-4938-af7f-f7a688f46fd4
|
0.0.1
|
hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_Math_003/1762652579.723467
|
1762652579.723468
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_Math_003
|
LeroyDyer/_Spydaz_Web_AI_AGI_R1_Math_003
|
LeroyDyer
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6200148938150774}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4755509035158693}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06948640483383686}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28104026845637586}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42019791666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29986702127659576}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
model: LeroyDyer/_Spydaz_Web_AI_ChatQA_003 | developer/model_developer: LeroyDyer | uuid: 471aac2a-5c4b-4b1b-a56b-490fafc444d8
leaderboard: HF Open LLM v2 | schema_version: 0.0.1 | evaluation_id: hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_ChatQA_003/1762652579.727351 | retrieved_timestamp: 1762652579.7273521
source_data: ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | evaluation_source: HF Open LLM v2 (leaderboard) | organization: Hugging Face (url: null, logo: null) | evaluator_relationship: third_party
model_name/model_id: LeroyDyer/_Spydaz_Web_AI_ChatQA_003 | inference_platform: unknown
scores: IFEval 0.22091938279321088 | BBH 0.3171811407815537 | MATH Level 5 0.010574018126888218 | GPQA 0.2709731543624161 | MUSR 0.38184375 | MMLU-PRO 0.11328125
additional_details: {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}

model: LeroyDyer/SpydazWeb_AI_HumanAGI_002 | developer/model_developer: LeroyDyer | uuid: 3a6cfbae-80c1-4ec6-9c14-1ddeeb6e7138
leaderboard: HF Open LLM v2 | schema_version: 0.0.1 | evaluation_id: hfopenllm_v2/LeroyDyer_SpydazWeb_AI_HumanAGI_002/1762652579.71767 | retrieved_timestamp: 1762652579.7176719
source_data: ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | evaluation_source: HF Open LLM v2 (leaderboard) | organization: Hugging Face (url: null, logo: null) | evaluator_relationship: third_party
model_name/model_id: LeroyDyer/SpydazWeb_AI_HumanAGI_002 | inference_platform: unknown
scores: IFEval 0.40876430094371824 | BBH 0.5043871825389313 | MATH Level 5 0.06646525679758308 | GPQA 0.28691275167785235 | MUSR 0.48648958333333336 | MMLU-PRO 0.3058510638297872
additional_details: {"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}

model: LeroyDyer/_Spydaz_Web_AI_AGI_R1_Student_Coder | developer/model_developer: LeroyDyer | uuid: 1e7531fc-9f12-4c7c-8bf5-44511c37c23b
leaderboard: HF Open LLM v2 | schema_version: 0.0.1 | evaluation_id: hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_Student_Coder/1762652579.725384 | retrieved_timestamp: 1762652579.725385
source_data: ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | evaluation_source: HF Open LLM v2 (leaderboard) | organization: Hugging Face (url: null, logo: null) | evaluator_relationship: third_party
model_name/model_id: LeroyDyer/_Spydaz_Web_AI_AGI_R1_Student_Coder | inference_platform: unknown
scores: IFEval 0.5449518388985669 | BBH 0.4650844324968853 | MATH Level 5 0.06570996978851963 | GPQA 0.28439597315436244 | MUSR 0.43883333333333335 | MMLU-PRO 0.27684507978723405
additional_details: {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}

model: LeroyDyer/LCARS_AI_1x4_003_SuperAI | developer/model_developer: LeroyDyer | uuid: db8614eb-2b53-460c-a80b-dceb47a9703f
leaderboard: HF Open LLM v2 | schema_version: 0.0.1 | evaluation_id: hfopenllm_v2/LeroyDyer_LCARS_AI_1x4_003_SuperAI/1762652579.7154438 | retrieved_timestamp: 1762652579.715445
source_data: ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | evaluation_source: HF Open LLM v2 (leaderboard) | organization: Hugging Face (url: null, logo: null) | evaluator_relationship: third_party
model_name/model_id: LeroyDyer/LCARS_AI_1x4_003_SuperAI | inference_platform: unknown
scores: IFEval 0.41111251479407973 | BBH 0.49198503573704794 | MATH Level 5 0.05740181268882175 | GPQA 0.2827181208053691 | MUSR 0.4506145833333333 | MMLU-PRO 0.29720744680851063
additional_details: {"precision": "float16", "architecture": "MixtralForCausalLM", "params_billions": 24.154}

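The additional_details field pairs the evaluation precision with the architecture and parameter count, which is enough for a rough memory estimate. The sketch below is a back-of-the-envelope illustration only (it assumes 2 bytes per parameter for float16/bfloat16 weights and ignores activations, KV cache, and any quantization); nothing like it is published by the leaderboard itself.

```python
import json

# Approximate bytes per parameter for the precisions that appear in these records.
BYTES_PER_PARAM = {"float16": 2, "bfloat16": 2, "float32": 4}

def approx_weight_gb(additional_details_json: str) -> float:
    """Rough weight-only memory footprint in decimal GB (weights only, no runtime overhead)."""
    details = json.loads(additional_details_json)
    total_bytes = details["params_billions"] * 1e9 * BYTES_PER_PARAM[details["precision"]]
    return total_bytes / 1e9

# The 24.154B-parameter Mixtral-style record above: roughly 48.3 GB of weights.
print(approx_weight_gb('{"precision": "float16", "architecture": "MixtralForCausalLM", "params_billions": 24.154}'))
# The 7.242B-parameter Mistral-style records: roughly 14.5 GB of weights.
print(approx_weight_gb('{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}'))
```
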
model: LeroyDyer/SpydazWeb_AI_HumanAI_011_INSTRUCT_ML | developer/model_developer: LeroyDyer | uuid: fbd83964-530c-4d0e-a305-9f8451affb23
leaderboard: HF Open LLM v2 | schema_version: 0.0.1 | evaluation_id: hfopenllm_v2/LeroyDyer_SpydazWeb_AI_HumanAI_011_INSTRUCT_ML/1762652579.719551 | retrieved_timestamp: 1762652579.719552
source_data: ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | evaluation_source: HF Open LLM v2 (leaderboard) | organization: Hugging Face (url: null, logo: null) | evaluator_relationship: third_party
model_name/model_id: LeroyDyer/SpydazWeb_AI_HumanAI_011_INSTRUCT_ML | inference_platform: unknown
scores: IFEval 0.37524213531208306 | BBH 0.39840187861283577 | MATH Level 5 0.0256797583081571 | GPQA 0.29278523489932884 | MUSR 0.42391666666666666 | MMLU-PRO 0.2018783244680851
additional_details: {"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}

model: LeroyDyer/_Spydaz_Web_AI_AGI_R1_001 | developer/model_developer: LeroyDyer | uuid: 51d4724b-c85c-4ad4-a4bd-9be93cd99a2a
leaderboard: HF Open LLM v2 | schema_version: 0.0.1 | evaluation_id: hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_001/1762652579.72245 | retrieved_timestamp: 1762652579.722451
source_data: ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | evaluation_source: HF Open LLM v2 (leaderboard) | organization: Hugging Face (url: null, logo: null) | evaluator_relationship: third_party
model_name/model_id: LeroyDyer/_Spydaz_Web_AI_AGI_R1_001 | inference_platform: unknown
scores: IFEval 0.4505046609662362 | BBH 0.4609124425176902 | MATH Level 5 0.0634441087613293 | GPQA 0.2676174496644295 | MUSR 0.42559375 | MMLU-PRO 0.2734375
additional_details: {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}

model: LeroyDyer/_Spydaz_Web_AI_14 | developer/model_developer: LeroyDyer | uuid: 6233aac6-0ce3-4f3c-8ee0-87d2482d3ea2
leaderboard: HF Open LLM v2 | schema_version: 0.0.1 | evaluation_id: hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_14/1762652579.722256 | retrieved_timestamp: 1762652579.722257
source_data: ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | evaluation_source: HF Open LLM v2 (leaderboard) | organization: Hugging Face (url: null, logo: null) | evaluator_relationship: third_party
model_name/model_id: LeroyDyer/_Spydaz_Web_AI_14 | inference_platform: unknown
scores: IFEval 0.1811770546594148 | BBH 0.2988848127354542 | MATH Level 5 0.012084592145015106 | GPQA 0.26593959731543626 | MUSR 0.3395208333333333 | MMLU-PRO 0.11394614361702128
additional_details: {"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}

model: LeroyDyer/_Spydaz_Web_AI_TEMP_ | developer/model_developer: LeroyDyer | uuid: f44f513c-0814-4f3b-94a4-9e28318da40e
leaderboard: HF Open LLM v2 | schema_version: 0.0.1 | evaluation_id: hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_TEMP_/1762652579.7275891 | retrieved_timestamp: 1762652579.7275898
source_data: ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | evaluation_source: HF Open LLM v2 (leaderboard) | organization: Hugging Face (url: null, logo: null) | evaluator_relationship: third_party
model_name/model_id: LeroyDyer/_Spydaz_Web_AI_TEMP_ | inference_platform: unknown
scores: IFEval 0.47953097780555587 | BBH 0.495695749059555 | MATH Level 5 0.12386706948640483 | GPQA 0.27936241610738255 | MUSR 0.42175 | MMLU-PRO 0.3120844414893617
additional_details: {"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}

model: LeroyDyer/_Spydaz_Web_AI_AGI_R1_Math_AdvancedStudent | developer/model_developer: LeroyDyer | uuid: ed000ee0-4193-46c4-8114-2ea3dbfec9f7
leaderboard: HF Open LLM v2 | schema_version: 0.0.1 | evaluation_id: hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_Math_AdvancedStudent/1762652579.7236722 | retrieved_timestamp: 1762652579.7236722
source_data: ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | evaluation_source: HF Open LLM v2 (leaderboard) | organization: Hugging Face (url: null, logo: null) | evaluator_relationship: third_party
model_name/model_id: LeroyDyer/_Spydaz_Web_AI_AGI_R1_Math_AdvancedStudent | inference_platform: unknown
scores: IFEval 0.5950854842927876 | BBH 0.4927473238025393 | MATH Level 5 0.054380664652567974 | GPQA 0.29194630872483224 | MUSR 0.5198229166666667 | MMLU-PRO 0.2999501329787234
additional_details: {"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}

model: LeroyDyer/SpydazWeb_AI_HumanAGI_001_M2 | developer/model_developer: LeroyDyer | uuid: daa704a9-2eed-4549-a847-3606c9e8a733
leaderboard: HF Open LLM v2 | schema_version: 0.0.1 | evaluation_id: hfopenllm_v2/LeroyDyer_SpydazWeb_AI_HumanAGI_001_M2/1762652579.71728 | retrieved_timestamp: 1762652579.717281
source_data: ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | evaluation_source: HF Open LLM v2 (leaderboard) | organization: Hugging Face (url: null, logo: null) | evaluator_relationship: third_party
model_name/model_id: LeroyDyer/SpydazWeb_AI_HumanAGI_001_M2 | inference_platform: unknown
scores: IFEval 0.39395138233221183 | BBH 0.4888172059118469 | MATH Level 5 0.03851963746223565 | GPQA 0.28942953020134227 | MUSR 0.4503020833333333 | MMLU-PRO 0.300531914893617
additional_details: {"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}

model: LeroyDyer/_Spydaz_Web_AI_AGI_R1_X2 | developer/model_developer: LeroyDyer | uuid: 169fe3b3-527a-408f-9442-5bc3616cc320
leaderboard: HF Open LLM v2 | schema_version: 0.0.1 | evaluation_id: hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_X2/1762652579.7262201 | retrieved_timestamp: 1762652579.726221
source_data: ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | evaluation_source: HF Open LLM v2 (leaderboard) | organization: Hugging Face (url: null, logo: null) | evaluator_relationship: third_party
model_name/model_id: LeroyDyer/_Spydaz_Web_AI_AGI_R1_X2 | inference_platform: unknown
scores: IFEval 0.5433782364127182 | BBH 0.4785559277736029 | MATH Level 5 0.06117824773413897 | GPQA 0.2978187919463087 | MUSR 0.46953125 | MMLU-PRO 0.29205452127659576
additional_details: {"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}

model: LeroyDyer/_Spydaz_Web_AI_AGI_R1_OmG_MathMaster | developer/model_developer: LeroyDyer | uuid: a79378f7-01b3-4bf0-8b76-2e670d2a7366
leaderboard: HF Open LLM v2 | schema_version: 0.0.1 | evaluation_id: hfopenllm_v2/LeroyDyer__Spydaz_Web_AI_AGI_R1_OmG_MathMaster/1762652579.7251709 | retrieved_timestamp: 1762652579.7251709
source_data: ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | evaluation_source: HF Open LLM v2 (leaderboard) | organization: Hugging Face (url: null, logo: null) | evaluator_relationship: third_party
model_name/model_id: LeroyDyer/_Spydaz_Web_AI_AGI_R1_OmG_MathMaster | inference_platform: unknown
scores: IFEval 0.5558429411738631 | BBH 0.47422312505675873 | MATH Level 5 0.05362537764350453 | GPQA 0.287751677852349 | MUSR 0.45098958333333333 | MMLU-PRO 0.2672041223404255
additional_details: {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}

model: LeroyDyer/SpydazWebAI_Human_AGI | developer/model_developer: LeroyDyer | uuid: 8e1f811e-3e86-4440-a5dd-bf607aa02ad6
leaderboard: HF Open LLM v2 | schema_version: 0.0.1 | evaluation_id: hfopenllm_v2/LeroyDyer_SpydazWebAI_Human_AGI/1762652579.7166212 | retrieved_timestamp: 1762652579.716622
source_data: ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | evaluation_source: HF Open LLM v2 (leaderboard) | organization: Hugging Face (url: null, logo: null) | evaluator_relationship: third_party
model_name/model_id: LeroyDyer/SpydazWebAI_Human_AGI | inference_platform: unknown
scores: IFEval 0.3388221031308041 | BBH 0.3374862127508733 | MATH Level 5 0.014350453172205438 | GPQA 0.2827181208053691 | MUSR 0.39663541666666663 | MMLU-PRO 0.1478557180851064
additional_details: {"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}

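One simple way to compare the LeroyDyer entries above is an unweighted mean over the six benchmark scores per model. The sketch below hard-codes three score vectors copied from the records above and ranks them; it is a convenience view under that assumption, not the leaderboard's own aggregation.

```python
from statistics import mean

# Scores copied from three of the records above, in the order
# IFEval, BBH, MATH Level 5, GPQA, MUSR, MMLU-PRO.
models = {
    "LeroyDyer/_Spydaz_Web_AI_AGI_R1_Math_AdvancedStudent":
        [0.5950854842927876, 0.4927473238025393, 0.054380664652567974,
         0.29194630872483224, 0.5198229166666667, 0.2999501329787234],
    "LeroyDyer/_Spydaz_Web_AI_TEMP_":
        [0.47953097780555587, 0.495695749059555, 0.12386706948640483,
         0.27936241610738255, 0.42175, 0.3120844414893617],
    "LeroyDyer/_Spydaz_Web_AI_ChatQA_003":
        [0.22091938279321088, 0.3171811407815537, 0.010574018126888218,
         0.2709731543624161, 0.38184375, 0.11328125],
}

# Rank by unweighted mean, highest first.
for name, scores in sorted(models.items(), key=lambda kv: mean(kv[1]), reverse=True):
    print(f"{mean(scores):.4f}  {name}")
# Expected output (approximately): 0.3757, 0.3520, and 0.2191 for the three models above.
```
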