Column schema:

| Column | Type |
|---|---|
| _leaderboard | stringclasses (1 value) |
| _developer | stringclasses (559 values) |
| _model | stringlengths (9–102) |
| _uuid | stringlengths (36) |
| schema_version | stringclasses (1 value) |
| evaluation_id | stringlengths (35–133) |
| retrieved_timestamp | stringlengths (13–18) |
| source_data | stringclasses (1 value) |
| evaluation_source_name | stringclasses (1 value) |
| evaluation_source_type | stringclasses (1 value) |
| source_organization_name | stringclasses (1 value) |
| source_organization_url | null |
| source_organization_logo_url | null |
| evaluator_relationship | stringclasses (1 value) |
| model_name | stringlengths (4–102) |
| model_id | stringlengths (9–102) |
| model_developer | stringclasses (559 values) |
| model_inference_platform | stringclasses (1 value) |
| evaluation_results | stringlengths (1.35k–1.41k) |
| additional_details | stringclasses (660 values) |

Every sample row shown below carries the same values in the constant columns: _leaderboard and evaluation_source_name are "HF Open LLM v2", schema_version is "0.0.1", source_data is ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"], evaluation_source_type is "leaderboard", source_organization_name is "Hugging Face", source_organization_url and source_organization_logo_url are null, evaluator_relationship is "third_party", and model_inference_platform is "unknown". In every row shown, model_name and model_id repeat _model and model_developer repeats _developer, so each row below lists only the varying columns: _model, _developer, _uuid, evaluation_id, retrieved_timestamp, evaluation_results, and additional_details (the final sample row is truncated before its additional_details value).
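Each row's evaluation_results cell is a JSON-encoded list with one entry per benchmark (IFEval, BBH, MATH Level 5, GPQA, MUSR, MMLU-PRO), each carrying a metric_config and a score_details.score. Below is a minimal parsing sketch, assuming Python and the standard json module; the cell literal only mirrors the shape of the real cells (single entry, shortened score) rather than quoting a row verbatim:

```python
import json

# Abbreviated stand-in for one "evaluation_results" cell; real cells hold six entries.
raw_cell = (
    '[{"evaluation_name": "IFEval", '
    '"metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, '
    '"score_type": "continuous", "min_score": 0, "max_score": 1}, '
    '"score_details": {"score": 0.455}}]'
)

for entry in json.loads(raw_cell):
    name = entry["evaluation_name"]             # e.g. "IFEval"
    config = entry["metric_config"]             # score range and direction
    score = entry["score_details"]["score"]     # float between min_score and max_score
    print(f"{name}: {score:.3f} (lower_is_better={config['lower_is_better']})")
```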
**bond005/meno-tiny-0.1** (_developer: bond005)
- _uuid: 109acb38-3026-4573-b082-8277b9501f09
- evaluation_id: hfopenllm_v2/bond005_meno-tiny-0.1/1762652580.035417
- retrieved_timestamp: 1762652580.035417
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45497613000172876}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4262909130965971}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.13897280966767372}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28187919463087246}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4184583333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2785904255319149}}]
- additional_details: {"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}

**4season/final_model_test_v2** (_developer: 4season)
- _uuid: 74973e37-cd82-4e8a-816a-02b035fabff4
- evaluation_id: hfopenllm_v2/4season_final_model_test_v2/1762652579.4714398
- retrieved_timestamp: 1762652579.4714408
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3191132860809319}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6342049783295018}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.08383685800604229}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3271812080536913}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4314479166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3528091755319149}}]
- additional_details: {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 21.421}

**ZeusLabs/L3-Aethora-15B-V2** (_developer: ZeusLabs)
- _uuid: 0e9ed58c-1a3e-49b4-8013-994642a95920
- evaluation_id: hfopenllm_v2/ZeusLabs_L3-Aethora-15B-V2/1762652579.968798
- retrieved_timestamp: 1762652579.9687989
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7208063493752133}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5010910465463698}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.08081570996978851}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.287751677852349}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3870833333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3499833776595745}}]
- additional_details: {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 15.01}

**GoToCompany/gemma2-9b-cpt-sahabatai-v1-instruct** (_developer: GoToCompany)
- _uuid: 68ff0a5c-9e76-410b-a4e3-4b7de0e7fe35
- evaluation_id: hfopenllm_v2/GoToCompany_gemma2-9b-cpt-sahabatai-v1-instruct/1762652579.628178
- retrieved_timestamp: 1762652579.628178
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6550607942481504}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5954551751157878}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2054380664652568}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3347315436241611}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4778645833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4263630319148936}}]
- additional_details: {"precision": "bfloat16", "architecture": "Gemma2ForCausalLM", "params_billions": 9.242}

**GoToCompany/llama3-8b-cpt-sahabatai-v1-instruct** (_developer: GoToCompany)
- _uuid: aa363693-a300-4545-b7f3-05492646c202
- evaluation_id: hfopenllm_v2/GoToCompany_llama3-8b-cpt-sahabatai-v1-instruct/1762652579.628486
- retrieved_timestamp: 1762652579.628489
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.523844510343666}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4951292004509417}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.12764350453172205}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26677852348993286}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44884375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3453291223404255}}]
- additional_details: {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}

**internlm/internlm2_5-20b-chat** (_developer: internlm)
- _uuid: a651c814-41e2-4951-bb8f-df799cc6e470
- evaluation_id: hfopenllm_v2/internlm_internlm2_5-20b-chat/1762652580.2279649
- retrieved_timestamp: 1762652580.227966
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7009977969565198}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7473580533672884}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4078549848942598}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3213087248322148}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4558229166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39976728723404253}}]
- additional_details: {"precision": "bfloat16", "architecture": "InternLM2ForCausalLM", "params_billions": 19.86}

**internlm/internlm2_5-1_8b-chat** (_developer: internlm)
- _uuid: d37e87e2-53c3-42fa-b78d-04d2819b14d3
- evaluation_id: hfopenllm_v2/internlm_internlm2_5-1_8b-chat/1762652580.227762
- retrieved_timestamp: 1762652580.227763
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38490870889240547}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4488926786996439}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.15861027190332327}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2902684563758389}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35939583333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.12990359042553193}}]
- additional_details: {"precision": "bfloat16", "architecture": "InternLM2ForCausalLM", "params_billions": 1.89}

**internlm/internlm2-chat-1_8b** (_developer: internlm)
- _uuid: 767b5c7e-6319-487f-906c-2abca794f884
- evaluation_id: hfopenllm_v2/internlm_internlm2-chat-1_8b/1762652580.227562
- retrieved_timestamp: 1762652580.227563
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2386545477111841}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4452271664119214}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0324773413897281}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26593959731543626}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36305208333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18392619680851063}}]
- additional_details: {"precision": "bfloat16", "architecture": "InternLM2ForCausalLM", "params_billions": 1.889}

**internlm/internlm2-7b** (_developer: internlm)
- _uuid: d4bba57d-2a3c-4945-ae47-7830840d0259
- evaluation_id: hfopenllm_v2/internlm_internlm2-7b/1762652580.2273018
- retrieved_timestamp: 1762652580.227303
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.22803680981595092}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5825}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.08571428571428572}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33666666666666667}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43999999999999995}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.19}}]
- additional_details: {"precision": "float16", "architecture": "Unknown", "params_billions": 0.0}

**internlm/internlm2-1_8b** (_developer: internlm)
- _uuid: fc23ef4f-2ef1-4a3e-b029-9d646145e135
- evaluation_id: hfopenllm_v2/internlm_internlm2-1_8b/1762652580.227062
- retrieved_timestamp: 1762652580.227063
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2197702097102355}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3879732800028095}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.021148036253776436}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2483221476510067}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38128125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.15882646276595744}}]
- additional_details: {"precision": "bfloat16", "architecture": "InternLM2ForCausalLM", "params_billions": 8.0}

**internlm/internlm2_5-7b-chat** (_developer: internlm)
- _uuid: 28245528-26e8-48a8-9cc8-68d7a6389bde
- evaluation_id: hfopenllm_v2/internlm_internlm2_5-7b-chat/1762652580.2281651
- retrieved_timestamp: 1762652580.2281659
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5538692890419642}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7073179916851792}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25302114803625375}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34731543624161076}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45938541666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3776595744680851}}]
- additional_details: {"precision": "float16", "architecture": "InternLM2ForCausalLM", "params_billions": 7.738}

**ibivibiv/multimaster-7b-v6** (_developer: ibivibiv)
- _uuid: 7044a4d4-1c07-40ef-917c-d242b61d7877
- evaluation_id: hfopenllm_v2/ibivibiv_multimaster-7b-v6/1762652580.205187
- retrieved_timestamp: 1762652580.205188
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4473075883101283}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.519351871026721}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.055891238670694864}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3036912751677852}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43957291666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30950797872340424}}]
- additional_details: {"precision": "float16", "architecture": "MixtralForCausalLM", "params_billions": 35.428}

**ibivibiv/colossus_120b** (_developer: ibivibiv)
- _uuid: f0bcf710-b1a8-4736-9fd3-6b0ea241155e
- evaluation_id: hfopenllm_v2/ibivibiv_colossus_120b/1762652580.2048829
- retrieved_timestamp: 1762652580.204884
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42759877126025614}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6061408586494191}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.05664652567975831}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3087248322147651}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4733125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3961103723404255}}]
- additional_details: {"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 117.749}

**speakleash/Bielik-11B-v2.3-Instruct** (_developer: speakleash)
- _uuid: 822b7413-b84e-4df0-8aca-cc0e95283a86
- evaluation_id: hfopenllm_v2/speakleash_Bielik-11B-v2.3-Instruct/1762652580.534104
- retrieved_timestamp: 1762652580.534104
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.558290890393046}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5662699020280031}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2084592145015106}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34060402684563756}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4518229166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34441489361702127}}]
- additional_details: {"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 11.169}

**speakleash/Bielik-11B-v2.2-Instruct** (_developer: speakleash)
- _uuid: 70c377ab-41b4-4c30-ade6-65cc52ab916a
- evaluation_id: hfopenllm_v2/speakleash_Bielik-11B-v2.2-Instruct/1762652580.533901
- retrieved_timestamp: 1762652580.5339022
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5551935531057595}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5596561190863629}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2681268882175227}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3313758389261745}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41712499999999997}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3486535904255319}}]
- additional_details: {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 11.169}

**speakleash/Bielik-11B-v2.0-Instruct** (_developer: speakleash)
- _uuid: 4aaff24b-0364-4cc9-9680-5f5c6d04128b
- evaluation_id: hfopenllm_v2/speakleash_Bielik-11B-v2.0-Instruct/1762652580.533494
- retrieved_timestamp: 1762652580.533494
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5252430218486948}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5361579931173499}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11858006042296072}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31711409395973156}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4467083333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3351063829787234}}]
- additional_details: {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 11.169}

**speakleash/Bielik-11B-v2** (_developer: speakleash)
- _uuid: 680f5fa0-fb15-4687-a40b-7807af2e0fe5
- evaluation_id: hfopenllm_v2/speakleash_Bielik-11B-v2/1762652580.533211
- retrieved_timestamp: 1762652580.533211
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23810489501190177}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49308409091594996}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07854984894259819}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28859060402684567}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39244791666666673}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3137466755319149}}]
- additional_details: {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 11.169}

**speakleash/Bielik-11B-v2.1-Instruct** (_developer: speakleash)
- _uuid: 834e5703-00f3-47d6-817f-cf039c53d915
- evaluation_id: hfopenllm_v2/speakleash_Bielik-11B-v2.1-Instruct/1762652580.533698
- retrieved_timestamp: 1762652580.533698
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5089817240477489}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5530119844151298}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26661631419939574}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.337248322147651}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4185208333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34466422872340424}}]
- additional_details: {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 11.169}

**ZeroXClem/L3-Aspire-Heart-Matrix-8B** (_developer: ZeroXClem)
- _uuid: e6d8d952-5a3d-4a97-860c-8275b10c6516
- evaluation_id: hfopenllm_v2/ZeroXClem_L3-Aspire-Heart-Matrix-8B/1762652579.96632
- retrieved_timestamp: 1762652579.966321
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48335305877294465}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5384211938486898}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18277945619335348}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32466442953020136}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4187083333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3784906914893617}}]
- additional_details: {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}

**AI-Sweden-Models/Llama-3-8B-instruct** (_developer: AI-Sweden-Models)
- _uuid: 1d68bd2e-de6e-4327-a8f1-33322eba537e
- evaluation_id: hfopenllm_v2/AI-Sweden-Models_Llama-3-8B-instruct/1762652579.474785
- retrieved_timestamp: 1762652579.474786
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24012841482821137}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4173460154515302}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.03851963746223565}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26593959731543626}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47709375000000004}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25972406914893614}}]
- additional_details: {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}

**yifAI/Llama-3-8B-Instruct-SPPO-score-Iter3_gp_8b-table-0.002** (_developer: yifAI)
- _uuid: 79fad1b7-c458-4f89-9d7a-d58f70ba6c90
- evaluation_id: hfopenllm_v2/yifAI_Llama-3-8B-Instruct-SPPO-score-Iter3_gp_8b-table-0.002/1762652580.6077929
- retrieved_timestamp: 1762652580.607796
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6489658550423987}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49145217071254876}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0755287009063444}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26174496644295303}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38987499999999997}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3519780585106383}}]
- additional_details: {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}

**MTSAIR/Cotype-Nano** (_developer: MTSAIR)
- _uuid: b5fa19ff-9b05-4d71-9d79-54f8dfe4a8ab
- evaluation_id: hfopenllm_v2/MTSAIR_Cotype-Nano/1762652579.742943
- retrieved_timestamp: 1762652579.742944
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3747922179816221}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3864940969601492}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09743202416918428}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2701342281879195}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3289166666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24767287234042554}}]
- additional_details: {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}

**MTSAIR/MultiVerse_70B** (_developer: MTSAIR)
- _uuid: a713dba7-110a-40a0-9d89-d48567d423af
- evaluation_id: hfopenllm_v2/MTSAIR_MultiVerse_70B/1762652579.743202
- retrieved_timestamp: 1762652579.7432032
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5249183278146429}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6183134284931178}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.19259818731117825}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3540268456375839}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47398958333333335}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48603723404255317}}]
- additional_details: {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 72.289}

**Edgerunners/meta-llama-3-8b-instruct-hf-ortho-baukit-34fail-3000total-bf16** (_developer: Edgerunners)
- _uuid: 1e2cd0e7-ce74-4eac-86fb-64412d1d2094
- evaluation_id: hfopenllm_v2/Edgerunners_meta-llama-3-8b-instruct-hf-ortho-baukit-34fail-3000total-bf16/1762652579.592541
- retrieved_timestamp: 1762652579.592542
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7147114101694614}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4979908369885237}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09063444108761329}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2600671140939597}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33415625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36361369680851063}}]
- additional_details: {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}

**ECE-ILAB-PRYMMAL/ILAB-Merging-3B-V2** (_developer: ECE-ILAB-PRYMMAL)
- _uuid: cbdf2130-1b6a-43ae-a503-4fc7acf14a76
- evaluation_id: hfopenllm_v2/ECE-ILAB-PRYMMAL_ILAB-Merging-3B-V2/1762652579.5918348
- retrieved_timestamp: 1762652579.591836
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40289432040319684}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5401935891431586}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.15181268882175228}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3053691275167785}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43321875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38605385638297873}}]
- additional_details: {"precision": "bfloat16", "architecture": "Phi3ForCausalLM", "params_billions": 3.821}

**informatiker/Qwen2-7B-Instruct-abliterated** (_developer: informatiker)
- _uuid: be1ab009-3aa6-43da-8b8e-11e5287a0370
- evaluation_id: hfopenllm_v2/informatiker_Qwen2-7B-Instruct-abliterated/1762652580.2263439
- retrieved_timestamp: 1762652580.226345
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5821708622011817}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5534265515936739}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.263595166163142}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3011744966442953}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38879166666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3873005319148936}}]
- additional_details: {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}

**beowolx/CodeNinja-1.0-OpenChat-7B** (_developer: beowolx)
- _uuid: fbe7d86c-8d1e-474a-bf85-35a139bdb08f
- evaluation_id: hfopenllm_v2/beowolx_CodeNinja-1.0-OpenChat-7B/1762652580.030703
- retrieved_timestamp: 1762652580.030704
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5446770125489258}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4441338669403703}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06722054380664652}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29446308724832215}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42432291666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3015292553191489}}]
- additional_details: {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}

**AGI-0/Art-v0-3B** (_developer: AGI-0)
- _uuid: 162b6d5f-f983-4989-9603-f6baea26b633
- evaluation_id: hfopenllm_v2/AGI-0_Art-v0-3B/1762652579.473539
- retrieved_timestamp: 1762652579.47354
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.319238509377341}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3400959483013824}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24622356495468278}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25922818791946306}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3768229166666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11785239361702128}}]
- additional_details: {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 3.086}

**CausalLM/34b-beta** (_developer: CausalLM)
- _uuid: cc482ca4-031a-4c22-90c2-68322184125b
- evaluation_id: hfopenllm_v2/CausalLM_34b-beta/1762652579.502916
- retrieved_timestamp: 1762652579.502916
- evaluation_results:
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3043247472262486}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5590996102136266}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04833836858006042}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3464765100671141}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37486458333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5324966755319149}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 34.389}
|
HF Open LLM v2
|
CausalLM
|
CausalLM/14B
|
c4376867-854d-44fa-9215-b9c1af7612a4
|
0.0.1
|
hfopenllm_v2/CausalLM_14B/1762652579.502646
|
1762652579.502647
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
CausalLM/14B
|
CausalLM/14B
|
CausalLM
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2788213052478535}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4700462397700626}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0755287009063444}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3028523489932886}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4154791666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3221409574468085}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 14.0}
|
HF Open LLM v2
|
CausalLM
|
CausalLM/preview-1-hf
|
e9fcf09c-14e2-4226-b1e5-b5752ac1a753
|
0.0.1
|
hfopenllm_v2/CausalLM_preview-1-hf/1762652579.503128
|
1762652579.503129
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
CausalLM/preview-1-hf
|
CausalLM/preview-1-hf
|
CausalLM
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5558928088582737}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3614567463880903}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.030211480362537766}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26174496644295303}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34218750000000003}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35970744680851063}}]
|
{"precision": "bfloat16", "architecture": "GlmForCausalLM", "params_billions": 9.543}
|
HF Open LLM v2
|
NucleusAI
|
NucleusAI/nucleus-22B-token-500B
|
f18c51de-f5eb-4986-8c44-35bd71db5e8b
|
0.0.1
|
hfopenllm_v2/NucleusAI_nucleus-22B-token-500B/1762652579.7966561
|
1762652579.7966561
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
NucleusAI/nucleus-22B-token-500B
|
NucleusAI/nucleus-22B-token-500B
|
NucleusAI
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.025654153202391873}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29198007801214715}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3510520833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11619015957446809}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 21.828}
|
HF Open LLM v2
|
frameai
|
frameai/Loxa-4B
|
b8ac82ef-a231-43ee-aaf2-23b0830cfbc3
|
0.0.1
|
hfopenllm_v2/frameai_Loxa-4B/1762652580.160984
|
1762652580.160984
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
frameai/Loxa-4B
|
frameai/Loxa-4B
|
frameai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47648350820268}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42171373309002896}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1095166163141994}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2835570469798658}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33765625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28016954787234044}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 4.018}
|
HF Open LLM v2
|
rhymes-ai
|
rhymes-ai/Aria
|
611c449e-3d86-4dea-94a8-a2b7719fa1ae
|
0.0.1
|
hfopenllm_v2/rhymes-ai_Aria/1762652580.4949272
|
1762652580.494928
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
rhymes-ai/Aria
|
rhymes-ai/Aria
|
rhymes-ai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4773079872516035}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5695312446413633}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1933534743202417}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3624161073825503}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44049202127659576}}]
|
{"precision": "bfloat16", "architecture": "AriaForConditionalGeneration", "params_billions": 25.307}
|
HF Open LLM v2
|
Baptiste-HUVELLE-10
|
Baptiste-HUVELLE-10/LeTriomphant2.2_ECE_iLAB
|
b1632b15-fa00-4476-b3f4-05aba95df664
|
0.0.1
|
hfopenllm_v2/Baptiste-HUVELLE-10_LeTriomphant2.2_ECE_iLAB/1762652579.4943
|
1762652579.4943008
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Baptiste-HUVELLE-10/LeTriomphant2.2_ECE_iLAB
|
Baptiste-HUVELLE-10/LeTriomphant2.2_ECE_iLAB
|
Baptiste-HUVELLE-10
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5076330802271307}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7256319952414622}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44486404833836857}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39932885906040266}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46255208333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5851063829787234}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 72.706}
|
HF Open LLM v2
|
h2oai
|
h2oai/h2o-danube-1.8b-chat
|
ac8f78b5-a9e1-4e17-a1e7-8a7b8dc22a8d
|
0.0.1
|
hfopenllm_v2/h2oai_h2o-danube-1.8b-chat/1762652580.188648
|
1762652580.188649
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
h2oai/h2o-danube-1.8b-chat
|
h2oai/h2o-danube-1.8b-chat
|
h2oai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2198699450790569}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3219657593234448}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.013595166163141994}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25419463087248323}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3988645833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.13139960106382978}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 1.831}
|
HF Open LLM v2
|
h2oai
|
h2oai/h2o-danube3-4b-base
|
3878bb0d-753f-465a-a8c1-8408f8f5bfcf
|
0.0.1
|
hfopenllm_v2/h2oai_h2o-danube3-4b-base/1762652580.18891
|
1762652580.1889112
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
h2oai/h2o-danube3-4b-base
|
h2oai/h2o-danube3-4b-base
|
h2oai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23380851695722904}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3599083951265592}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.022658610271903322}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2911073825503356}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37781250000000005}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2109375}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 3.962}
|
HF Open LLM v2
|
h2oai
|
h2oai/h2o-danube3-4b-chat
|
d3df3cb7-5e79-49e5-9ed1-1e2771318915
|
0.0.1
|
hfopenllm_v2/h2oai_h2o-danube3-4b-chat/1762652580.1891232
|
1762652580.189124
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
h2oai/h2o-danube3-4b-chat
|
h2oai/h2o-danube3-4b-chat
|
h2oai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3628771659197596}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3466170643135169}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04078549848942598}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2600671140939597}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.378125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.22282247340425532}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 3.962}
|
HF Open LLM v2
|
h2oai
|
h2oai/h2o-danube3-500m-chat
|
c917765b-a4b4-4e5d-9c11-eed791349daf
|
0.0.1
|
hfopenllm_v2/h2oai_h2o-danube3-500m-chat/1762652580.1893299
|
1762652580.1893299
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
h2oai/h2o-danube3-500m-chat
|
h2oai/h2o-danube3-500m-chat
|
h2oai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2207941594968018}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3034691168308313}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.01661631419939577}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23070469798657717}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34339583333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11436170212765957}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 0.514}
|
HF Open LLM v2
|
h2oai
|
h2oai/h2o-danube3.1-4b-chat
|
5f5d83bd-91e9-416b-b40d-506f3861ed3f
|
0.0.1
|
hfopenllm_v2/h2oai_h2o-danube3.1-4b-chat/1762652580.189556
|
1762652580.189557
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
h2oai/h2o-danube3.1-4b-chat
|
h2oai/h2o-danube3.1-4b-chat
|
h2oai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5021121734774842}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3608421638178268}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.03323262839879154}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28523489932885904}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41015625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2718583776595745}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 3.962}
|
HF Open LLM v2
|
cgato
|
cgato/TheSalt-L3-8b-v0.3.2
|
aa805bcc-3847-40b5-86eb-397982106d18
|
0.0.1
|
hfopenllm_v2/cgato_TheSalt-L3-8b-v0.3.2/1762652580.100134
|
1762652580.100136
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
cgato/TheSalt-L3-8b-v0.3.2
|
cgato/TheSalt-L3-8b-v0.3.2
|
cgato
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27050337548814923}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29679653176003074}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04758308157099698}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26593959731543626}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38962499999999994}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11394614361702128}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
Artples
|
Artples/L-MChat-7b
|
7aeaf034-1c02-4da7-b7b4-9a27ce759601
|
0.0.1
|
hfopenllm_v2/Artples_L-MChat-7b/1762652579.482251
|
1762652579.482251
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Artples/L-MChat-7b
|
Artples/L-MChat-7b
|
Artples
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5296646231997766}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46003301674679414}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09214501510574018}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3053691275167785}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4028645833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3298703457446808}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
Artples
|
Artples/L-MChat-Small
|
0e5a84e3-b90f-4c20-ad58-4d1cf3517f28
|
0.0.1
|
hfopenllm_v2/Artples_L-MChat-Small/1762652579.4824991
|
1762652579.4825
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Artples/L-MChat-Small
|
Artples/L-MChat-Small
|
Artples
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32870561222002065}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48225627665257265}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0377643504531722}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2676174496644295}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36959375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24642619680851063}}]
|
{"precision": "bfloat16", "architecture": "PhiForCausalLM", "params_billions": 2.78}
|
HF Open LLM v2
|
sumink
|
sumink/qwft
|
6cdf831f-3ccd-4d78-a94f-269ace42fc1c
|
0.0.1
|
hfopenllm_v2/sumink_qwft/1762652580.548597
|
1762652580.548597
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
sumink/qwft
|
sumink/qwft
|
sumink
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11965252197502627}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30021752093452153}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2525167785234899}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3580625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11294880319148937}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2
|
sumink
|
sumink/somerft
|
cb6879a2-41b6-40b6-bb20-723aa0b213e1
|
0.0.1
|
hfopenllm_v2/sumink_somerft/1762652580.5496058
|
1762652580.5496068
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
sumink/somerft
|
sumink/somerft
|
sumink
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14305819669587805}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3093455213252133}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.014350453172205438}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2483221476510067}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40447916666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11170212765957446}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.543}
|
HF Open LLM v2
|
sumink
|
sumink/llftfl7
|
ed7c36f0-5b1a-45ef-be66-f9880cad099d
|
0.0.1
|
hfopenllm_v2/sumink_llftfl7/1762652580.548197
|
1762652580.548198
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
sumink/llftfl7
|
sumink/llftfl7
|
sumink
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17143512546709397}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37864273336631166}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.010574018126888218}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28104026845637586}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36320833333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17428523936170212}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 3.213}
|
HF Open LLM v2
|
sumink
|
sumink/solarmer3
|
59ebeb48-88c4-4c63-92bb-888752ea9dad
|
0.0.1
|
hfopenllm_v2/sumink_solarmer3/1762652580.5489879
|
1762652580.5489888
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
sumink/solarmer3
|
sumink/solarmer3
|
sumink
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3741428299135183}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5265990319952963}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0581570996978852}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2911073825503356}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44013541666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.332280585106383}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 10.732}
|
HF Open LLM v2
|
sumink
|
sumink/llmer
|
8f2bad2c-5c31-433a-bbf0-f1a8f0a80c3a
|
0.0.1
|
hfopenllm_v2/sumink_llmer/1762652580.548394
|
1762652580.548395
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
sumink/llmer
|
sumink/llmer
|
sumink
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3191132860809319}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4884590875207178}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0649546827794562}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2978187919463087}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4039166666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35289228723404253}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
sumink
|
sumink/qwmer
|
2cd4d3ec-2800-4223-ab50-6f9f4a1e1a57
|
0.0.1
|
hfopenllm_v2/sumink_qwmer/1762652580.54879
|
1762652580.548791
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
sumink/qwmer
|
sumink/qwmer
|
sumink
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.22124407682726277}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4298800979582788}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0007552870090634441}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28691275167785235}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4031770833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.22149268617021275}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2
|
sumink
|
sumink/Qmerft
|
11243917-73a3-484e-ac8b-40065c65ea8c
|
0.0.1
|
hfopenllm_v2/sumink_Qmerft/1762652580.5451572
|
1762652580.5451572
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
sumink/Qmerft
|
sumink/Qmerft
|
sumink
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.15639724819035714}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29390930175643937}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0022658610271903325}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2525167785234899}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36876041666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11569148936170212}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.777}
|
HF Open LLM v2
|
sumink
|
sumink/somer
|
282fa475-0ac8-4230-8020-9dbb7fda03da
|
0.0.1
|
hfopenllm_v2/sumink_somer/1762652580.549191
|
1762652580.549192
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
sumink/somer
|
sumink/somer
|
sumink
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29902990731259727}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.519370328606347}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04154078549848943}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2986577181208054}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.465}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3447473404255319}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 10.732}
|
HF Open LLM v2
|
sumink
|
sumink/somer2
|
fee6fbc3-c115-4668-8b5b-35b307c15fe8
|
0.0.1
|
hfopenllm_v2/sumink_somer2/1762652580.549396
|
1762652580.549397
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
sumink/somer2
|
sumink/somer2
|
sumink
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3132433055404106}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5166793474130525}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04682779456193353}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3036912751677852}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46630208333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34325132978723405}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 10.732}
|
HF Open LLM v2
|
OmnicromsBrain
|
OmnicromsBrain/NeuralStar_FusionWriter_4x7b
|
65ba6556-712c-42cc-817b-ad8c2014dc4c
|
0.0.1
|
hfopenllm_v2/OmnicromsBrain_NeuralStar_FusionWriter_4x7b/1762652579.7988968
|
1762652579.798898
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
OmnicromsBrain/NeuralStar_FusionWriter_4x7b
|
OmnicromsBrain/NeuralStar_FusionWriter_4x7b
|
OmnicromsBrain
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5963842604289951}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47762434766958123}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04909365558912387}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2785234899328859}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.401875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2605551861702128}}]
|
{"precision": "float16", "architecture": "MixtralForCausalLM", "params_billions": 24.154}
|
HF Open LLM v2
|
byroneverson
|
byroneverson/Yi-1.5-9B-Chat-abliterated
|
345560e2-c981-4aca-9388-4f3a5e95ace8
|
0.0.1
|
hfopenllm_v2/byroneverson_Yi-1.5-9B-Chat-abliterated/1762652580.070213
|
1762652580.070215
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
byroneverson/Yi-1.5-9B-Chat-abliterated
|
byroneverson/Yi-1.5-9B-Chat-abliterated
|
byroneverson
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5723291976400395}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5401219363002313}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1661631419939577}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29194630872483224}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43886458333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3715093085106383}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.829}
|
HF Open LLM v2
|
byroneverson
|
byroneverson/Yi-1.5-9B-Chat-16K-abliterated
|
dc783bb0-c784-4cf4-888b-36a3bfa37a84
|
0.0.1
|
hfopenllm_v2/byroneverson_Yi-1.5-9B-Chat-16K-abliterated/1762652580.068388
|
1762652580.068392
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
byroneverson/Yi-1.5-9B-Chat-16K-abliterated
|
byroneverson/Yi-1.5-9B-Chat-16K-abliterated
|
byroneverson
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5528453392553979}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5282050829986801}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14123867069486404}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31291946308724833}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4734375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38231382978723405}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.829}
|
HF Open LLM v2
|
byroneverson
|
byroneverson/Mistral-Small-Instruct-2409-abliterated
|
ff0c627b-72b9-45d4-a385-49c8b0ae6b6e
|
0.0.1
|
hfopenllm_v2/byroneverson_Mistral-Small-Instruct-2409-abliterated/1762652580.063036
|
1762652580.063037
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
byroneverson/Mistral-Small-Instruct-2409-abliterated
|
byroneverson/Mistral-Small-Instruct-2409-abliterated
|
byroneverson
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6970759806203096}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5237864400325174}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24773413897280966}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33305369127516776}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36971875000000004}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39228723404255317}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 22.247}
|
HF Open LLM v2
|
Cran-May
|
Cran-May/SCE-2-24B
|
f4ff02eb-7763-41bc-8a86-adbb051603af
|
0.0.1
|
hfopenllm_v2/Cran-May_SCE-2-24B/1762652579.512776
|
1762652579.5127769
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Cran-May/SCE-2-24B
|
Cran-May/SCE-2-24B
|
Cran-May
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5865924635522636}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6264692798019763}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18957703927492447}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.337248322147651}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4528125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.461186835106383}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 23.572}
|
HF Open LLM v2
|
Cran-May
|
Cran-May/merge_model_20250308_2
|
c457473c-6c40-4930-94b8-993d3b1e8937
|
0.0.1
|
hfopenllm_v2/Cran-May_merge_model_20250308_2/1762652579.51357
|
1762652579.5135732
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Cran-May/merge_model_20250308_2
|
Cran-May/merge_model_20250308_2
|
Cran-May
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5932370554572978}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6585311075974459}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4380664652567976}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39093959731543626}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4793541666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5419714095744681}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766}
|
HF Open LLM v2
|
Cran-May
|
Cran-May/SCE-3-24B
|
2d7b9092-a9ad-4f47-b186-db1e1ce7cd6c
|
0.0.1
|
hfopenllm_v2/Cran-May_SCE-3-24B/1762652579.513022
|
1762652579.513023
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Cran-May/SCE-3-24B
|
Cran-May/SCE-3-24B
|
Cran-May
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5465254413844156}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.597283045074691}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18806646525679757}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3464765100671141}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44347916666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4646775265957447}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 23.572}
|
HF Open LLM v2
|
Cran-May
|
Cran-May/T.E-8.1
|
9c9e0887-5561-4789-9521-a3a78e7cfd99
|
0.0.1
|
hfopenllm_v2/Cran-May_T.E-8.1/1762652579.513231
|
1762652579.513231
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Cran-May/T.E-8.1
|
Cran-May/T.E-8.1
|
Cran-May
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7076922565459647}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5581754708123893}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44561933534743203}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31291946308724833}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4505208333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4432347074468085}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2
|
Cran-May
|
Cran-May/merge_model_20250308_4
|
45531924-35ad-4baf-9994-5d5fa3bafd02
|
0.0.1
|
hfopenllm_v2/Cran-May_merge_model_20250308_4/1762652579.514166
|
1762652579.514167
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Cran-May/merge_model_20250308_4
|
Cran-May/merge_model_20250308_4
|
Cran-May
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4539521802151624}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.666435217186487}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4199395770392749}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3976510067114094}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4688125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5366522606382979}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766}
|
HF Open LLM v2
|
Cran-May
|
Cran-May/merge_model_20250308_3
|
5448dbb6-9874-4734-8252-369c7b0189d7
|
0.0.1
|
hfopenllm_v2/Cran-May_merge_model_20250308_3/1762652579.513911
|
1762652579.513912
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Cran-May/merge_model_20250308_3
|
Cran-May/merge_model_20250308_3
|
Cran-May
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6017799438822324}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6271459892225041}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2545317220543807}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3221476510067114}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43204166666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49617686170212766}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766}
|
HF Open LLM v2
|
Cran-May
|
Cran-May/tempmotacilla-cinerea-0308
|
5e5e70f4-c597-415c-ab74-17aaf55b7b28
|
0.0.1
|
hfopenllm_v2/Cran-May_tempmotacilla-cinerea-0308/1762652579.514418
|
1762652579.5144188
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Cran-May/tempmotacilla-cinerea-0308
|
Cran-May/tempmotacilla-cinerea-0308
|
Cran-May
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8084837121061007}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6550960569488126}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5551359516616314}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3624161073825503}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42082291666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5250166223404256}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766}
|
HF Open LLM v2
|
GreenNode
|
GreenNode/GreenNode-small-9B-it
|
d13def83-5ff8-4cde-aef5-b3c268c40c16
|
0.0.1
|
hfopenllm_v2/GreenNode_GreenNode-small-9B-it/1762652579.6324449
|
1762652579.632446
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
GreenNode/GreenNode-small-9B-it
|
GreenNode/GreenNode-small-9B-it
|
GreenNode
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7436125037123721}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.599383874005197}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17447129909365558}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3196308724832215}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42041666666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3927027925531915}}]
|
{"precision": "float16", "architecture": "Gemma2ForCausalLM", "params_billions": 9.242}
|
HF Open LLM v2
|
altomek
|
altomek/YiSM-34B-0rn
|
a9c75810-f51d-4fd3-8c96-6afdbc0f278c
|
0.0.1
|
hfopenllm_v2/altomek_YiSM-34B-0rn/1762652580.010027
|
1762652580.0100281
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
altomek/YiSM-34B-0rn
|
altomek/YiSM-34B-0rn
|
altomek
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.428373382624769}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6140009573868866}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2280966767371601}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3716442953020134}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.445}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4695811170212766}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 34.389}
|
HF Open LLM v2
|
nbeerbower
|
nbeerbower/Nemoties-ChatML-12B
|
3644fc16-b0fa-42d7-b17a-eb8f8332193f
|
0.0.1
|
hfopenllm_v2/nbeerbower_Nemoties-ChatML-12B/1762652580.383542
|
1762652580.383543
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nbeerbower/Nemoties-ChatML-12B
|
nbeerbower/Nemoties-ChatML-12B
|
nbeerbower
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6381999760635115}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5470252374810588}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07854984894259819}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29697986577181207}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45086458333333335}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3550531914893617}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
|
HF Open LLM v2
|
nbeerbower
|
nbeerbower/mistral-nemo-wissenschaft-12B
|
5f68a07f-4442-4453-92c3-b615323da96b
|
0.0.1
|
hfopenllm_v2/nbeerbower_mistral-nemo-wissenschaft-12B/1762652580.388424
|
1762652580.388424
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nbeerbower/mistral-nemo-wissenschaft-12B
|
nbeerbower/mistral-nemo-wissenschaft-12B
|
nbeerbower
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6520133246452745}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5040306120993181}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1216012084592145}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29278523489932884}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41778125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35322473404255317}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
|
HF Open LLM v2
|
nbeerbower
|
nbeerbower/Nemo-Loony-12B-experimental
|
894b90c6-c701-47d8-b930-4e271e28962f
|
0.0.1
|
hfopenllm_v2/nbeerbower_Nemo-Loony-12B-experimental/1762652580.383332
|
1762652580.383332
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nbeerbower/Nemo-Loony-12B-experimental
|
nbeerbower/Nemo-Loony-12B-experimental
|
nbeerbower
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37344357416100393}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38222228797769536}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.015105740181268883}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2701342281879195}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3340625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1589095744680851}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
|
HF Open LLM v2
|
nbeerbower
|
nbeerbower/Lyra4-Gutenberg-12B
|
02606fe0-ca08-4102-9670-8a18a9cc6f81
|
0.0.1
|
hfopenllm_v2/nbeerbower_Lyra4-Gutenberg-12B/1762652580.380318
|
1762652580.380318
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nbeerbower/Lyra4-Gutenberg-12B
|
nbeerbower/Lyra4-Gutenberg-12B
|
nbeerbower
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2212185888996751}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.538669487933139}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1299093655589124}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3187919463087248}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4037916666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35713098404255317}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
|
HF Open LLM v2
|
nbeerbower
|
nbeerbower/Kartoffel-Deepfry-12B
|
09ba1be1-4b42-4eba-810f-a0aed64aafc0
|
0.0.1
|
hfopenllm_v2/nbeerbower_Kartoffel-Deepfry-12B/1762652580.379381
|
1762652580.3793821
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nbeerbower/Kartoffel-Deepfry-12B
|
nbeerbower/Kartoffel-Deepfry-12B
|
nbeerbower
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5021620411618949}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5365374219062301}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06042296072507553}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2961409395973154}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4791666666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3582114361702128}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
|
HF Open LLM v2
|
nbeerbower
|
nbeerbower/Mistral-Nemo-Moderne-12B-FFT-experimental
|
e7337143-6ec7-4467-b6f5-907492705cc9
|
0.0.1
|
hfopenllm_v2/nbeerbower_Mistral-Nemo-Moderne-12B-FFT-experimental/1762652580.3819818
|
1762652580.381983
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nbeerbower/Mistral-Nemo-Moderne-12B-FFT-experimental
|
nbeerbower/Mistral-Nemo-Moderne-12B-FFT-experimental
|
nbeerbower
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33522498082864577}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5234089179237257}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0770392749244713}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28104026845637586}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3714895833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3454953457446808}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
|
HF Open LLM v2
|
nbeerbower
|
nbeerbower/Lyra4-Gutenberg2-12B
|
f9da5237-3903-4bbf-a0bc-0bcf3152f45a
|
0.0.1
|
hfopenllm_v2/nbeerbower_Lyra4-Gutenberg2-12B/1762652580.380519
|
1762652580.3805199
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nbeerbower/Lyra4-Gutenberg2-12B
|
nbeerbower/Lyra4-Gutenberg2-12B
|
nbeerbower
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25851296781428834}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5344527944750038}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11706948640483383}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31291946308724833}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39721874999999995}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35654920212765956}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
|
HF Open LLM v2
|
nbeerbower
|
nbeerbower/SmolNemo-12B-FFT-experimental
|
435e3ce7-479f-4624-978e-25d755dee811
|
0.0.1
|
hfopenllm_v2/nbeerbower_SmolNemo-12B-FFT-experimental/1762652580.383975
|
1762652580.383976
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nbeerbower/SmolNemo-12B-FFT-experimental
|
nbeerbower/SmolNemo-12B-FFT-experimental
|
nbeerbower
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3348005514257725}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3336088810494464}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.01283987915407855}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2600671140939597}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38469791666666664}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.12167553191489362}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_ifd_max_2600_3B
|
41d72b83-3c55-460f-9d21-88866eed6b9a
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_ifd_max_2600_3B/1762652580.1669528
|
1762652580.166954
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_ifd_max_2600_3B
|
godlikehhd/alpaca_data_ifd_max_2600_3B
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.298155560579263}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4626377955326701}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1593655589123867}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2726510067114094}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43455208333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32878989361702127}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 3.086}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_full_3B
|
d7d6baf0-00d3-4960-970c-949bb9919ac9
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_full_3B/1762652580.166356
|
1762652580.166357
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_full_3B
|
godlikehhd/alpaca_data_full_3B
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36957162550920447}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46841893776834337}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1336858006042296}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27768456375838924}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4954791666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.335688164893617}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 3.086}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_score_max_2500
|
b6fd288d-36d5-4499-bf2d-da1fdd1120c5
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_score_max_2500/1762652580.1698968
|
1762652580.169898
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_score_max_2500
|
godlikehhd/alpaca_data_score_max_2500
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3563577973111345}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41801375075895447}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09516616314199396}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2953020134228188}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36270833333333335}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2939660904255319}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_score_max_0.1_2600
|
08195b61-5fe5-4cce-8da4-34b731289278
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_score_max_0.1_2600/1762652580.1691651
|
1762652580.169167
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_score_max_0.1_2600
|
godlikehhd/alpaca_data_score_max_0.1_2600
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3287554799044313}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42522607952607777}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09894259818731117}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2911073825503356}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37064583333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29230385638297873}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_ins_min_5200
|
d976888b-5e17-4e5c-b557-0b48bf36d4f7
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_ins_min_5200/1762652580.1684108
|
1762652580.1684108
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_ins_min_5200
|
godlikehhd/alpaca_data_ins_min_5200
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3359995921931586}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4289279419241076}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.10347432024169184}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28691275167785235}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39055208333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29488031914893614}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_score_max_2600_3B
|
92dc5ec0-5aea-45f5-9237-32b5a65e095b
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_score_max_2600_3B/1762652580.170121
|
1762652580.170122
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_score_max_2600_3B
|
godlikehhd/alpaca_data_score_max_2600_3B
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33577463352792813}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4716306839273412}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.15483383685800603}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2651006711409396}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44744791666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3341921542553192}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 3.086}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_ifd_max_2600
|
017ca821-f6ea-43bc-bac1-28dd30c2341d
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_ifd_max_2600/1762652580.16661
|
1762652580.166613
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_ifd_max_2600
|
godlikehhd/alpaca_data_ifd_max_2600
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3042504997850149}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40285133876405865}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09894259818731117}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3028523489932886}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3508645833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29163896276595747}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_score_max_0.3_2600
|
40e4c93e-7a54-49c2-b513-33edd87f59b0
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_score_max_0.3_2600/1762652580.1694138
|
1762652580.169415
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_score_max_0.3_2600
|
godlikehhd/alpaca_data_score_max_0.3_2600
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33752332699459653}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4151448369012765}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.10347432024169184}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28942953020134227}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37594791666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29130651595744683}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_ifd_min_2600
|
5561b7bd-bd90-445c-b969-8d400e99e629
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_ifd_min_2600/1762652580.167441
|
1762652580.167443
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_ifd_min_2600
|
godlikehhd/alpaca_data_ifd_min_2600
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3749673089624419}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4219047173013076}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09667673716012085}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29194630872483224}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36562500000000003}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.289311835106383}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_score_max_0.7_2600
|
988c6ec3-e967-4cec-993b-e060a5a18e97
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_score_max_0.7_2600/1762652580.169624
|
1762652580.169625
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_score_max_0.7_2600
|
godlikehhd/alpaca_data_score_max_0.7_2600
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3639764713183243}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41845266250678703}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.10725075528700906}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3036912751677852}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3468645833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2982878989361702}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_ifd_me_max_5200
|
e2f13357-053c-42e5-8149-465b4f16d334
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_ifd_me_max_5200/1762652580.167201
|
1762652580.167202
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_ifd_me_max_5200
|
godlikehhd/alpaca_data_ifd_me_max_5200
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36832271705740766}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4153453015610935}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09743202416918428}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2911073825503356}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3482604166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29820478723404253}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_sampled_ifd_new_5200
|
906db90c-7ea4-4878-aa01-06fd1ad0d18a
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_sampled_ifd_new_5200/1762652580.1688168
|
1762652580.168818
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_sampled_ifd_new_5200
|
godlikehhd/alpaca_data_sampled_ifd_new_5200
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36632468516868577}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4177831234050982}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09441087613293052}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2936241610738255}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29247007978723405}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_sampled_ifd_5200
|
e7ca66f4-852b-4b5b-8781-d6272a43c559
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_sampled_ifd_5200/1762652580.1686149
|
1762652580.1686149
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_sampled_ifd_5200
|
godlikehhd/alpaca_data_sampled_ifd_5200
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2923853154075631}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4032969715626326}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.12537764350453173}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3087248322147651}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3520729166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2896442819148936}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_ins_min_2600
|
121f28df-65d6-4a48-aa77-4ee794034032
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_ins_min_2600/1762652580.1682088
|
1762652580.16821
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_ins_min_2600
|
godlikehhd/alpaca_data_ins_min_2600
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33300199027469335}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41873469888886056}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11102719033232629}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2978187919463087}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38534375000000004}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28798204787234044}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_full_2
|
3c550631-c27c-4743-98f3-3ab65c5fa906
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_full_2/1762652580.166118
|
1762652580.166118
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_full_2
|
godlikehhd/alpaca_data_full_2
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31781450994472443}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4216953430035033}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09290030211480363}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2978187919463087}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40515625000000005}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.285405585106383}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_ins_ans_max_5200
|
9c2cee8b-3f35-4a49-814e-ad316fcede7f
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_ins_ans_max_5200/1762652580.167691
|
1762652580.1676931
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_ins_ans_max_5200
|
godlikehhd/alpaca_data_ins_ans_max_5200
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34786477657061043}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40982060224148426}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1027190332326284}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2911073825503356}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3601666666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2900598404255319}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_score_max_5200
|
d877dbd4-b3da-44b5-974a-1267db396435
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_score_max_5200/1762652580.170327
|
1762652580.170327
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_score_max_5200
|
godlikehhd/alpaca_data_score_max_5200
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34454248061809334}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42417102847687554}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09743202416918428}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2978187919463087}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3877916666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29446476063829785}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
godlikehhd
|
godlikehhd/alpaca_data_ins_max_5200
|
cdd1de41-4e85-4872-be9f-e3af4e9221a9
|
0.0.1
|
hfopenllm_v2/godlikehhd_alpaca_data_ins_max_5200/1762652580.1679769
|
1762652580.167978
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
godlikehhd/alpaca_data_ins_max_5200
|
godlikehhd/alpaca_data_ins_max_5200
|
godlikehhd
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32750657145263457}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41550742328078477}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09969788519637462}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2961409395973154}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.361375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2915558510638298}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
tokyotech-llm
|
tokyotech-llm/Llama-3-Swallow-8B-Instruct-v0.1
|
f6729e0a-559f-4087-af75-37634bf0af62
|
0.0.1
|
hfopenllm_v2/tokyotech-llm_Llama-3-Swallow-8B-Instruct-v0.1/1762652580.5769222
|
1762652580.576923
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
tokyotech-llm/Llama-3-Swallow-8B-Instruct-v0.1
|
tokyotech-llm/Llama-3-Swallow-8B-Instruct-v0.1
|
tokyotech-llm
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5507719517546776}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5009389976232003}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07477341389728097}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28942953020134227}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43569791666666663}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3087599734042553}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
shastraai
|
shastraai/Shastra-LLAMA2-Math-Commonsense-SFT
|
563e2894-10bf-43e1-af67-5cd97d52f033
|
0.0.1
|
hfopenllm_v2/shastraai_Shastra-LLAMA2-Math-Commonsense-SFT/1762652580.5147672
|
1762652580.5147672
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
shastraai/Shastra-LLAMA2-Math-Commonsense-SFT
|
shastraai/Shastra-LLAMA2-Math-Commonsense-SFT
|
shastraai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3041507644161935}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.384316753625765}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.017371601208459216}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25922818791946306}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3604479166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.19971742021276595}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 6.738}
|
HF Open LLM v2
|
w4r10ck
|
w4r10ck/SOLAR-10.7B-Instruct-v1.0-uncensored
|
9add85f6-b577-449e-8a2f-ae77a2588bc7
|
0.0.1
|
hfopenllm_v2/w4r10ck_SOLAR-10.7B-Instruct-v1.0-uncensored/1762652580.5912771
|
1762652580.591278
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
w4r10ck/SOLAR-10.7B-Instruct-v1.0-uncensored
|
w4r10ck/SOLAR-10.7B-Instruct-v1.0-uncensored
|
w4r10ck
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38840609582574237}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5301525050503222}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06570996978851963}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29446308724832215}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4639479166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3343583776595745}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 10.732}
|
HF Open LLM v2
|
Ayush-Singh
|
Ayush-Singh/Llama1B-sft-2
|
678cad7f-854b-4dc3-91cc-2d1774ef7faf
|
0.0.1
|
hfopenllm_v2/Ayush-Singh_Llama1B-sft-2/1762652579.4859679
|
1762652579.4859688
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Ayush-Singh/Llama1B-sft-2
|
Ayush-Singh/Llama1B-sft-2
|
Ayush-Singh
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.13743755457741016}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.283428204214368}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24580536912751677}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35520833333333335}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11170212765957446}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 1.236}
|
HF Open LLM v2
|
euclaise
|
euclaise/ReMask-3B
|
a905005d-85fa-44c9-848b-286f9100bab7
|
0.0.1
|
hfopenllm_v2/euclaise_ReMask-3B/1762652580.14753
|
1762652580.147531
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
euclaise/ReMask-3B
|
euclaise/ReMask-3B
|
euclaise
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2419269759792905}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3516779692917367}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.019637462235649546}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26677852348993286}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33409375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.13572140957446807}}]
|
{"precision": "bfloat16", "architecture": "StableLmForCausalLM", "params_billions": 2.795}
|
HF Open LLM v2
|
Langboat
|
Langboat/Mengzi3-8B-Chat
|
13e12b5c-d3bb-4634-967d-e5741e623be1
|
0.0.1
|
hfopenllm_v2/Langboat_Mengzi3-8B-Chat/1762652579.707526
|
1762652579.707527
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Langboat/Mengzi3-8B-Chat
|
Langboat/Mengzi3-8B-Chat
|
Langboat
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.513977357854936}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4683725003203179}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09063444108761329}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27432885906040266}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4077916666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31416223404255317}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
zake7749
|
zake7749/gemma-2-2b-it-chinese-kyara-dpo
|
4fbaf39a-86a1-4b79-aeeb-e14c2de64666
|
0.0.1
|
hfopenllm_v2/zake7749_gemma-2-2b-it-chinese-kyara-dpo/1762652580.612313
|
1762652580.6123142
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
zake7749/gemma-2-2b-it-chinese-kyara-dpo
|
zake7749/gemma-2-2b-it-chinese-kyara-dpo
|
zake7749
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5382075116247114}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4257464897414603}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.08383685800604229}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26677852348993286}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45756250000000004}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25731382978723405}}]
|
{"precision": "bfloat16", "architecture": "Gemma2ForCausalLM", "params_billions": 2.614}
|
HF Open LLM v2
|
thinkcoder
|
thinkcoder/llama3-8b-instruct-lora-8-sft
|
51caac64-fee1-4c7f-b474-1b1e0f71212c
|
0.0.1
|
hfopenllm_v2/thinkcoder_llama3-8b-instruct-lora-8-sft/1762652580.564969
|
1762652580.56497
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
thinkcoder/llama3-8b-instruct-lora-8-sft
|
thinkcoder/llama3-8b-instruct-lora-8-sft
|
thinkcoder
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6480416406246536}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4865011845587858}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.10196374622356495}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26677852348993286}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32345833333333335}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34757313829787234}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
stabilityai
|
stabilityai/StableBeluga2
|
ca7ae45f-833a-4ce2-9fb7-27601e9434c8
|
0.0.1
|
hfopenllm_v2/stabilityai_StableBeluga2/1762652580.535889
|
1762652580.5358899
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
stabilityai/StableBeluga2
|
stabilityai/StableBeluga2
|
stabilityai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37871403431783224}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5824128134553807}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04380664652567976}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3162751677852349}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47296875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3326130319148936}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 68.977}