| model | quant method | truthfulqa_mc2 acc ± stderr | arc:challenge acc ± stderr | hellaswag acc | winogrande acc | average | throughput (tok/s) | peak process vram (GB) | calibration/quantization time | throughput w/ torch.compile | peak process vram w/ torch.compile | throughput w/ marlin | peak process vram w/ marlin |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Llama 3.1 8B | baseline (bf16) | 0.5456 ± 0.0150 | 0.5623 ± 0.0145 | 0.5973 ± 0.0049 | 0.7372 ± 0.0124 | 0.6106 | 38.82 | 16.77302 | null | 79.27 | 17.12954 | null | null |
| Llama 3.1 8B | AWQ (4 bit) | 0.5281 ± 0.0150 | 0.5589 ± 0.0145 | 0.5869 ± 0.0049 | 0.7285 ± 0.0125 | 0.6006 | 43.09 | 8.11808 | ~10 minutes | null | null | null | null |
| Llama 3.1 8B | GPTQModel (4 bit) | null | null | null | null | null | 36.83 | 6.31872 | ~23 minutes | null | null | 37.84 | 6.31872 |
| Llama 3.1 8B | AutoGPTQ (4 bit) | 0.5361 ± 0.0149 | 0.5589 ± 0.0145 | 0.5758 ± 0.0049 | 0.7103 ± 0.0127 | 0.595275 | 43.71 | 6.49278 | ~30 minutes | null | null | null | null |
| Llama 3.1 8B | bnb (nf4) | 0.5446 ± 0.0149 | 0.5563 ± 0.0145 | 0.5769 ± 0.0049 | 0.7301 ± 0.0125 | 0.601975 | 24.35 | 6.44035 | ~1 minute | null | null | null | null |
| Llama 3.1 8B | optimum quanto (w4a16) | 0.5162 ± 0.0150 | 0.5427 ± 0.0146 | 0.5856 ± 0.0049 | 0.7419 ± 0.0123 | 0.5966 | 31.22 | 6.57667 | ~30 seconds | null | null | null | null |
| Llama 3.1 8B | torchao (int4wo) | 0.5166 ± 0.0150 | 0.5418 ± 0.0146 | 0.5855 ± 0.0049 | 0.7395 ± 0.0123 | 0.59585 | 24.98 | 6.49907 | ~20 seconds | 85.76 | 6.8493 | null | null |
| Llama 3.1 8B | HQQ (4 bit) | 0.5485 ± 0.0150 | 0.5495 ± 0.0145 | 0.5859 ± 0.0049 | 0.7285 ± 0.0125 | 0.6031 | 34.44 | 6.71718 | null | null | null | null | null |
| Llama 3.1 8B | HIGGS (4 bit) | 0.5214 ± 0.0149 | 0.5486 ± 0.0145 | 0.5836 ± 0.0049 | 0.7182 ± 0.0126 | 0.59295 | 28.35 | 6.81994 | ~5 minutes | null | null | null | null |
| Llama 3.1 8B | bnb (llm.int8()) | 0.5446 ± 0.0150 | 0.5623 ± 0.0145 | 0.5949 ± 0.0049 | 0.7324 ± 0.0124 | 0.60855 | 20.75 | 9.70772 | ~20 seconds | null | null | null | null |
| Llama 3.1 8B | HQQ (8 bit) | 0.5441 ± 0.0150 | 0.5666 ± 0.0145 | 0.5979 ± 0.0049 | 0.7380 ± 0.0124 | 0.61165 | 9.07 | 10.6011 | ~80 seconds | null | null | null | null |
| Llama 3.1 8B | optimum quanto (int8wo) | 0.5436 ± 0.0150 | 0.5640 ± 0.0145 | 0.5992 ± 0.0049 | 0.7372 ± 0.0124 | 0.611 | 15.59 | 9.81887 | ~20 seconds | 16.01 | 10.07052 | null | null |
| Llama 3.1 8B | torchao (int8wo) | 0.5449 ± 0.0150 | 0.5640 ± 0.0145 | 0.5975 ± 0.0049 | 0.7380 ± 0.0124 | 0.6111 | 5.98 | 13.07155 | ~30 seconds | 43.79 | 13.66714 | null | null |
| Llama 3.1 8B | fbgemm (fp8) | 0.5430 ± 0.0150 | 0.5580 ± 0.0145 | 0.5958 ± 0.0049 | 0.7411 ± 0.0123 | 0.609475 | 33.83 | 10.00551 | ~30 seconds | null | null | null | null |
| Llama 3.1 8B | compressed-tensors (fp8) | 0.5398 ± 0.0151 | 0.5589 ± 0.0145 | 0.5950 ± 0.0049 | 0.7356 ± 0.0124 | 0.607325 | null | null | null | null | null | null | null |
| Llama 3.1 8B | VPTQ (2 bit) | 0.4543 ± 0.0149 | 0.4923 ± 0.0146 | 0.5258 ± 0.0050 | 0.6930 ± 0.0130 | 0.54135 | 32.35 | 5.28902 | ~2 hours | 31.48 | 5.28692 | null | null |
| Llama 3.1 8B | AQLM + PV (2 bit) | 0.5036 ± 0.0148 | 0.5230 ± 0.0146 | 0.5628 ± 0.0050 | 0.6938 ± 0.0130 | 0.5708 | 22.28 | 4.84023 | ~1 day | 27.27 | 4.85491 | null | null |
| Llama 3.1 8B | GPTQModel (2 bit) | null | null | null | null | null | 19.02 | 18.45284 | ~26 minutes | null | null | null | null |
| Llama 3.1 8B | AutoGPTQ (2 bit) | 0.5127 ± 0.0150 | 0.1988 ± 0.0117 | 0.2665 ± 0.0044 | 0.4799 ± 0.0140 | 0.364475 | 6.25 | 11.02473 | ~26 minutes | null | null | null | null |
| Llama 3.1 70B | baseline (bf16) | 0.6068 ± 0.0147 | 0.6732 ± 0.0137 | 0.6666 ± 0.0047 | 0.8248 ± 0.0107 | 0.69285 | 9.73 | 142.26869 | null | 10.1 | 142.81395 | null | null |
| Llama 3.1 70B | AWQ (4 bit) | 0.5706 ± 0.0150 | 0.6681 ± 0.0138 | 0.6598 ± 0.0047 | 0.8193 ± 0.0108 | 0.67945 | 15.74 | 43.75288 | ~1 hour | null | null | null | null |
| Llama 3.1 70B | GPTQModel (4 bit) | null | null | null | null | null | 14.84 | 40.5757 | null | null | null | 15.28 | 40.5757 |
| Llama 3.1 70B | AutoGPTQ (4 bit) | 0.5937 ± 0.0147 | 0.6655 ± 0.0138 | 0.6568 ± 0.0047 | 0.8185 ± 0.0108 | 0.683625 | 0.46 | 42.40022 | ~2 hours | null | null | null | null |
| Llama 3.1 70B | bnb (nf4) | 0.5939 ± 0.0148 | 0.6724 ± 0.0137 | 0.6592 ± 0.0047 | 0.8098 ± 0.0110 | 0.683825 | 11.27 | 44.62949 | ~2 minutes | null | null | null | null |
| Llama 3.1 70B | optimum quanto (w4a16) | 0.4847 ± 0.0164 | 0.2082 ± 0.0119 | 0.2582 ± 0.0044 | 0.4878 ± 0.0140 | 0.359725 | 12.97 | 80.39013 | ~2 minutes | null | null | null | null |
| Llama 3.1 70B | torchao (int4wo) | 0.4847 ± 0.0164 | 0.2108 ± 0.0119 | 0.2581 ± 0.0044 | 0.4980 ± 0.0141 | 0.3629 | 10.56 | 41.6054 | ~2 minutes | 18.95 | 42.26181 | null | null |
| Llama 3.1 70B | HQQ (4 bit) | 0.5882 ± 0.0146 | 0.6706 ± 0.0137 | 0.6597 ± 0.0047 | 0.8035 ± 0.0112 | 0.6805 | 13.92 | 44.50366 | ~10 minutes | null | null | null | null |
| Llama 3.1 70B | HIGGS (4 bit) | 0.4871 ± 0.0163 | 0.1971 ± 0.0116 | 0.2575 ± 0.0044 | 0.4893 ± 0.0140 | 0.35775 | 11.61 | 41.52571 | ~6 minutes | 12.38 | 41.02868 | null | null |
| Llama 3.1 70B | bnb (llm.int8()) | 0.5604 ± 0.0169 | 0.6544 ± 0.0139 | 0.6382 ± 0.0048 | 0.7940 ± 0.0114 | 0.66175 | 6.87 | 74.26428 | ~2 minutes | null | null | null | null |
| Llama 3.1 70B | HQQ (8 bit) | 0.6112 ± 0.0146 | 0.6732 ± 0.0137 | 0.6661 ± 0.0047 | 0.8327 ± 0.0105 | 0.6958 | 0.98 | 80.52435 | ~10 minutes | 0.98 | 80.39013 | null | null |
| Llama 3.1 70B | optimum quanto (int8wo) | 0.5591 ± 0.0150 | 0.6459 ± 0.0140 | 0.6413 ± 0.0048 | 0.7979 ± 0.0113 | 0.66105 | 1.79 | 74.21192 | ~2 minutes | 1.8 | 74.21401 | null | null |
| Llama 3.1 70B | torchao (int8wo) | 0.6094 ± 0.0146 | 0.6732 ± 0.0137 | 0.6659 ± 0.0047 | 0.8240 ± 0.0107 | 0.693125 | 0.65 | 89.85038 | ~2 minutes | 0.65 | 89.84619 | null | null |
| Llama 3.1 70B | fbgemm (fp8) | 0.6075 ± 0.0146 | 0.6732 ± 0.0137 | 0.6671 ± 0.0047 | 0.8216 ± 0.0108 | 0.69235 | 13.61 | 74.04624 | ~6 minutes | null | null | null | null |
| Llama 3.1 70B | compressed-tensors (fp8) | 0.6062 ± 0.0146 | 0.6741 ± 0.0137 | 0.6652 ± 0.0047 | 0.8216 ± 0.0108 | 0.691775 | null | null | null | null | null | null | null |
| Llama 3.1 70B | VPTQ (2 bit) | 0.5451 ± 0.0150 | 0.6212 ± 0.0142 | 0.6073 ± 0.0049 | 0.7901 ± 0.0114 | 0.640925 | 6.29 | 24.89949 | ~19 hours | 6.18 | 24.89949 | null | null |
| Llama 3.1 70B | AQLM + PV (2 bit) | 0.5706 ± 0.0150 | 0.6365 ± 0.0141 | 0.6401 ± 0.0048 | 0.8066 ± 0.0111 | 0.66345 | 6.75 | 23.12739 | 10-14 days | 7.09 | 23.60764 | null | null |
| Llama 3.1 70B | GPTQModel (2 bit) | null | null | null | null | null | null | null | null | null | null | null | null |
| Llama 3.1 70B | AutoGPTQ (2 bit) | 0.4556 ± 0.0147 | 0.2807 ± 0.0131 | 0.3642 ± 0.0048 | 0.5470 ± 0.0140 | 0.411875 | null | null | null | null | null | null | null |