
Commit 7d337d0

Slight reorder of the attn.weight tree
It also applies the attn_v.weight logic I used for IQ2 and IQ3, but only where that logic is already implied by the existing quant strategies, as a compromise to avoid disturbing Ikawrakow's quant strategies too much.
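In effect, the attn_v.weight branch now checks the model shape before the per-ftype rules: an 8+-expert MoE forces Q8_0, a GQA factor of 7 or more bumps Q3_K/Q4_K up to Q5_K, and only then do the existing ftype-specific rules apply. The sketch below condenses that order into a hypothetical standalone helper (pick_attn_v_type is not a real llama.cpp function; the actual logic stays inline in llama_tensor_get_type, as shown in the diff below):

    // Hypothetical condensed helper, for illustration only: the real logic lives inline
    // in llama_tensor_get_type(). It shows the branch order introduced by this commit.
    static ggml_type pick_attn_v_type(const quantize_state_internal & qs, ggml_type new_type, llama_ftype ftype) {
        if (qs.model.hparams.n_expert >= 8) {
            // 8+ experts: attn_v is tiny next to the expert FFN tensors, so Q8_0 costs only ~128MB
            new_type = GGML_TYPE_Q8_0;
        }
        else if (qs.model.hparams.n_gqa() >= 7) {
            // high GQA (Llama 70B, Yi 34B, Mistral Large 123B): attn_v is much smaller than attn_q,
            // so one quant step up is nearly free
            if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
            new_type = (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2) ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
        }
        // ... the remaining ftype-specific rules follow unchanged ...
        return new_type;
    }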
1 parent 6398663 commit 7d337d0

File tree: 1 file changed (+15, −14)
src/llama.cpp

Lines changed: 15 additions & 14 deletions
@@ -15370,7 +15370,19 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             }
         }
     } else if (name.find("attn_v.weight") != std::string::npos) {
-        if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
+        if (qs.model.hparams.n_expert >= 8) {
+            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
+            // TODO: explore better strategies
+            new_type = GGML_TYPE_Q8_0;
+        }
+        else if (qs.model.hparams.n_gqa() >= 7) {
+            // The Llama 70B models have 8 heads sharing the same attn_v weights (-> GQA 8). As a result, the attn_v.weight tensor is
+            // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
+            // nearly negligible increase in model size by quantizing this tensor with more bits.
+            // That logic applies also to models like Yi 34B (-> GQA 7) and Mistral Large 123B (-> GQA 12).
+            if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
             new_type = (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2) ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2)) {
@@ -15389,26 +15401,15 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
-        else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && qs.model.hparams.n_gqa() >= 4) {
+        else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2)) {
             new_type = GGML_TYPE_Q5_K;
         }
         else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
                 use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
-        if (qs.model.type == MODEL_70B) {
-            // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
-            // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
-            // nearly negligible increase in model size by quantizing this tensor with more bits:
-            if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
-        }
-        if (qs.model.hparams.n_expert == 8) {
-            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
-            // TODO: explore better strategies
-            new_type = GGML_TYPE_Q8_0;
-        }
         ++qs.i_attention_wv;
     } else if (name.find("attn_k.weight") != std::string::npos) {
-        if (qs.model.hparams.n_expert == 8) {
+        if (qs.model.hparams.n_expert >= 8) {
             // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
             // TODO: explore better strategies
             new_type = GGML_TYPE_Q8_0;
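For reference, n_gqa() in llama.cpp is the ratio of query heads to key/value heads, which is what the new >= 7 threshold keys on. The small standalone check below recomputes the ratios for the models named in the comment; the head counts are the commonly published values for these models and are an assumption here, not something read from a GGUF:

    // Quick sanity check of the GQA ratios mentioned above (assumed head counts, not read from any model file).
    #include <cstdio>

    int main() {
        struct { const char * name; int n_head; int n_head_kv; } models[] = {
            { "Llama 2 70B",        64, 8 },   // -> GQA 8
            { "Yi 34B",             56, 8 },   // -> GQA 7
            { "Mistral Large 123B", 96, 8 },   // -> GQA 12
        };
        for (const auto & m : models) {
            const int gqa = m.n_head / m.n_head_kv;   // same quantity as qs.model.hparams.n_gqa()
            std::printf("%-20s n_gqa = %2d -> %s the n_gqa() >= 7 bump\n",
                        m.name, gqa, gqa >= 7 ? "gets" : "skips");
        }
        return 0;
    }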
