@@ -15370,7 +15370,19 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             }
         }
     } else if (name.find("attn_v.weight") != std::string::npos) {
-        if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
+        if (qs.model.hparams.n_expert >= 8) {
+            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
+            // TODO: explore better strategies
+            new_type = GGML_TYPE_Q8_0;
+        }
+        else if (qs.model.hparams.n_gqa() >= 7) {
+            // The Llama 70B models have 8 heads sharing the same attn_v weights (-> GQA 8). As a result, the attn_v.weight tensor is
+            // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
+            // nearly negligible increase in model size by quantizing this tensor with more bits.
+            // That logic applies also to models like Yi 34B (-> GQA 7) and Mistral Large 123B (-> GQA 12).
+            if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
             new_type = (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2) ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2)) {
@@ -15389,26 +15401,15 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
-        else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && qs.model.hparams.n_gqa() >= 4 ) {
+        else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && ( qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2) ) {
             new_type = GGML_TYPE_Q5_K;
         }
         else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
                 use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
-        if (qs.model.type == MODEL_70B) {
-            // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
-            // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
-            // nearly negligible increase in model size by quantizing this tensor with more bits:
-            if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
-        }
-        if (qs.model.hparams.n_expert == 8) {
-            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
-            // TODO: explore better strategies
-            new_type = GGML_TYPE_Q8_0;
-        }
         ++qs.i_attention_wv;
     } else if (name.find("attn_k.weight") != std::string::npos) {
-        if (qs.model.hparams.n_expert == 8) {
+        if (qs.model.hparams.n_expert >= 8) {
             // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
             // TODO: explore better strategies
             new_type = GGML_TYPE_Q8_0;
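
A possible sanity check on the size arguments in the comments above (not part of the patch): the standalone C++ sketch below works through both, assuming n_gqa() is n_head / n_head_kv as in llama_hparams, and using illustrative Llama-70B-like and Mixtral-8x7B-like dimensions with an assumed ~4.5 bpw k-quant baseline; none of these numbers are read from this PR.

// Illustrative only: not part of the diff; the dimensions are assumed example values.
#include <cstdint>
#include <cstdio>

int main() {
    // GQA branch: why attn_v.weight is cheap to upgrade.
    // Llama-70B-like shape: 64 query heads, 8 KV heads, head_dim 128.
    const uint64_t n_embd    = 8192;
    const uint64_t n_head    = 64;
    const uint64_t n_head_kv = 8;
    const uint64_t head_dim  = n_embd / n_head;

    const uint64_t n_gqa   = n_head / n_head_kv;             // -> 8
    const uint64_t q_elems = n_embd * n_head    * head_dim;  // attn_q.weight elements
    const uint64_t v_elems = n_embd * n_head_kv * head_dim;  // attn_v.weight elements
    printf("n_gqa = %llu, attn_q/attn_v size ratio = %llu\n",
           (unsigned long long) n_gqa, (unsigned long long) (q_elems / v_elems));

    // n_expert >= 8 branch: rough cost of bumping attn_v and attn_k to Q8_0 (~8.5 bpw)
    // from an assumed ~4.5 bpw baseline, i.e. ~4 extra bits per weight.
    // Mixtral-8x7B-like shape: n_embd 4096, 8 KV heads of dim 128, 32 layers.
    const uint64_t kv_elems_per_layer = 4096ULL * 8 * 128;  // attn_v (or attn_k) per layer
    const uint64_t n_layer            = 32;
    const double   extra_bits         = 4.0;

    const double extra_mib = 2.0 /* attn_v + attn_k */ * n_layer * kv_elems_per_layer
                           * extra_bits / 8.0 / (1024.0 * 1024.0);
    printf("~%.0f MiB extra for Q8_0 attn_v + attn_k\n", extra_mib);
    return 0;
}

Under these assumptions the second figure comes out to ~128 MiB, which is consistent with the "~128MB" estimate in the comment.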