@@ -4494,7 +4494,7 @@ static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context *
 
     // heuristic to choose workgroup size
     uint32_t dmmv_wg = DMMV_WG_SIZE_SUBGROUP;
-    if (ctx->device->vendor_id == VK_VENDOR_ID_NVIDIA || ctx->device->vendor_id == VK_VENDOR_ID_INTEL) {
+    if (ctx->device->vendor_id == VK_VENDOR_ID_NVIDIA) {
         // Prefer larger workgroups when M is small, to spread the work out more
         // and keep more SMs busy.
         // q6_k seems to prefer small workgroup size even for "medium" values of M.
@@ -4510,7 +4510,7 @@ static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context *
     }
 
     if (b_type == GGML_TYPE_Q8_1) {
-        return ctx->device->pipeline_dequant_mul_mat_vec_q8_1_f32[DMMV_WG_SIZE_SUBGROUP][a_type][num_cols-1];
+        return ctx->device->pipeline_dequant_mul_mat_vec_q8_1_f32[dmmv_wg][a_type][num_cols-1];
     }
 
     return b_type == GGML_TYPE_F32 ? ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[dmmv_wg][a_type][num_cols-1] : ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[dmmv_wg][a_type][num_cols-1];
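
Note: these two hunks make the Q8_1 path honor the same `dmmv_wg` heuristic as the F32/F16 paths instead of hard-coding `DMMV_WG_SIZE_SUBGROUP`, and restrict the heuristic itself to NVIDIA. A minimal sketch of that selection pattern, with hypothetical names (`PipelineSet`, `choose_dmmv_wg`, `WG_SUBGROUP`, `WG_LARGE`) and illustrative thresholds standing in for the real ggml-vulkan tables:

```cpp
#include <array>
#include <cstdint>

// Hypothetical stand-ins for the two workgroup-size variants each pipeline
// is compiled with (the real code indexes per-device pipeline tables).
enum DmmvWg : uint32_t { WG_SUBGROUP = 0, WG_LARGE = 1, WG_COUNT = 2 };

struct PipelineSet {
    // One precompiled pipeline handle per workgroup-size variant.
    std::array<void *, WG_COUNT> variant{};
};

// Pick the workgroup-size variant from the same inputs the heuristic uses:
// vendor, row count M, and the quantization type of the A matrix.
uint32_t choose_dmmv_wg(bool is_nvidia, uint32_t m, bool a_is_q6_k) {
    uint32_t wg = WG_SUBGROUP;
    if (is_nvidia) {
        // Prefer the larger workgroup when M is small, to keep more SMs busy;
        // q6_k prefers the small workgroup even for "medium" M, so its
        // threshold is lower. The values here are illustrative only.
        const uint32_t threshold = a_is_q6_k ? 64 : 256;
        if (m <= threshold) {
            wg = WG_LARGE;
        }
    }
    return wg;
}

// After the change, every return path uses the heuristic result, so the
// Q8_1 case picks the same variant as the F32/F16 cases.
void * select_pipeline(const PipelineSet & set, uint32_t dmmv_wg) {
    return set.variant[dmmv_wg];
}
```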
@@ -5939,7 +5939,12 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context&
         }
     }
     if (quantize_y) {
-        ggml_vk_quantize_q8_1(ctx, subctx, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE }, y_ne * ne12 * ne13, true);
+        if (ctx->prealloc_y_last_pipeline_used != to_q8_1.get() ||
+            ctx->prealloc_y_last_tensor_used != src1) {
+            ggml_vk_quantize_q8_1(ctx, subctx, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE }, y_ne * ne12 * ne13, true);
+            ctx->prealloc_y_last_pipeline_used = to_q8_1.get();
+            ctx->prealloc_y_last_tensor_used = src1;
+        }
     }
 
     // For batch_n, the A matrix is the same for each batch, and B/D use the row stride as the batch stride
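
Note: this hunk adds a small cache so the q8_1 quantization of src1 into the preallocated Y buffer is skipped when that buffer already holds the result for the same source tensor produced by the same conversion pipeline. A minimal sketch of the pattern, using hypothetical simplified types (`Context`, `Pipeline`, `Tensor`, `run_convert`) rather than the real ggml-vulkan API:

```cpp
// Hypothetical, simplified types; the real code keeps these fields on
// ggml_backend_vk_context and dispatches ggml_vk_quantize_q8_1.
struct Pipeline {};
struct Tensor {};

struct Context {
    // Cache key for the contents of the preallocated Y buffer:
    // which conversion pipeline wrote it, and from which source tensor.
    const Pipeline * prealloc_y_last_pipeline_used = nullptr;
    const Tensor *   prealloc_y_last_tensor_used   = nullptr;
};

// Stand-in for the actual conversion/quantization dispatch.
void run_convert(Context & /*ctx*/, const Pipeline & /*pipe*/, const Tensor & /*src*/) {}

// Re-run the conversion only if the preallocated buffer does not already
// hold this tensor converted by this pipeline; otherwise reuse it as-is.
void convert_src1_cached(Context & ctx, const Pipeline & to_q8_1, const Tensor & src1) {
    if (ctx.prealloc_y_last_pipeline_used != &to_q8_1 ||
        ctx.prealloc_y_last_tensor_used   != &src1) {
        run_convert(ctx, to_q8_1, src1);
        ctx.prealloc_y_last_pipeline_used = &to_q8_1;
        ctx.prealloc_y_last_tensor_used   = &src1;
    }
}
```

Keeping the cache key on the context (rather than local to the dispatch) is what lets repeated mat-vec dispatches over the same src1 reuse the buffer; presumably the fields are reset elsewhere when the preallocated buffer is reallocated, which is outside this hunk.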