From 11b4186f1af00b3709d92f0cbb49e56481ec03b9 Mon Sep 17 00:00:00 2001
From: Juk Armstrong <69222624+jukofyork@users.noreply.github.com>
Date: Tue, 5 Aug 2025 13:42:55 +0100
Subject: [PATCH] Fix `glm4moe` bug

---
 src/llama-model.cpp | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 44f89003b3917..f53fa2f878918 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -13800,10 +13800,6 @@ struct llm_build_glm4_moe : public llm_graph_context {
                         LLM_FFN_SILU, LLM_FFN_PAR, il);
                 cb(cur, "ffn_out", il);
             } else {
-                // MoE layer with shared experts
-                const int64_t n_expert = hparams.n_expert;
-                const int64_t n_expert_used = hparams.n_expert_used;
-
                 // Process routed experts using existing MoE infrastructure
                 ggml_tensor * routed_out = build_moe_ffn(cur,
                         model.layers[il].ffn_gate_inp,
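
Note on why deleting these four lines is a fix rather than a cleanup:
`llm_graph_context` already exposes `n_expert` and `n_expert_used` as
members, and (assuming the member initialization in llama-graph.cpp,
where `n_expert_used` is set to `hparams.n_expert` during warmup so
every expert's weights get touched once) the deleted locals shadowed
those members with the raw `hparams` values. The `build_moe_ffn` call
below then received the shadowing locals instead of the members,
silently discarding the warmup override. The following is a minimal,
self-contained C++ sketch of that shadowing pattern; all names are
hypothetical stand-ins mirroring llama.cpp, not the real
implementation:

    // Sketch of the shadowing bug fixed by this patch. The warmup
    // override in the constructor is an assumption based on how
    // llm_graph_context appears to initialize its members.
    #include <cstdint>
    #include <cstdio>

    struct hparams_t { int64_t n_expert = 128; int64_t n_expert_used = 8; };
    struct cparams_t { bool warmup = true; };

    struct graph_ctx {
        const hparams_t & hparams;
        const int64_t n_expert;
        const int64_t n_expert_used; // warmup forces all experts on

        graph_ctx(const hparams_t & hp, const cparams_t & cp)
            : hparams(hp),
              n_expert(hp.n_expert),
              n_expert_used(cp.warmup ? hp.n_expert : hp.n_expert_used) {}

        void build_moe_layer_buggy() const {
            // The deleted lines: locals shadow the members above and
            // silently drop the warmup override.
            const int64_t n_expert      = hparams.n_expert;
            const int64_t n_expert_used = hparams.n_expert_used;
            printf("buggy: %lld experts used\n", (long long) n_expert_used);
            (void) n_expert;
        }

        void build_moe_layer_fixed() const {
            // After the patch, the members are used directly.
            printf("fixed: %lld experts used\n", (long long) n_expert_used);
        }
    };

    int main() {
        hparams_t hp;
        cparams_t cp;                  // cp.warmup == true
        graph_ctx ctx(hp, cp);
        ctx.build_moe_layer_buggy();   // prints 8   (wrong during warmup)
        ctx.build_moe_layer_fixed();   // prints 128 (all experts touched)
    }

With the locals removed, the `n_expert` and `n_expert_used` names in the
`build_moe_ffn` call resolve to the context members again, matching how
the other MoE graph builders in llama-model.cpp pass these values.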