Skip to content

Commit 17d3714

Browse files
jukofyork authored and Nexesenex committed
Fix glm4moe bug (ggml-org#15088)
1 parent 5ecb83a commit 17d3714

File tree

1 file changed

+0
-4
lines changed

1 file changed

+0
-4
lines changed

src/llama-model.cpp

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -14209,10 +14209,6 @@ struct llm_build_glm4_moe : public llm_graph_context {
1420914209
LLM_FFN_SILU, LLM_FFN_PAR, il);
1421014210
cb(cur, "ffn_out", il);
1421114211
} else {
14212-
// MoE layer with shared experts
14213-
const int64_t n_expert = hparams.n_expert;
14214-
const int64_t n_expert_used = hparams.n_expert_used;
14215-
1421614212
// Process routed experts using existing MoE infrastructure
1421714213
ggml_tensor * routed_out = build_moe_ffn(cur,
1421814214
model.layers[il].ffn_gate_inp,

0 commit comments

Comments (0)