[SmolLM3] Add Backbone, CausalLM + Converter for HuggingFace Weights #2327
base: master
@@ -0,0 +1,52 @@
from keras import ops
from keras import random


def rotate_half(x):
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return ops.concatenate((-x2, x1), axis=-1)


def apply_rotary_pos_emb(q, k, cos, sin, expansion_axis=1):
    cos = ops.expand_dims(cos, expansion_axis)
    sin = ops.expand_dims(sin, expansion_axis)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states, n_rep):
    batch, num_key_value_heads, slen, head_dim = ops.shape(hidden_states)
    if n_rep == 1:
        return hidden_states
    hidden_states = ops.expand_dims(hidden_states, axis=2)
    target_shape = (batch, num_key_value_heads, n_rep, slen, head_dim)
    hidden_states = ops.broadcast_to(hidden_states, target_shape)
    return ops.reshape(
        hidden_states, [batch, num_key_value_heads * n_rep, slen, head_dim]
    )


def eager_attention_forward(
    module,
    query,
    key,
    value,
    attention_mask,
    scaling: float,
    dropout: float = 0.0,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = (
        ops.matmul(query, ops.transpose(key_states, axes=(0, 1, 3, 2)))
        * scaling
    )

    # Apply attention mask if provided
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : ops.shape(key_states)[-2]]
        attn_weights = ops.add(attn_weights, causal_mask)

    attn_weights = ops.softmax(attn_weights, axis=-1)
    attn_weights = random.dropout(attn_weights, rate=dropout)

    attn_output = ops.matmul(attn_weights, value_states)
    attn_output = ops.transpose(attn_output, axes=(0, 2, 1, 3))

    return attn_output, attn_weights
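
As a quick sanity check of how these helpers compose, here is a hedged usage sketch; the _AttentionStub class, tensor shapes, and scaling value below are hypothetical and not part of this PR:

from keras import ops
from keras import random

# Hypothetical stand-in for the attention layer: 8 query heads sharing
# 2 key/value heads, so each KV head is repeated 4 times by repeat_kv.
class _AttentionStub:
    num_key_value_groups = 4

query = random.uniform((1, 8, 16, 64))  # (batch, num_heads, seq, head_dim)
key = random.uniform((1, 2, 16, 64))    # (batch, num_kv_heads, seq, head_dim)
value = random.uniform((1, 2, 16, 64))

attn_output, attn_weights = eager_attention_forward(
    _AttentionStub(),
    query,
    key,
    value,
    attention_mask=None,
    scaling=64**-0.5,
)
print(ops.shape(attn_output))  # (1, 16, 8, 64) after the final transpose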
This implementation of rotate_half is susceptible to a known bug with XLA compilation on JAX when using ops.concatenate, as noted in keras_hub/src/layers/modeling/rotary_embedding.py. To ensure backend compatibility and robustness, adopt the safer implementation pattern used in RotaryEmbedding.
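
For illustration, a minimal sketch of what a concatenate-free rotate_half could look like, using a stack-and-reshape rewrite that is mathematically equivalent when the feature axis is the last axis. This is an assumption about the intended fix, not the exact code in RotaryEmbedding:

from keras import ops

def rotate_half(x):
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    # Stack the two halves on a new axis, then flatten that axis back into
    # the feature axis. For a trailing feature axis this is equivalent to
    # ops.concatenate((-x2, x1), axis=-1) but avoids the concatenate call
    # flagged above.
    half_rot = ops.stack((-x2, x1), axis=-2)
    return ops.reshape(half_rot, ops.shape(x))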