Skip to content

Instantly share code, notes, and snippets.

View Siwensun's full-sized avatar

Shanlin Sun Siwensun

View GitHub Profile
@Siwensun
Siwensun / vit.py
Last active April 25, 2025 09:15
Vision Transformer All in One [Numpy]
import numpy as np
def get_len_mask(batch_size, seq_len):
    """Build an all-False attention mask of shape (batch_size, seq_len, seq_len).

    In ViT there is no padding, so every patch may attend to every other
    patch; the mask is therefore uniformly False ("do not mask").
    """
    # np.full with False yields a boolean array, identical to
    # np.zeros(..., dtype=bool): nothing is masked out.
    return np.full((batch_size, seq_len, seq_len), False)
def pos_sinusoid_embedding(seq_len, d_model):
@Siwensun
Siwensun / transformer.py
Last active April 25, 2025 08:43
Transformer All in One [Numpy]
import numpy as np
def get_len_mask(batch_size, max_len, feat_lens):
    """Build a boolean padding mask of shape (batch_size, max_len, max_len).

    Entry [i, q, k] is True when key position ``k`` is padding for sample
    ``i`` (i.e. ``k >= feat_lens[i]``) and must be masked out; valid key
    positions are False. The mask is identical across the query axis.

    Args:
        batch_size: number of sequences in the batch.
        max_len: padded sequence length.
        feat_lens: per-sample valid lengths, length ``batch_size``.

    Returns:
        np.ndarray of dtype bool, shape (batch_size, max_len, max_len).
    """
    # Vectorized replacement for the per-sample Python loop: one broadcast
    # comparison builds the (batch, key) mask at C speed.
    lens = np.asarray(feat_lens).reshape(batch_size, 1)          # (B, 1)
    key_is_pad = np.arange(max_len)[None, :] >= lens             # (B, L) bool
    # Expand over the query axis; .copy() makes the result writable,
    # matching the freshly-allocated array the loop version returned.
    return np.broadcast_to(
        key_is_pad[:, None, :], (batch_size, max_len, max_len)
    ).copy()