Created
November 28, 2021 12:53
-
-
Save abhi1868sharma/be9a9a588c3c2b1abde4f431b9b5b480 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import torch | |
| from torch import nn | |
| import math | |
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding from "Attention Is All You Need".

    Precomputes a ``(1, max_len, d_model)`` table of sin/cos encodings
    once (in log space for numerical stability) and registers it as a
    buffer, so it is saved with the model's state dict but is never
    updated by the optimizer.

    NOTE: unlike the standard layer, ``forward`` returns ONLY the
    positional-encoding slice — it does not add it to the input. Per the
    original comments, this isolates the PE part so it can be compared
    against a NumPy implementation.

    Args:
        d_model: embedding dimension (assumed even — the odd-index cos
            assignment requires matching slice widths; TODO confirm for
            odd d_model).
        max_len: maximum sequence length to precompute encodings for.
    """

    def __init__(self, d_model, max_len=5000):
        super().__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)  # (max_len, 1)
        # exp(-2i * ln(10000)/d_model) == 1 / 10000^(2i/d_model),
        # computed in log space to avoid large intermediate powers.
        div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)  # even dimensions
        pe[:, 1::2] = torch.cos(position * div_term)  # odd dimensions
        pe = pe.unsqueeze(0)  # (1, max_len, d_model): broadcastable over the batch
        # Buffer: part of the module's state, excluded from optimizer updates.
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Return the encoding slice of shape ``(1, x.size(1), d_model)``.

        Fix: the original wrapped this in ``torch.autograd.Variable(...,
        requires_grad=False)``, an API deprecated since PyTorch 0.4.
        Buffers never require grad, so the slice can be returned directly.
        """
        return self.pe[:, :x.size(1)]
# Sanity check: pull the (1, seq_len, d_model) encoding out as a NumPy array.
x = torch.rand(1, 10, 512)
pe_obj = PositionalEncoding(512)
pe_torch = pe_obj(x).numpy()  # encoding of shape (1, x.size(1), d_model)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment