import torch
import torch.nn.functional as F
from math import sqrt

def scaled_dot_product_attention(Q, K, V):
  # Scale the query-key dot products by sqrt(d_k), then softmax into attention weights
  dim_k = Q.size(-1)
  scores = torch.bmm(Q, K.transpose(1, 2)) / sqrt(dim_k)
  weights = F.softmax(scores, dim=-1)
  return torch.bmm(weights, V)
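
A quick shape check (a minimal sketch with random tensors; the batch size, sequence length, and embedding dimension below are arbitrary): the attention output has the same shape as the query tensor.

Q = K = V = torch.randn(1, 5, 768)  # hypothetical batch of 1, 5 tokens, 768-dim vectors
out = scaled_dot_product_attention(Q, K, V)
print(out.size())  # torch.Size([1, 5, 768])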

 

from transformers import AutoConfig
from transformers import AutoTokenizer
from torch import nn
import torch.nn.functional as F

model = "distilbert-base-uncased"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
tokenizer = AutoTokenizer.from_pretrained(model)  # the tokenizer is not a torch module, so no .to(device)

text = "time flies like an arrow"  # example sentence to tokenize
inputs = tokenizer(text, return_tensors='pt', add_special_tokens=False)
config = AutoConfig.from_pretrained(model)
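
To see what the tokenizer produces (the exact IDs depend on the example sentence assumed above):

print(inputs.input_ids)         # tensor of token IDs, shape [1, seq_len]
print(inputs.input_ids.size())  # torch.Size([1, 5]) for the 5-word example sentence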

 

class Positional_Embedding(nn.Module):
  def __init__(self, config):
    super().__init__()
    self.token_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
    self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
    self.layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
    self.dropout = nn.Dropout()

  def forward(self, input_ids):
    seq_len = input_ids.size(1)
    position_ids = torch.arange(seq_len, dtype=torch.long).unsqueeze(0)
    token_embeddings = self.token_embeddings(input_ids)
    position_embeddings = self.position_embeddings(position_ids)

    embeddings = token_embeddings + position_embeddings
    embeddings = self.layer_norm(embeddings)
    embeddings = self.dropout(embeddings)
    return embeddings
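
Assuming the DistilBERT config loaded above (hidden size 768), the embedding layer turns each token ID into one hidden-state vector:

embedding_layer = Positional_Embedding(config)
token_embeddings = embedding_layer(inputs.input_ids)
print(token_embeddings.size())  # torch.Size([1, 5, 768])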

 

class Attention_Head(nn.Module):
  def __init__(self, embed_dim, head_dim):
    super().__init__()
    self.q = nn.Linear(embed_dim, head_dim)
    self.k = nn.Linear(embed_dim, head_dim)
    self.v = nn.Linear(embed_dim, head_dim)

  def forward(self, hidden_state):
    attention_outputs = scaled_dot_product_attention(
        self.q(hidden_state), self.k(hidden_state), self.v(hidden_state))
    return attention_outputs



class Multi_Head_Attention(nn.Module):
  def __init__(self, config):
    super().__init__()
    embed_dim = config.hidden_size
    head_num = config.num_attention_heads
    head_dim = embed_dim // head_num

    self.heads = nn.ModuleList(
        [Attention_Head(embed_dim, head_dim) for _ in range(head_num)]
    )
    self.output_linear = nn.Linear(embed_dim, embed_dim)

  def forward(self, hidden_state):
    x = torch.cat([h(hidden_state) for h in self.heads], dim=-1)
    x = self.output_linear(x)
    return x
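
A quick check that multi-head attention preserves the hidden size (reusing token_embeddings from the embedding sketch above):

multihead_attn = Multi_Head_Attention(config)
attn_output = multihead_attn(token_embeddings)
print(attn_output.size())  # torch.Size([1, 5, 768])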

 

class FeedForward(nn.Module):
  def __init__(self, config):
    super().__init__()
    # DistilBertConfig names the feed-forward width `hidden_dim` and the dropout rate `dropout`
    self.linear_1 = nn.Linear(config.hidden_size, config.hidden_dim)
    self.linear_2 = nn.Linear(config.hidden_dim, config.hidden_size)
    self.gelu = nn.GELU()
    self.dropout = nn.Dropout(config.dropout)

  def forward(self, x):
    x = self.linear_1(x)
    x = self.gelu(x)
    x = self.linear_2(x)
    x = self.dropout(x)
    return x
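
The feed-forward block maps back to the hidden size, so it can be stacked with attention inside an encoder layer (again reusing attn_output from the sketch above):

feed_forward = FeedForward(config)
ff_output = feed_forward(attn_output)
print(ff_output.size())  # torch.Size([1, 5, 768])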

 

class Transformer_Encoder_Layer(nn.Module):
  def __init__(self, config):
    super().__init__()
    self.layer_norm_1 = nn.LayerNorm(config.hidden_size)
    self.layer_norm_2 = nn.LayerNorm(config.hidden_size)
    self.attention = Multi_Head_Attention(config)
    self.feed_forward = FeedForward(config)

  def forward(self, x):
    # Pre-layer normalization with residual (skip) connections
    hidden_state = self.layer_norm_1(x)
    x = x + self.attention(hidden_state)
    x = x + self.feed_forward(self.layer_norm_2(x))
    return x


class Transformer_Encoder(nn.Module):
  def __init__(self, config):
    super().__init__()
    self.embeddings = Positional_Embedding(config)
    self.layers = nn.ModuleList([Transformer_Encoder_Layer(config)
                                for _ in range(config.num_hidden_layers)])

  def forward(self, x):
    x = self.embeddings(x)
    for layer in self.layers:
      x = layer(x)
    return x
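
Putting it together (a sketch assuming the classes above): the full encoder maps token IDs straight to contextualized hidden states, one 768-dimensional vector per input token for distilbert-base-uncased.

encoder = Transformer_Encoder(config)
print(encoder(inputs.input_ids).size())  # torch.Size([1, 5, 768])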
