import torch
import torch.nn.functional as F


class Head(torch.nn.Module):
    """Prediction head: a two-layer MLP whose hidden activations are
    modulated by task-specific scale/shift (gamma/beta) parameters
    before dropout and ReLU, followed by a scalar output layer."""

    def __init__(self, config):
        super(Head, self).__init__()
        self.embedding_dim = config['embedding_dim']
        # fc1 input width is embedding_dim * 8, presumably eight
        # concatenated embedding vectors.
        self.fc1_in_dim = config['embedding_dim'] * 8
        self.fc2_in_dim = config['first_fc_hidden_dim']
        self.fc2_out_dim = config['second_fc_hidden_dim']
        self.use_cuda = True  # not used within this module
        self.fc1 = torch.nn.Linear(self.fc1_in_dim, self.fc2_in_dim)
        self.fc2 = torch.nn.Linear(self.fc2_in_dim, self.fc2_out_dim)
        self.linear_out = torch.nn.Linear(self.fc2_out_dim, 1)
        self.dropout_rate = config['head_dropout']
        self.dropout = torch.nn.Dropout(self.dropout_rate)

    def forward(self, task_embed, gamma_1, beta_1, gamma_2, beta_2):
        # First block: linear -> feature-wise scale/shift -> dropout -> ReLU.
        hidden_1 = self.fc1(task_embed)
        hidden_1 = torch.mul(hidden_1, gamma_1) + beta_1
        hidden_1 = self.dropout(hidden_1)
        hidden_2 = F.relu(hidden_1)

        # Second block: same pattern with the second set of gamma/beta.
        hidden_2 = self.fc2(hidden_2)
        hidden_2 = torch.mul(hidden_2, gamma_2) + beta_2
        hidden_2 = self.dropout(hidden_2)
        hidden_3 = F.relu(hidden_2)

        # Scalar output per example.
        y_pred = self.linear_out(hidden_3)
        return y_pred
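
For context, here is a minimal usage sketch. The config keys are exactly the ones `Head` reads in `__init__`, but the concrete sizes, the batch size, and the random gamma/beta tensors are illustrative assumptions; in the surrounding system the modulation parameters would come from a separate adaptation network rather than being sampled at random.

# Minimal usage sketch; all concrete values below are assumptions.
config = {
    'embedding_dim': 32,         # assumed embedding size
    'first_fc_hidden_dim': 64,   # assumed hidden width of fc1's output
    'second_fc_hidden_dim': 64,  # assumed hidden width of fc2's output
    'head_dropout': 0.1,         # assumed dropout rate
}
head = Head(config)

batch_size = 4
# Input: batch of concatenated embeddings, width embedding_dim * 8.
task_embed = torch.randn(batch_size, config['embedding_dim'] * 8)

# Stand-ins for the task-conditioned scale/shift parameters; each must
# match the width of the hidden layer it modulates.
gamma_1 = torch.randn(batch_size, config['first_fc_hidden_dim'])
beta_1 = torch.randn(batch_size, config['first_fc_hidden_dim'])
gamma_2 = torch.randn(batch_size, config['second_fc_hidden_dim'])
beta_2 = torch.randn(batch_size, config['second_fc_hidden_dim'])

y_pred = head(task_embed, gamma_1, beta_1, gamma_2, beta_2)
print(y_pred.shape)  # torch.Size([4, 1]): one scalar prediction per example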