babylm committed on
Commit
8ae7c0e
1 Parent(s): a68b420

Upload modeling_ltgbert.py

Browse files
Files changed (1) hide show
  1. modeling_ltgbert.py +2 -2
modeling_ltgbert.py CHANGED
@@ -233,8 +233,8 @@ class Attention(nn.Module):
233
  if self.position_indices.size(0) < query_len:
234
  position_indices = torch.arange(query_len, dtype=torch.long).unsqueeze(1) \
235
  - torch.arange(query_len, dtype=torch.long).unsqueeze(0)
236
- position_indices = self.make_log_bucket_position(position_indices, self.position_bucket_size, 512)
237
- position_indices = self.position_bucket_size - 1 + position_indices
238
  self.position_indices = position_indices.to(hidden_states.device)
239
 
240
  hidden_states = self.pre_layer_norm(hidden_states)
 
233
  if self.position_indices.size(0) < query_len:
234
  position_indices = torch.arange(query_len, dtype=torch.long).unsqueeze(1) \
235
  - torch.arange(query_len, dtype=torch.long).unsqueeze(0)
236
+ position_indices = self.make_log_bucket_position(position_indices, self.config.position_bucket_size, 512)
237
+ position_indices = self.config.position_bucket_size - 1 + position_indices
238
  self.position_indices = position_indices.to(hidden_states.device)
239
 
240
  hidden_states = self.pre_layer_norm(hidden_states)