
Commit b317dc9

ludweeg authored and soumith committed
Replace redundant module names (#475)
1 parent 015dc95 commit b317dc9
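
For context, these renames rely on the tutorial importing ``torch.nn`` under the ``nn`` alias near the top of the file. The import block itself is not part of this diff, so the sketch below is an assumption about what it contains, not a quote from the tutorial.

    # Assumed import block (not shown in this diff): torch.nn is aliased to nn,
    # so the shortened names in the changes below resolve to the same objects.
    import torch
    import torch.nn as nn

    assert nn.Linear is torch.nn.Linear   # same class under either spelling
    assert nn.utils.rnn.pack_padded_sequence is torch.nn.utils.rnn.pack_padded_sequence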

File tree

1 file changed: +10 -10 lines changed


beginner_source/chatbot_tutorial.py

Lines changed: 10 additions & 10 deletions
@@ -632,8 +632,8 @@ def batch2TrainData(voc, pair_batch):
 #
 # Finally, if passing a padded batch of sequences to an RNN module, we
 # must pack and unpack padding around the RNN pass using
-# ``torch.nn.utils.rnn.pack_padded_sequence`` and
-# ``torch.nn.utils.rnn.pad_packed_sequence`` respectively.
+# ``nn.utils.rnn.pack_padded_sequence`` and
+# ``nn.utils.rnn.pad_packed_sequence`` respectively.
 #
 # **Computation Graph:**
 #
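
The comment touched by this hunk describes packing and unpacking padding around the RNN pass. A minimal, self-contained sketch of that pattern with the shortened names follows; the sizes and tensors are toy values, not taken from the tutorial.

    # Toy example of the pack/unpack pattern (sizes are illustrative only)
    import torch
    import torch.nn as nn

    gru = nn.GRU(input_size=4, hidden_size=6)
    batch = torch.zeros(10, 3, 4)          # (max_seq_len, batch_size, features), zero-padded
    lengths = torch.tensor([10, 7, 5])     # true sequence lengths, longest first

    packed = nn.utils.rnn.pack_padded_sequence(batch, lengths)
    outputs, hidden = gru(packed)          # the GRU skips the padded positions
    outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs)   # back to (10, 3, 6)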
@@ -679,11 +679,11 @@ def forward(self, input_seq, input_lengths, hidden=None):
         # Convert word indexes to embeddings
         embedded = self.embedding(input_seq)
         # Pack padded batch of sequences for RNN module
-        packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lengths)
+        packed = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths)
         # Forward pass through GRU
         outputs, hidden = self.gru(packed, hidden)
         # Unpack padding
-        outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs)
+        outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs)
         # Sum bidirectional GRU outputs
         outputs = outputs[:, :, :self.hidden_size] + outputs[:, : ,self.hidden_size:]
         # Return output and final hidden state
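
The last context line sums the two halves of the output because the encoder's GRU is bidirectional, so the unpacked output carries ``2 * hidden_size`` features. A toy shape check, with dimensions chosen only for illustration:

    # Shape check for summing bidirectional GRU outputs (toy dimensions)
    import torch
    import torch.nn as nn

    hidden_size = 6
    gru = nn.GRU(input_size=4, hidden_size=hidden_size, bidirectional=True)
    outputs, hidden = gru(torch.zeros(10, 3, 4))
    print(outputs.shape)                                    # torch.Size([10, 3, 12])
    summed = outputs[:, :, :hidden_size] + outputs[:, :, hidden_size:]
    print(summed.shape)                                     # torch.Size([10, 3, 6])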
@@ -755,18 +755,18 @@ def forward(self, input_seq, input_lengths, hidden=None):
 #

 # Luong attention layer
-class Attn(torch.nn.Module):
+class Attn(nn.Module):
     def __init__(self, method, hidden_size):
         super(Attn, self).__init__()
         self.method = method
         if self.method not in ['dot', 'general', 'concat']:
             raise ValueError(self.method, "is not an appropriate attention method.")
         self.hidden_size = hidden_size
         if self.method == 'general':
-            self.attn = torch.nn.Linear(self.hidden_size, hidden_size)
+            self.attn = nn.Linear(self.hidden_size, hidden_size)
         elif self.method == 'concat':
-            self.attn = torch.nn.Linear(self.hidden_size * 2, hidden_size)
-            self.v = torch.nn.Parameter(torch.FloatTensor(hidden_size))
+            self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
+            self.v = nn.Parameter(torch.FloatTensor(hidden_size))

     def dot_score(self, hidden, encoder_output):
         return torch.sum(hidden * encoder_output, dim=2)
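
Only ``dot_score`` appears in this hunk. The sketch below exercises it on toy tensors and also shows how the renamed ``nn.Linear`` layer could enter a 'general'-style score; that last line is an assumption about how ``self.attn`` is applied, since the corresponding method body is outside this diff.

    # Toy attention energies (shapes are illustrative; 'general' form is assumed)
    import torch
    import torch.nn as nn

    hidden_size = 6
    hidden = torch.randn(1, 3, hidden_size)             # decoder state, broadcast over time
    encoder_outputs = torch.randn(10, 3, hidden_size)   # (max_seq_len, batch, hidden)

    dot_energy = torch.sum(hidden * encoder_outputs, dim=2)           # (10, 3)

    attn = nn.Linear(hidden_size, hidden_size)                        # as in the 'general' branch
    general_energy = torch.sum(hidden * attn(encoder_outputs), dim=2)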
@@ -1021,8 +1021,8 @@ def train(input_variable, lengths, target_variable, mask, max_target_len, encode
     loss.backward()

     # Clip gradients: gradients are modified in place
-    _ = torch.nn.utils.clip_grad_norm_(encoder.parameters(), clip)
-    _ = torch.nn.utils.clip_grad_norm_(decoder.parameters(), clip)
+    _ = nn.utils.clip_grad_norm_(encoder.parameters(), clip)
+    _ = nn.utils.clip_grad_norm_(decoder.parameters(), clip)

     # Adjust model weights
     encoder_optimizer.step()
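
``nn.utils.clip_grad_norm_`` modifies gradients in place, as the untouched comment says, and returns the total norm it measured before clipping. A minimal sketch with placeholder modules standing in for the tutorial's encoder and decoder:

    # Placeholder modules; `clip` mirrors the tutorial's variable name only
    import torch
    import torch.nn as nn

    encoder = nn.Linear(8, 8)
    decoder = nn.Linear(8, 8)
    clip = 50.0

    loss = decoder(encoder(torch.randn(2, 8))).sum()
    loss.backward()

    # Rescale gradients in place so their total norm is at most `clip`
    _ = nn.utils.clip_grad_norm_(encoder.parameters(), clip)
    _ = nn.utils.clip_grad_norm_(decoder.parameters(), clip)

    encoder_optimizer = torch.optim.SGD(encoder.parameters(), lr=0.01)
    encoder_optimizer.step()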
