Commit f011eae

Merge remote-tracking branch 'upstream/master' into mp

2 parents dbf49e5 + b317dc9

File tree

2 files changed: +12 -12 lines changed


beginner_source/chatbot_tutorial.py

Lines changed: 10 additions & 10 deletions
@@ -632,8 +632,8 @@ def batch2TrainData(voc, pair_batch):
 #
 # Finally, if passing a padded batch of sequences to an RNN module, we
 # must pack and unpack padding around the RNN pass using
-# ``torch.nn.utils.rnn.pack_padded_sequence`` and
-# ``torch.nn.utils.rnn.pad_packed_sequence`` respectively.
+# ``nn.utils.rnn.pack_padded_sequence`` and
+# ``nn.utils.rnn.pad_packed_sequence`` respectively.
 #
 # **Computation Graph:**
 #
@@ -679,11 +679,11 @@ def forward(self, input_seq, input_lengths, hidden=None):
         # Convert word indexes to embeddings
         embedded = self.embedding(input_seq)
         # Pack padded batch of sequences for RNN module
-        packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lengths)
+        packed = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths)
         # Forward pass through GRU
         outputs, hidden = self.gru(packed, hidden)
         # Unpack padding
-        outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs)
+        outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs)
         # Sum bidirectional GRU outputs
         outputs = outputs[:, :, :self.hidden_size] + outputs[:, : ,self.hidden_size:]
         # Return output and final hidden state
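
For reference, the pack/unpack pattern this hunk renames can be exercised standalone; a minimal sketch, with illustrative GRU sizes and sequence lengths that are not taken from the tutorial:

import torch
import torch.nn as nn

gru = nn.GRU(input_size=8, hidden_size=8)
padded = torch.randn(5, 3, 8)        # (max_len, batch, features), zero-padded
lengths = torch.tensor([5, 3, 2])    # true lengths, sorted descending
# Pack so the GRU skips the padded time steps
packed = nn.utils.rnn.pack_padded_sequence(padded, lengths)
outputs, hidden = gru(packed)
# Unpack back to a padded tensor of shape (max_len, batch, hidden_size)
outputs, out_lengths = nn.utils.rnn.pad_packed_sequence(outputs)
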
@@ -755,18 +755,18 @@ def forward(self, input_seq, input_lengths, hidden=None):
 #

 # Luong attention layer
-class Attn(torch.nn.Module):
+class Attn(nn.Module):
     def __init__(self, method, hidden_size):
         super(Attn, self).__init__()
         self.method = method
         if self.method not in ['dot', 'general', 'concat']:
             raise ValueError(self.method, "is not an appropriate attention method.")
         self.hidden_size = hidden_size
         if self.method == 'general':
-            self.attn = torch.nn.Linear(self.hidden_size, hidden_size)
+            self.attn = nn.Linear(self.hidden_size, hidden_size)
         elif self.method == 'concat':
-            self.attn = torch.nn.Linear(self.hidden_size * 2, hidden_size)
-            self.v = torch.nn.Parameter(torch.FloatTensor(hidden_size))
+            self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
+            self.v = nn.Parameter(torch.FloatTensor(hidden_size))

     def dot_score(self, hidden, encoder_output):
         return torch.sum(hidden * encoder_output, dim=2)
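
As a side note, the 'dot' score visible in this hunk is just an elementwise product summed over the hidden dimension; the rest of the class (not shown in this diff) normalizes the energies over time. A minimal sketch under that assumption, with illustrative tensor shapes:

import torch
import torch.nn.functional as F

hidden = torch.randn(1, 4, 16)            # (1, batch, hidden_size): current decoder state
encoder_outputs = torch.randn(7, 4, 16)   # (max_len, batch, hidden_size)
# Broadcast multiply, then sum over the hidden dimension
attn_energies = torch.sum(hidden * encoder_outputs, dim=2)        # (max_len, batch)
# Softmax over the time dimension yields the attention weights
attn_weights = F.softmax(attn_energies.t(), dim=1).unsqueeze(1)   # (batch, 1, max_len)
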
@@ -1021,8 +1021,8 @@ def train(input_variable, lengths, target_variable, mask, max_target_len, encode
     loss.backward()

     # Clip gradients: gradients are modified in place
-    _ = torch.nn.utils.clip_grad_norm_(encoder.parameters(), clip)
-    _ = torch.nn.utils.clip_grad_norm_(decoder.parameters(), clip)
+    _ = nn.utils.clip_grad_norm_(encoder.parameters(), clip)
+    _ = nn.utils.clip_grad_norm_(decoder.parameters(), clip)

     # Adjust model weights
     encoder_optimizer.step()
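
For context, ``nn.utils.clip_grad_norm_`` rescales all gradients in place so their total norm does not exceed the threshold, and returns the pre-clip norm. A minimal sketch; the model, data, and clip value here are placeholders, not values from the tutorial:

import torch
import torch.nn as nn

model = nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
clip = 50.0

loss = model(torch.randn(4, 10)).sum()
loss.backward()
# In-place rescaling; the returned pre-clip norm is discarded
_ = nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
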

beginner_source/dcgan_faces_tutorial.py

Lines changed: 2 additions & 2 deletions
@@ -218,7 +218,7 @@
 # ----
 #
 # In this tutorial we will use the `Celeb-A Faces
-# dataset <https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`__ which can
+# dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`__ which can
 # be downloaded at the linked site, or in `Google
 # Drive <https://drive.google.com/drive/folders/0B7EVK8r0v71pTUZsaXdaSnZBZzg>`__.
 # The dataset will download as a file named *img_align_celeba.zip*. Once
@@ -283,7 +283,7 @@
 #
 # From the DCGAN paper, the authors specify that all model weights shall
 # be randomly initialized from a Normal distribution with mean=0,
-# stdev=0.2. The ``weights_init`` function takes an initialized model as
+# stdev=0.02. The ``weights_init`` function takes an initialized model as
 # input and reinitializes all convolutional, convolutional-transpose, and
 # batch normalization layers to meet this criteria. This function is
 # applied to the models immediately after initialization.
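
For context, the corrected stdev=0.02 matches the DCGAN paper's initialization spec. A sketch of a ``weights_init``-style function under that spec; it may differ in detail from the tutorial's actual implementation:

import torch.nn as nn

def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:          # Conv2d and ConvTranspose2d layers
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)

# Applied recursively to every submodule right after construction, e.g.:
# netG.apply(weights_init)
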
