Skip to content

Commit 9719a22

Browse files
committed
Break codeblock in sequence_models_tutorial.py into a few cells
1 parent db490c0 commit 9719a22

File tree

1 file changed: +9 additions, −1 deletion

beginner_source/nlp/sequence_models_tutorial.py

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -121,6 +121,8 @@
121121
# vector. Note this implies immediately that the dimensionality of the
122122
# target space of :math:`A` is :math:`|T|`.
123123
#
124+
#
125+
# Prepare data:
124126

125127
def prepare_sequence(seq, to_ix):
126128
idxs = [to_ix[w] for w in seq]
@@ -144,6 +146,8 @@ def prepare_sequence(seq, to_ix):
144146
EMBEDDING_DIM = 6
145147
HIDDEN_DIM = 6
146148

149+
######################################################################
150+
# Create the model:
147151

148152
class LSTMTagger(nn.Module):
149153

@@ -176,6 +180,9 @@ def forward(self, sentence):
176180
tag_scores = F.log_softmax(tag_space)
177181
return tag_scores
178182

183+
######################################################################
184+
# Train the model:
185+
179186
model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(tag_to_ix))
180187
loss_function = nn.NLLLoss()
181188
optimizer = optim.SGD(model.parameters(), lr=0.1)
@@ -241,7 +248,8 @@ def forward(self, sentence):
241248
#
242249
# To get the character level representation, do an LSTM over the
243250
# characters of a word, and let :math:`c_w` be the final hidden state of
244-
# this LSTM. Hints:
251+
# this LSTM. Hints:
252+
#
245253
# * There are going to be two LSTM's in your new model.
246254
# The original one that outputs POS tag scores, and the new one that
247255
# outputs a character-level representation of each word.

0 commit comments

Comments (0)