# vector. Note this implies immediately that the dimensionality of the
# target space of :math:`A` is :math:`|T|`.
#
+ #
+ # Prepare data:
def prepare_sequence(seq, to_ix):
    idxs = [to_ix[w] for w in seq]
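
The hunk shows only the first lines of the helper. A minimal complete version, written here as a sketch assuming the modern torch.tensor API (older revisions of this tutorial wrapped the result differently), would be:

import torch

def prepare_sequence(seq, to_ix):
    # Look up the integer index of every token in the sequence ...
    idxs = [to_ix[w] for w in seq]
    # ... and wrap the indices in a LongTensor so they can feed an
    # nn.Embedding layer.
    return torch.tensor(idxs, dtype=torch.long)

In the tutorial this helper would be applied to each training sentence and tag sequence, e.g. prepare_sequence(sentence, word_to_ix).
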
@@ -144,6 +146,8 @@ def prepare_sequence(seq, to_ix):
EMBEDDING_DIM = 6
HIDDEN_DIM = 6
+ ######################################################################
+ # Create the model:
class LSTMTagger(nn.Module):
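
The diff elides the body of LSTMTagger. A sketch of the usual embedding -> LSTM -> linear -> log-softmax tagger, consistent with the tag_space / tag_scores names visible in the next hunk (the hidden2tag attribute name is an assumption, and dim=1 is passed explicitly where the excerpt omits it), looks like:

import torch.nn as nn
import torch.nn.functional as F

class LSTMTagger(nn.Module):

    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim
        # Word indices -> dense word embeddings.
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
        # The LSTM maps embeddings to hidden states of size hidden_dim.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim)
        # The affine map A from hidden states to tag space; its output
        # dimension is |T|, the number of tags.
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)

    def forward(self, sentence):
        embeds = self.word_embeddings(sentence)
        # nn.LSTM expects input of shape (seq_len, batch, embedding_dim).
        lstm_out, _ = self.lstm(embeds.view(len(sentence), 1, -1))
        tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
        tag_scores = F.log_softmax(tag_space, dim=1)
        return tag_scores
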
@@ -176,6 +180,9 @@ def forward(self, sentence):
        tag_scores = F.log_softmax(tag_space)
        return tag_scores
+ ######################################################################
+ # Train the model:
+
model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(tag_to_ix))
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
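
The training loop itself falls outside this hunk. A sketch of the standard pattern, assuming training_data is the list of (sentence, tags) pairs defined earlier in the tutorial and that 300 epochs is an arbitrary choice for the toy corpus:

for epoch in range(300):  # the toy data set is tiny, so many passes are cheap
    for sentence, tags in training_data:
        # Step 1: clear gradients accumulated from the previous example.
        model.zero_grad()

        # Step 2: turn the words and tags into tensors of indices.
        sentence_in = prepare_sequence(sentence, word_to_ix)
        targets = prepare_sequence(tags, tag_to_ix)

        # Step 3: forward pass -> log-probabilities over tags for each word.
        tag_scores = model(sentence_in)

        # Step 4: NLLLoss pairs log-probabilities with integer targets;
        # backpropagate and take an SGD step.
        loss = loss_function(tag_scores, targets)
        loss.backward()
        optimizer.step()
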
@@ -241,7 +248,8 @@ def forward(self, sentence):
#
# To get the character level representation, do an LSTM over the
# characters of a word, and let :math:`c_w` be the final hidden state of
- # this LSTM. Hints:
+ # this LSTM. Hints:
+ #
# * There are going to be two LSTMs in your new model.
# The original one that outputs POS tag scores, and the new one that
# outputs a character-level representation of each word.
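
One way to act on that hint, sketched here under the assumption that character indices come from a char_to_ix dictionary (a name not in the excerpt), is a small second module whose final hidden state plays the role of c_w; its output would then be combined with the word embedding before the word-level LSTM:

import torch.nn as nn

class CharLSTMEncoder(nn.Module):
    # Hypothetical helper, not part of the tutorial code shown above.
    def __init__(self, char_vocab_size, char_embedding_dim, char_hidden_dim):
        super(CharLSTMEncoder, self).__init__()
        self.char_embeddings = nn.Embedding(char_vocab_size, char_embedding_dim)
        self.char_lstm = nn.LSTM(char_embedding_dim, char_hidden_dim)

    def forward(self, char_idxs):
        # char_idxs: 1-D tensor of character indices for a single word.
        embeds = self.char_embeddings(char_idxs)
        # Run the character LSTM and keep only the final hidden state h_n.
        _, (h_n, _) = self.char_lstm(embeds.view(len(char_idxs), 1, -1))
        return h_n.view(-1)  # c_w, of dimension char_hidden_dim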