
Commit a215d58

Merge branch 'master' into patch-1
2 parents 1b97736 + f35c04a


4 files changed: +5 -5 lines changed


advanced_source/cpp_extension.rst

Lines changed: 1 addition & 1 deletion
@@ -1101,7 +1101,7 @@ on it:
   const int threads = 1024;
   const dim3 blocks((state_size + threads - 1) / threads, batch_size);
 
-  AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] {
+  AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_backward_cuda", ([&] {
     lltm_cuda_backward_kernel<scalar_t><<<blocks, threads>>>(
         d_old_cell.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
         d_gates.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),

advanced_source/super_resolution_with_onnxruntime.py

Lines changed: 1 addition & 1 deletion
@@ -145,7 +145,7 @@ def _initialize_weights(self):
                   do_constant_folding=True,  # whether to execute constant folding for optimization
                   input_names = ['input'],   # the model's input names
                   output_names = ['output'], # the model's output names
-                  dynamic_axes={'input' : {0 : 'batch_size'},    # variable lenght axes
+                  dynamic_axes={'input' : {0 : 'batch_size'},    # variable length axes
                                 'output' : {0 : 'batch_size'}})
 
 ######################################################################
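For context, dynamic_axes is what lets the exported ONNX graph accept inputs whose batch dimension varies at inference time. A minimal, self-contained sketch of the same export call (using a toy model and output file name, not the tutorial's SuperResolutionNet) might look like this:

# Minimal sketch: export a toy model with a variable-length batch axis.
# The model, input shape, and "toy_model.onnx" file name are illustrative only.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(1, 8, 3, padding=1), nn.ReLU())
dummy_input = torch.randn(1, 1, 224, 224)

torch.onnx.export(model, dummy_input, "toy_model.onnx",
                  export_params=True,
                  do_constant_folding=True,
                  input_names=['input'],
                  output_names=['output'],
                  # axis 0 of both tensors may vary at run time
                  dynamic_axes={'input': {0: 'batch_size'},
                                'output': {0: 'batch_size'}})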

beginner_source/dcgan_faces_tutorial.py

Lines changed: 2 additions & 2 deletions
@@ -610,10 +610,10 @@ def forward(self, input):
         output = netD(fake.detach()).view(-1)
         # Calculate D's loss on the all-fake batch
         errD_fake = criterion(output, label)
-        # Calculate the gradients for this batch
+        # Calculate the gradients for this batch, accumulated (summed) with previous gradients
         errD_fake.backward()
         D_G_z1 = output.mean().item()
-        # Add the gradients from the all-real and all-fake batches
+        # Compute error of D as sum over the fake and the real batches
         errD = errD_real + errD_fake
         # Update D
         optimizerD.step()
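The reworded comments describe PyTorch's default behavior of accumulating (summing) gradients in .grad across successive backward() calls, which is why the all-real and all-fake losses contribute to a single discriminator update. A tiny standalone sketch of that behavior (toy tensors, not the DCGAN networks) might look like this:

# Minimal sketch of gradient accumulation across two backward() calls.
# The scalar losses here are stand-ins for errD_real and errD_fake.
import torch

w = torch.ones(1, requires_grad=True)
loss_real = (2 * w).sum()   # stand-in for errD_real; d/dw = 2
loss_fake = (3 * w).sum()   # stand-in for errD_fake; d/dw = 3

loss_real.backward()        # w.grad is now tensor([2.])
loss_fake.backward()        # gradients accumulate: w.grad is now tensor([5.])
print(w.grad)               # tensor([5.])

loss_total = loss_real + loss_fake   # like errD = errD_real + errD_fake, used only for reporting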

intermediate_source/reinforcement_q_learning.py

Lines changed: 1 addition & 1 deletion
@@ -134,7 +134,7 @@ def __len__(self):
 
 
 ######################################################################
-# Now, let's define our model. But first, let quickly recap what a DQN is.
+# Now, let's define our model. But first, let's quickly recap what a DQN is.
 #
 # DQN algorithm
 # -------------
