
Commit 2dac3e4

Author: Svetlana Karslioglu
Pyspelling: Python intermediate tutorials A-M (#2287)
* Pyspelling: Python intermediate tutorials A-M
1 parent 8c1d408 commit 2dac3e4

17 files changed: +276 -182 lines changed

.pyspelling.yml

Lines changed: 15 additions & 0 deletions
@@ -3,6 +3,21 @@ matrix:
 - name: python
   sources:
   - beginner_source/*.py
+  - intermediate_source/autograd_saved_tensors_hooks_tutorial.py
+  - intermediate_source/ax_multiobjective_nas_tutorial.py
+  - intermediate_source/char_rnn_classification_tutorial.py
+  - intermediate_source/char_rnn_generation_tutorial.py
+  - intermediate_source/custom_function_conv_bn_tutorial.py
+  - intermediate_source/ensembling.py
+  #- intermediate_source/flask_rest_api_tutorial.py
+  - intermediate_source/forward_ad_usage.py
+  - intermediate_source/fx_conv_bn_fuser.py
+  - intermediate_source/fx_profiling_tutorial.py
+  - intermediate_source/jacobians_hessians.py
+  - intermediate_source/mario_rl_tutorial.py
+  - intermediate_source/mnist_train_nas.py
+  - intermediate_source/memory_format_tutorial.py
+  - intermediate_source/model_parallel_tutorial.py
   dictionary:
     wordlists:
     - en-wordlist.txt
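
With these sources added, the spell check configured here can be reproduced locally. A minimal sketch, assuming pyspelling is installed and that its documented -c (config file) and -n (task name) options are used; this invocation is illustrative and not part of the commit:

    # Hypothetical local run of the spell check defined by .pyspelling.yml.
    # Assumes `pip install pyspelling` and a spell-check backend on PATH.
    import subprocess

    subprocess.run(
        ["pyspelling", "-c", ".pyspelling.yml", "-n", "python"],
        check=True,  # raise if any of the listed tutorials has a spelling error
    )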

en-wordlist.txt

Lines changed: 80 additions & 0 deletions
@@ -2,6 +2,7 @@ APIs
 Args
 Autograd
 BCE
+BN
 BOS
 Bahdanau
 BatchNorm
@@ -10,76 +11,112 @@ CIFAR
 CLS
 CNNDM
 CNNs
+CPUs
 CUDA
 Chatbots
 Colab
 Conv
 ConvNet
+ConvNets
 DCGAN
 DCGANs
+DDQN
+DNN
 DataLoaders
 DeiT
+DenseNet
 EOS
+FC
 FGSM
 FLAVA
+FX
+FX's
 FloydHub
 FloydHub's
 GAN
 GANs
 GPUs
 GRU
 GRUs
+GeForce
 Goodfellow
 Goodfellow’s
 GreedySearchDecoder
+HVP
 Hugging Face
 IMDB
 ImageNet
 Initializations
 Iteratively
 JSON
+JVP
+Jacobian
+Kiuk
+Kubernetes
 Kuei
 LSTM
+LSTMs
 LeNet
 LeakyReLU
 LeakyReLUs
+Lua
 Luong
 MLP
+MLPs
 MNIST
 Mypy
+NAS
+NCHW
+NES
 NLP
 NaN
 NeurIPS
 NumPy
 Numericalization
 Numpy's
+OpenAI
+Plotly
+Prec
 Profiler
 PyTorch's
 RGB
+RL
 RNN
 RNNs
+RTX
 Radford
 ReLU
+ResNet
 SST2
+Sequentials
 Sigmoid
 SoTA
+TPU
 TensorBoard
 TextVQA
 Tokenization
 TorchMultimodal
 TorchScript
+TorchX
+Tunable
 Unescape
 VQA
 Wikitext
+Xeon
 accuracies
 activations
 adversarially
 al
+autodiff
+autograd
 backend
+backends
 backprop
+backpropagate
 backpropagated
 backpropagates
 backpropagation
+batchnorm
 batchnorm's
 benchmarking
 boolean
@@ -89,12 +126,15 @@ chatbot's
 checkpointing
 composable
 concat
+config
 contrastive
 conv
 convolutional
 cpu
 csv
+cuDNN
 datafile
+dataframe
 dataloader
 dataloaders
 datapipes
@@ -105,26 +145,43 @@ deserialize
 deserialized
 dir
 downsample
+downsamples
 embeddings
 encodings
+ensembling
 eq
 et
 evaluateInput
+extensibility
 fastai
 fbgemm
 feedforward
 finetune
 finetuning
+fp
+functorch
+fuser
+grayscale
+hardcode
 helpdesk
 helpdesks
+hessian
+hessians
+hvp
 hyperparameter
 hyperparameters
 imagenet
+initializations
+inlined
+interpretable
 io
 iterable
 iteratively
+jacobian
+jacobians
 jit
 jpg
+kwargs
 labelled
 learnable
 loadFilename
@@ -139,6 +196,7 @@ modularity
 modularized
 multimodal
 multimodality
+multiobjective
 multithreaded
 namespace
 natively
@@ -153,56 +211,78 @@ overfitting
 parallelizable
 parallelization
 perceptibility
+pipelining
+pointwise
+precomputing
 prepend
 preprocess
 preprocessing
+prespecified
 pretrained
 prewritten
+primals
 profiler
 profilers
 pytorch
 quantized
 quantizing
+queryable
 randint
 readably
 reinitializes
 relu
 reproducibility
 rescale
+resnet
+restride
 rewinded
+romanized
+runnable
 runtime
 runtime
 runtimes
+scalable
 softmax
 src
 stacktrace
 stateful
 storages
 strided
 subclasses
+subclassing
 subdirectories
 submodule
+subreddit
 summarization
 tanh
 th
 thresholding
+timestep
+timesteps
 tokenization
 tokenize
 tokenizer
 torchaudio
 torchdata
+torchscriptable
 torchtext
 torchtext's
 torchvision
+torchviz
 traceback
 tradeoff
+tradeoffs
 uncomment
 uncommented
+unfused
 unimodal
 unnormalized
 unpickling
 utils
+vectorization
+vectorize
 vectorized
+vhp
 voc
 walkthrough
 warmstart

intermediate_source/autograd_saved_tensors_hooks_tutorial.py

Lines changed: 15 additions & 13 deletions
@@ -1,6 +1,6 @@
 """
 Hooks for autograd saved tensors
-=======================
+================================

 """

@@ -13,8 +13,7 @@
 # packing/unpacking process.
 #
 # This tutorial assumes you are familiar with how backpropagation works in
-# theory. If not, read this first:
-# https://colab.research.google.com/drive/1aWNdmYt7RcHMbUk-Xz2Cv5-cGFSWPXe0#scrollTo=AHcEJ6nXUb7W
+# theory. If not, read `this <https://colab.research.google.com/drive/1aWNdmYt7RcHMbUk-Xz2Cv5-cGFSWPXe0#scrollTo=AHcEJ6nXUb7W>`_ first.
 #

@@ -107,7 +106,7 @@ def f(x):

 ######################################################################
 # In the example above, executing without grad would only have kept ``x``
-# and ``y`` in the scope, But the graph additionnally stores ``f(x)`` and
+# and ``y`` in the scope, But the graph additionally stores ``f(x)`` and
 # ``f(f(x))``. Hence, running a forward pass during training will be more
 # costly in memory usage than during evaluation (more precisely, when
 # autograd is not required).
@@ -182,7 +181,7 @@ def unpack_hook(x):


 ######################################################################
-# The ``pack_hook`` function will be called everytime an operation saves
+# The ``pack_hook`` function will be called every time an operation saves
 # a tensor for backward.
 # The output of ``pack_hook`` is then stored in the computation graph
 # instead of the original tensor.
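
The mechanism described here can be sketched with the public `torch.autograd.graph.saved_tensors_hooks` context manager; the hooks below are a minimal illustration (the print only shows when packing happens), not the tutorial's own code:

    import torch

    def pack_hook(tensor):
        # Called each time an operation saves a tensor for backward; the return
        # value is stored in the graph in place of the original tensor.
        print("packing a tensor of shape", tuple(tensor.shape))
        return tensor

    def unpack_hook(packed):
        # Called when backward needs the saved value back.
        return packed

    x = torch.randn(5, requires_grad=True)
    with torch.autograd.graph.saved_tensors_hooks(pack_hook, unpack_hook):
        y = (x * x).sum()
    y.backward()
    assert torch.allclose(x.grad, 2 * x)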
@@ -218,8 +217,9 @@ def unpack_hook(x):
 #

 ######################################################################
-# **Returning and int**
-
+# Returning an ``int``
+# ^^^^^^^^^^^^^^^^^^^^
+#
 # Returning the index of a Python list
 # Relatively harmless but with debatable usefulness

@@ -240,8 +240,9 @@ def unpack(x):
 assert(x.grad.equal(2 * x))

 ######################################################################
-# **Returning a tuple**
-
+# Returning a tuple
+# ^^^^^^^^^^^^^^^^^
+#
 # Returning some tensor and a function how to unpack it
 # Quite unlikely to be useful in its current form

@@ -262,9 +263,10 @@ def unpack(packed):
 assert(torch.allclose(x.grad, 2 * x))

 ######################################################################
-# **Returning a str**
-
-# Returning the __repr__ of the tensor
+# Returning a ``str``
+# ^^^^^^^^^^^^^^^^^^^
+#
+# Returning the ``__repr__ of`` the tensor
 # Probably never do this

 x = torch.randn(5, requires_grad=True)
@@ -337,7 +339,7 @@ def forward(self, x):


 ######################################################################
-# In practice, on a A100 GPU, for a resnet-152 with batch size 256, this
+# In practice, on a A100 GPU, for a ResNet-152 with batch size 256, this
 # corresponds to a GPU memory usage reduction from 48GB to 5GB, at the
 # cost of a 6x slowdown.
 #
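
The reduction quoted here comes from moving saved activations out of GPU memory during the forward pass (the tutorial's own hooks are not shown in this diff). One built-in way to do this is `torch.autograd.graph.save_on_cpu`; the sketch below is illustrative and assumes a CUDA device and torchvision:

    import torch
    import torchvision

    model = torchvision.models.resnet152().cuda()
    x = torch.randn(8, 3, 224, 224, device="cuda")

    # Every tensor autograd saves for backward is moved to pinned CPU memory
    # and copied back to the GPU only when backward actually needs it.
    with torch.autograd.graph.save_on_cpu(pin_memory=True):
        loss = model(x).sum()
    loss.backward()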
