
Commit 5e10f77

Fixed pyspelling by adding exceptions and fixing code-like words
Added references/detection helper files to the ignore list
1 parent 2ee27d0 commit 5e10f77

3 files changed (+25, -16 lines)

.jenkins/validate_tutorials_built.py

Lines changed: 7 additions & 2 deletions
@@ -21,7 +21,7 @@
     "beginner_source/former_torchies/tensor_tutorial_old",
     "beginner_source/examples_autograd/polynomial_autograd",
     "beginner_source/examples_autograd/polynomial_custom_function",
-    "beginner_source/t5_tutorial", # re-enable after this is fixed: https://github.com/pytorch/text/issues/1756
+    "beginner_source/t5_tutorial", # re-enable after this is fixed: https://github.com/pytorch/text/issues/1756
     "intermediate_source/parametrizations",
     "intermediate_source/mnist_train_nas", # used by ax_multiobjective_nas_tutorial.py
     "intermediate_source/fx_conv_bn_fuser",
@@ -50,7 +50,12 @@
     "recipes_source/recipes/Captum_Recipe",
     "intermediate_source/flask_rest_api_tutorial",
     "intermediate_source/text_to_speech_with_torchaudio",
-    "intermediate_source/tensorboard_profiler_tutorial" # reenable after 2.0 release.
+    "intermediate_source/tensorboard_profiler_tutorial", # reenable after 2.0 release.
+    "intermediate_source/engine", # helper file for torchvision_tutorial.py
+    "intermediate_source/utils", # helper file for torchvision_tutorial.py
+    "intermediate_source/coco_utils", # helper file for torchvision_tutorial.py
+    "intermediate_source/coco_eval", # helper file for torchvision_tutorial.py
+    "intermediate_source/transforms", # helper file for torchvision_tutorial.py
 ]

 def tutorial_source_dirs() -> List[Path]:
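
One way to keep an ignore list like this honest is a quick existence check. A minimal sketch, assuming the list lives in a NOT_RUN constant; the helper below is hypothetical and not part of validate_tutorials_built.py:

from pathlib import Path

# Hypothetical helper, not part of this commit: flag NOT_RUN entries that
# no longer correspond to a .py file, so stale entries get noticed.
NOT_RUN = [
    "intermediate_source/engine",     # helper file for torchvision_tutorial.py
    "intermediate_source/coco_eval",  # helper file for torchvision_tutorial.py
]

def stale_not_run_entries(repo_root: Path) -> list[str]:
    """Return NOT_RUN entries with no matching source file on disk."""
    return [entry for entry in NOT_RUN
            if not (repo_root / f"{entry}.py").exists()]

print(stale_not_run_entries(Path(".")))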

.pyspelling.yml

Lines changed: 8 additions & 4 deletions
@@ -2,10 +2,11 @@ spellchecker: aspell
 matrix:
 - name: python
   sources:
-  - beginner_source/*.py
-  - intermediate_source/*.py
-  - advanced_source/*.py
-  - recipes_source/*/*.py
+  # - beginner_source/*.py
+  # - intermediate_source/*.py
+  - intermediate_source/torchvision_tutorial.py
+  # - advanced_source/*.py
+  # - recipes_source/*/*.py
   dictionary:
     wordlists:
     - en-wordlist.txt
@@ -45,6 +46,9 @@ matrix:
   - open: '\.\. (code-block|math)::.*$\n*'
     content: '(?P<first>(^(?P<indent>[ ]+).*$\n))(?P<other>(^([ \t]+.*|[ \t]*)$\n)*)'
     close: '(^(?![ \t]+.*$))'
+  # Exclude torchvision terminology:
+  - open: '(TorchVision|PennFudan|Fudan|mAP|RCNN|RPN|keypoint|uint8|rescaling|pre.*)'
+    close: ''
 - pyspelling.filters.markdown:
 - pyspelling.filters.html:
   ignores:
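
To preview what the new context filter masks before aspell ever sees the text, the 'open' pattern can be exercised directly with Python's re module (a sketch; the sample sentence is made up):

import re

# Same pattern as the 'open' expression added above
OPEN = re.compile(
    r'(TorchVision|PennFudan|Fudan|mAP|RCNN|RPN|keypoint|uint8|rescaling|pre.*)'
)

sample = "TorchVision wraps uint8 images; FasterRCNN uses an RPN head."
print(OPEN.findall(sample))  # ['TorchVision', 'uint8', 'RCNN', 'RPN']

Worth noting: the trailing pre.* alternative is greedy, so it masks everything from a literal "pre" to the end of the line, not just a single word.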

intermediate_source/torchvision_tutorial.py

Lines changed: 10 additions & 10 deletions
@@ -39,7 +39,7 @@
 # - ``area``, float :class:`torch.Tensor` of shape ``[N]``: the area of the bounding box. This is used
 #   during evaluation with the COCO metric, to separate the metric
 #   scores between small, medium and large boxes.
-# - ``iscrowd``, uint8 :class:`torch.Tensor` of shape ``[N]``: instances with iscrowd=True will be
+# - ``iscrowd``, uint8 :class:`torch.Tensor` of shape ``[N]``: instances with ``iscrowd=True`` will be
 #   ignored during evaluation.
 # - (optionally) ``masks``, :class:`torchvision.tv_tensors.Mask` of shape ``[N, H, W]``: the segmentation
 #   masks for each one of the objects
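
Assembled in one place, a target dictionary with these fields for a single object (N = 1) looks roughly like this; shapes follow the list above, the values are illustrative:

import torch
from torchvision import tv_tensors

# Illustrative target for one 480x640 image with a single object
target = {
    "boxes": tv_tensors.BoundingBoxes(
        torch.tensor([[10.0, 20.0, 110.0, 220.0]]),   # [N, 4], XYXY
        format="XYXY",
        canvas_size=(480, 640),
    ),
    "labels": torch.tensor([1]),                       # [N]
    "area": torch.tensor([20000.0]),                   # [N], used by the COCO metric
    "iscrowd": torch.zeros((1,), dtype=torch.uint8),   # [N], True means skipped at eval
    "masks": tv_tensors.Mask(                          # optional, [N, H, W]
        torch.zeros((1, 480, 640), dtype=torch.uint8)
    ),
}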
@@ -104,8 +104,8 @@
 # for the given object detection and segmentation task.
 # Namely, image tensors will be wrapped by :class:`torchvision.tv_tensors.Image`, bounding boxes into
 # :class:`torchvision.tv_tensors.BoundingBoxes` and masks into :class:`torchvision.tv_tensors.Mask`.
-# As TVTensor are :class:`torch.Tensor` subclasses, wrapped objects are also tensors and inherit the plain
-# :class:`torch.Tensor` API. For more information about torchvision tv_tensors see
+# As ``torchvision.TVTensor`` are :class:`torch.Tensor` subclasses, wrapped objects are also tensors and inherit the plain
+# :class:`torch.Tensor` API. For more information about torchvision ``tv_tensors`` see
 # `this documentation <https://pytorch.org/vision/main/auto_examples/v2_transforms/plot_transforms_v2.html#sphx-glr-auto-examples-v2-transforms-plot-transforms-v2-py>`_.

 import os
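
The subclass relationship described above can be seen in a few lines (illustrative, not part of the diff):

import torch
from torchvision import tv_tensors

img = tv_tensors.Image(torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8))
print(isinstance(img, torch.Tensor))  # True: TVTensors subclass torch.Tensor
print(img.shape, img.dtype)           # the plain Tensor API works unchanged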
@@ -195,7 +195,7 @@ def __len__(self):
 #
 # There are two common
 # situations where one might want
-# to modify one of the available models in torchvision modelzoo. The first
+# to modify one of the available models in TorchVision Model Zoo. The first
 # is when we want to start from a pre-trained model, and just finetune the
 # last layer. The other is when we want to replace the backbone of the
 # model with a different one (for faster predictions, for example).
@@ -236,7 +236,7 @@ def __len__(self):
 # load a pre-trained model for classification and return
 # only the features
 backbone = torchvision.models.mobilenet_v2(weights="DEFAULT").features
-# FasterRCNN needs to know the number of
+# ``FasterRCNN`` needs to know the number of
 # output channels in a backbone. For mobilenet_v2, it's 1280
 # so we need to add it here
 backbone.out_channels = 1280
@@ -254,13 +254,13 @@ def __len__(self):
 # the size of the crop after rescaling.
 # if your backbone returns a Tensor, featmap_names is expected to
 # be [0]. More generally, the backbone should return an
-# OrderedDict[Tensor], and in featmap_names you can choose which
+# ``OrderedDict[Tensor]``, and in ``featmap_names`` you can choose which
 # feature maps to use.
 roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
                                                 output_size=7,
                                                 sampling_ratio=2)

-# put the pieces together inside a FasterRCNN model
+# put the pieces together inside a Faster-RCNN model
 model = FasterRCNN(backbone,
                    num_classes=2,
                    rpn_anchor_generator=anchor_generator,
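
For context, the fragments in the two hunks above assemble into a runnable sketch like this; the AnchorGenerator arguments sit outside this diff, so the values shown are an assumption based on the tutorial's published version:

import torchvision
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator

# classification backbone trimmed to its feature extractor
backbone = torchvision.models.mobilenet_v2(weights="DEFAULT").features
backbone.out_channels = 1280  # FasterRCNN needs the channel count

# assumed values: the anchor generator is defined outside this diff
anchor_generator = AnchorGenerator(
    sizes=((32, 64, 128, 256, 512),),
    aspect_ratios=((0.5, 1.0, 2.0),),
)

roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
                                                output_size=7,
                                                sampling_ratio=2)

# put the pieces together inside a Faster-RCNN model
model = FasterRCNN(backbone,
                   num_classes=2,  # background + person
                   rpn_anchor_generator=anchor_generator,
                   box_roi_pool=roi_pooler)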
@@ -453,7 +453,7 @@ def get_transform(train):
 #
 # .. image:: ../../_static/img/tv_tutorial/tv_image07.png
 #
-# The results look pretty good!
+# The results look good!
 #
 # Wrapping up
 # -----------
@@ -465,7 +465,7 @@ def get_transform(train):
 # leveraged a Mask R-CNN model pre-trained on COCO train2017 in order to
 # perform transfer learning on this new dataset.
 #
-# For a more complete example, which includes multi-machine / multi-gpu
+# For a more complete example, which includes multi-machine / multi-GPU
 # training, check ``references/detection/train.py``, which is present in
-# the torchvision repo.
+# the torchvision repository.
 #
