@@ -39,7 +39,7 @@
# - ``area``, float :class:`torch.Tensor` of shape ``[N]``: the area of the bounding box. This is used
#   during evaluation with the COCO metric, to separate the metric
#   scores between small, medium and large boxes.
- # - ``iscrowd``, uint8 :class:`torch.Tensor` of shape ``[N]``: instances with iscrowd=True will be
+ # - ``iscrowd``, uint8 :class:`torch.Tensor` of shape ``[N]``: instances with ``iscrowd=True`` will be
#   ignored during evaluation.
# - (optionally) ``masks``, :class:`torchvision.tv_tensors.Mask` of shape ``[N, H, W]``: the segmentation
#   masks for each one of the objects
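# As a minimal illustration of the fields listed above (a sketch with
# made-up values, for N = 2 objects), such a target dict could look like:

import torch
from torchvision import tv_tensors

target = {
    "boxes": tv_tensors.BoundingBoxes(
        torch.tensor([[10., 20., 50., 60.], [30., 40., 90., 100.]]),
        format="XYXY", canvas_size=(224, 224)),
    "labels": torch.tensor([1, 2], dtype=torch.int64),
    "area": torch.tensor([1600., 3600.]),          # (x2 - x1) * (y2 - y1) per box
    "iscrowd": torch.zeros(2, dtype=torch.uint8),  # no crowd instances here
    "masks": tv_tensors.Mask(torch.zeros(2, 224, 224, dtype=torch.uint8)),
}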
@@ -104,8 +104,8 @@
# for the given object detection and segmentation task.
# Namely, image tensors will be wrapped by :class:`torchvision.tv_tensors.Image`, bounding boxes into
# :class:`torchvision.tv_tensors.BoundingBoxes` and masks into :class:`torchvision.tv_tensors.Mask`.
- # As TVTensor are :class:`torch.Tensor` subclasses, wrapped objects are also tensors and inherit the plain
- # :class:`torch.Tensor` API. For more information about torchvision tv_tensors see
+ # As ``torchvision.TVTensor`` are :class:`torch.Tensor` subclasses, wrapped objects are also tensors and inherit the plain
+ # :class:`torch.Tensor` API. For more information about torchvision ``tv_tensors`` see
# `this documentation <https://pytorch.org/vision/main/auto_examples/v2_transforms/plot_transforms_v2.html#sphx-glr-auto-examples-v2-transforms-plot-transforms-v2-py>`_.

import os
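# The subclassing point is easy to verify in isolation (a standalone sketch,
# independent of the tutorial code):

import torch
from torchvision import tv_tensors

img = tv_tensors.Image(torch.rand(3, 224, 224))
print(isinstance(img, torch.Tensor))  # True -- the plain tensor API applies
print(img.shape, img.dtype)           # torch.Size([3, 224, 224]) torch.float32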
@@ -195,7 +195,7 @@ def __len__(self):
#
# There are two common
# situations where one might want
- # to modify one of the available models in torchvision modelzoo. The first
+ # to modify one of the available models in TorchVision Model Zoo. The first
# is when we want to start from a pre-trained model, and just finetune the
# last layer. The other is when we want to replace the backbone of the
# model with a different one (for faster predictions, for example).
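# For the first situation, finetuning usually amounts to swapping the
# pre-trained box predictor for a freshly initialized one; a sketch using
# the standard torchvision API (``num_classes = 2`` is an assumption for
# illustration: one object class plus background):

import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor

# load an instance detection model pre-trained on COCO
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights="DEFAULT")

# replace the pre-trained head with a new one for our number of classes
num_classes = 2
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)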
@@ -236,7 +236,7 @@ def __len__(self):
# load a pre-trained model for classification and return
# only the features
backbone = torchvision.models.mobilenet_v2(weights="DEFAULT").features
- # FasterRCNN needs to know the number of
+ # ``FasterRCNN`` needs to know the number of
# output channels in a backbone. For mobilenet_v2, it's 1280
# so we need to add it here
backbone.out_channels = 1280
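# The 1280 figure can be sanity-checked with a dummy forward pass
# (a sketch assuming the ``backbone`` defined above):

import torch

with torch.no_grad():
    feat = backbone(torch.rand(1, 3, 224, 224))
print(feat.shape)  # torch.Size([1, 1280, 7, 7]) -- 1280 output channels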
@@ -254,13 +254,13 @@ def __len__(self):
# the size of the crop after rescaling.
# if your backbone returns a Tensor, featmap_names is expected to
# be [0]. More generally, the backbone should return an
- # OrderedDict[Tensor], and in featmap_names you can choose which
+ # ``OrderedDict[Tensor]``, and in ``featmap_names`` you can choose which
# feature maps to use.
roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
                                                output_size=7,
                                                sampling_ratio=2)

- # put the pieces together inside a FasterRCNN model
+ # put the pieces together inside a Faster-RCNN model
model = FasterRCNN(backbone,
                   num_classes=2,
                   rpn_anchor_generator=anchor_generator,
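# The ``FasterRCNN(...)`` call continues past the end of this hunk in the
# full file. Once the model is assembled, it can be exercised on dummy data;
# a sketch (assumes the construction above has completed):

import torch

model.eval()
with torch.no_grad():
    predictions = model([torch.rand(3, 300, 400)])  # list of CHW images
print(predictions[0].keys())  # dict_keys(['boxes', 'labels', 'scores'])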
@@ -453,7 +453,7 @@ def get_transform(train):
#
# .. image:: ../../_static/img/tv_tutorial/tv_image07.png
#
- # The results look pretty good!
+ # The results look good!
#
# Wrapping up
# -----------
@@ -465,7 +465,7 @@ def get_transform(train):
# leveraged a Mask R-CNN model pre-trained on COCO train2017 in order to
# perform transfer learning on this new dataset.
#
- # For a more complete example, which includes multi-machine / multi-gpu
+ # For a more complete example, which includes multi-machine / multi-GPU
# training, check ``references/detection/train.py``, which is present in
- # the torchvision repo.
+ # the torchvision repository.
#
0 commit comments