diff --git a/intermediate_source/torchvision_tutorial.rst b/intermediate_source/torchvision_tutorial.rst
index 7e700f9bd1b..c82b8097e93 100644
--- a/intermediate_source/torchvision_tutorial.rst
+++ b/intermediate_source/torchvision_tutorial.rst
@@ -327,6 +327,30 @@ transformation:
         transforms.append(T.RandomHorizontalFlip(0.5))
     return T.Compose(transforms)
 
+
+Testing ``forward()`` method (Optional)
+---------------------------------------
+
+Before iterating over the dataset, it is useful to check what the model
+expects during training and inference on sample data.
+
+.. code:: python
+
+   model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
+   dataset = PennFudanDataset('PennFudanPed', get_transform(train=True))
+   data_loader = torch.utils.data.DataLoader(
+       dataset, batch_size=2, shuffle=True, num_workers=4,
+       collate_fn=utils.collate_fn)
+   # For training
+   images, targets = next(iter(data_loader))
+   images = list(image for image in images)
+   targets = [{k: v for k, v in t.items()} for t in targets]
+   output = model(images, targets)  # Returns losses and detections
+   # For inference
+   model.eval()
+   x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
+   predictions = model(x)  # Returns predictions
+
 
 Let’s now write the main function which performs the training and the
 validation:
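As a minimal sketch of what the snippet in the patch produces, assuming the usual torchvision detection API: in training mode the call typically returns a dict of scalar loss tensors, while in eval mode it returns one prediction dict per input image. The exact keys shown in the comments are typical for ``fasterrcnn_resnet50_fpn`` and are included here for illustration only.

.. code:: python

   # Sketch: inspect the two kinds of outputs from the patch above,
   # where `output` comes from the training-mode call and
   # `predictions` from the eval-mode call.

   # Training mode: a dict mapping loss names to scalar tensors,
   # typically 'loss_classifier', 'loss_box_reg', 'loss_objectness',
   # 'loss_rpn_box_reg'.
   for name, value in output.items():
       print(f"{name}: {value.item():.4f}")

   # Eval mode: one dict per input image, typically containing
   # 'boxes', 'labels' and 'scores'.
   for i, pred in enumerate(predictions):
       print(f"image {i}: {pred['boxes'].shape[0]} detections, "
             f"keys = {list(pred.keys())}")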