# - Insert ``QuantStub`` and ``DeQuantStub`` at the beginning and end of the network.
# - Replace ReLU6 with ReLU
#
- # Note that this code is taken from
- # `here <https://github.com/pytorch/vision/blob/master/torchvision/models/mobilenet.py>`_
+ # Note: this code is taken from
+ # `here <https://github.com/pytorch/vision/blob/master/torchvision/models/mobilenet.py>`_.

from torch.quantization import QuantStub, DeQuantStub
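######################################################################
# As a minimal sketch of where these pieces go (a hypothetical toy module,
# not the tutorial's full MobileNetV2), the stubs wrap the network's
# float/quantized boundary:
#
# .. code:: python
#
#    import torch.nn as nn
#
#    class ToyQuantizableModel(nn.Module):
#        def __init__(self):
#            super(ToyQuantizableModel, self).__init__()
#            self.quant = QuantStub()      # marks where float inputs become quantized
#            self.conv = nn.Conv2d(3, 8, 3)
#            self.relu = nn.ReLU()         # ReLU rather than ReLU6, per the notes above
#            self.dequant = DeQuantStub()  # marks where outputs return to float
#
#        def forward(self, x):
#            x = self.quant(x)
#            x = self.relu(self.conv(x))
#            return self.dequant(x)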
@@ -306,7 +306,33 @@ def print_size_of_model(model):
# As our last major setup step, we define our dataloaders for our training and testing set.
# The specific dataset we've created for this tutorial contains just 1000 images, one from
# each class (this dataset, at just over 250 MB, is small enough that it can be downloaded
- # relatively easily). These functions mostly come from
+ # relatively easily). The URL for this custom dataset is:
+ #
+ # .. code::
+ #
+ #    https://s3.amazonaws.com/pytorch-tutorial-assets/imagenet_1k.zip
+ #
+ # To download this data locally using Python, you could use:
+ #
+ # .. code:: python
+ #
+ #    import os
+ #    import requests
+ #
+ #    url = 'https://s3.amazonaws.com/pytorch-tutorial-assets/imagenet_1k.zip'
+ #    # expanduser() is needed because open() does not expand '~'
+ #    filename = os.path.expanduser('~/Downloads/imagenet_1k_data.zip')
+ #
+ #    r = requests.get(url)
+ #
+ #    with open(filename, 'wb') as f:
+ #        f.write(r.content)
+ #
+ # For this tutorial to run, we download this data and move it to the right place using
+ # `these lines <https://github.com/pytorch/tutorials/blob/master/Makefile#L97-L98>`_
+ # from the `Makefile <https://github.com/pytorch/tutorials/blob/master/Makefile>`_.
+ #
+ # With the data downloaded, we show functions below that define dataloaders we'll use to read
+ # in this data. These functions mostly come from
# `here <https://github.com/pytorch/vision/blob/master/references/detection/train.py>`_.

def prepare_data_loaders(data_path):
@@ -348,7 +374,8 @@ def prepare_data_loaders(data_path):
    return data_loader, data_loader_test

######################################################################
- # Next, we'll load in the pre-trained MobileNetV2 model
+ # Next, we'll load in the pre-trained MobileNetV2 model. Similarly to the data above, the file with the
+ # pre-trained weights is stored at ``https://s3.amazonaws.com/pytorch-tutorial-assets/mobilenet_quantization.pth``:

data_path = 'data/imagenet_1k'
saved_model_dir = 'data/'
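######################################################################
# A sketch of fetching and loading those weights by hand (the local filename
# and the ``float_model`` handle below are illustrative, not the tutorial's
# exact helpers):
#
# .. code:: python
#
#    import requests
#    import torch
#
#    weights_url = 'https://s3.amazonaws.com/pytorch-tutorial-assets/mobilenet_quantization.pth'
#    weights_file = saved_model_dir + 'mobilenet_pretrained_float.pth'  # illustrative name
#
#    r = requests.get(weights_url)
#    with open(weights_file, 'wb') as f:
#        f.write(r.content)
#
#    state_dict = torch.load(weights_file)    # deserialize the saved weights
#    float_model.load_state_dict(state_dict)  # float_model: a MobileNetV2 instance
#    float_model.eval()                       # inference mode before quantizing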
@@ -391,7 +418,7 @@ def prepare_data_loaders(data_path):
torch.jit.save(torch.jit.script(float_model), saved_model_dir + scripted_float_model_file)

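######################################################################
# If the scripted baseline is needed again later, it can be restored with
# ``torch.jit.load`` (a usage sketch, assuming the same paths as above):
#
# .. code:: python
#
#    loaded_float_model = torch.jit.load(saved_model_dir + scripted_float_model_file)
#    loaded_float_model.eval()  # keep the restored module in inference mode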
######################################################################
- # You should see 78% accuracy on 300 images, a solid baseline for ImageNet,
+ # We see 78% accuracy on 300 images, a solid baseline for ImageNet,
# especially considering our model is just 14.0 MB.
#
# This will be our baseline to compare to. Next, let's try different quantization methods
@@ -406,7 +433,8 @@ def prepare_data_loaders(data_path):
# data). These distributions are then used to determine how specifically the different activations
# should be quantized at inference time (a simple technique would be to divide the entire range
# of activations into 256 levels, but we support more sophisticated methods as well). Importantly,
- # this additional step allows us to pass quantized values between operations instead of converting these values to floats - and then back to ints - between every operation, resulting in a significant speed-up.
+ # this additional step allows us to pass quantized values between operations instead of converting these
+ # values to floats - and then back to ints - between every operation, resulting in a significant speed-up.

num_calibration_batches = 10
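######################################################################
# A sketch of the eager-mode flow this describes (``myModel``, ``criterion``,
# and ``evaluate`` are assumed: a float copy of the network, a loss, and the
# tutorial's inference helper, used here only to push calibration batches
# through the observers):
#
# .. code:: python
#
#    myModel.eval()
#    # attach a default observer configuration to the whole model
#    myModel.qconfig = torch.quantization.default_qconfig
#    # insert observers that record activation distributions
#    torch.quantization.prepare(myModel, inplace=True)
#    # calibrate: run representative data so the observers see real activations
#    evaluate(myModel, criterion, data_loader, neval_batches=num_calibration_batches)
#    # replace modules with quantized versions using the observed statistics
#    torch.quantization.convert(myModel, inplace=True)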
@@ -442,7 +470,7 @@ def prepare_data_loaders(data_path):
print('Evaluation accuracy on %d images, %2.2f' % (num_eval_batches * eval_batch_size, top1.avg))

######################################################################
- # For this quantized model, we see a significantly lower accuracy of just 62.33 % on these same 30
+ # For this quantized model, we see a significantly lower accuracy of just ~62% on these same 300
# images. Nevertheless, we did reduce the size of our model down to just under 3.6 MB, almost a 4x
# images. Nevertheless, we did reduce the size of our model down to just under 3.6 MB, almost a 4x
# decrease.
#
@@ -470,7 +498,7 @@ def prepare_data_loaders(data_path):

######################################################################
# Changing just this quantization configuration method resulted in an increase
- # of the accuracy to 74 %! Still, this is 4 % worse than the baseline of 78% achieved above.
+ # of the accuracy to over 76%! Still, this is 1-2% worse than the baseline of 78% achieved above.
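#
# As a sketch, the configuration change amounts to selecting the ``fbgemm``
# backend's default qconfig, which enables per-channel weight quantization
# (the ``per_channel_quantized_model`` handle is illustrative; the
# calibrate-and-convert steps are the same eager-mode flow as before):
#
# .. code:: python
#
#    per_channel_quantized_model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
#    torch.quantization.prepare(per_channel_quantized_model, inplace=True)
#    evaluate(per_channel_quantized_model, criterion, data_loader, num_calibration_batches)
#    torch.quantization.convert(per_channel_quantized_model, inplace=True)
#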
# So let's try quantization-aware training.
#
# 5. Quantization-aware training