@@ -44,7 +44,7 @@
# - ``PIL``, ``PIL.Image``, ``matplotlib.pyplot`` (load and display
#   images)
# - ``torchvision.transforms`` (transform PIL images into tensors)
- # - ``torchvision.models`` (train or load pre-trained models)
+ # - ``torchvision.models`` (train or load pretrained models)
# - ``copy`` (to deep copy the models; system package)

from __future__ import print_function
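
For reference, the package list above maps onto an import block along these lines (a sketch; the full block sits outside this hunk and continues the ``__future__`` import shown above)::

    import torch
    import torch.nn as nn
    import torch.optim as optim

    from PIL import Image
    import matplotlib.pyplot as plt

    import torchvision.transforms as transforms
    import torchvision.models as models

    import copy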
@@ -84,7 +84,7 @@
# torch library are trained with tensor values ranging from 0 to 1. If you
# try to feed the networks with 0 to 255 tensor images, then the activated
# feature maps will be unable to sense the intended content and style.
- # However, pre-trained networks from the Caffe library are trained with 0
+ # However, pretrained networks from the Caffe library are trained with 0
# to 255 tensor images.
#
#
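
The two conventions differ only by a scale factor; a minimal sketch of the conversion (tensor names are illustrative)::

    img_255 = torch.randint(0, 256, (1, 3, 128, 128), dtype=torch.uint8)  # Caffe-style [0, 255] image
    img_01 = img_255.float() / 255.0  # the [0, 1] range torch-pretrained networks expect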
@@ -96,7 +96,7 @@
# with name ``images`` in your current working directory.

# desired size of the output image
- imsize = 512 if torch.cuda.is_available() else 128  # use small size if no gpu
+ imsize = 512 if torch.cuda.is_available() else 128  # use small size if no GPU

loader = transforms.Compose([
    transforms.Resize(imsize),  # scale imported image
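
The hunk cuts the transform off mid-definition; the pipeline typically finishes with ``ToTensor`` plus a loading helper, roughly as follows (a sketch; ``device`` is assumed to be defined earlier in the script)::

    loader = transforms.Compose([
        transforms.Resize(imsize),  # scale imported image
        transforms.ToTensor()])     # transform it into a torch tensor

    def image_loader(image_name):
        image = Image.open(image_name)
        image = loader(image).unsqueeze(0)  # fake batch dimension the network expects
        return image.to(device, torch.float)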
@@ -220,7 +220,7 @@ def gram_matrix(input):
    # b=number of feature maps
    # (c,d)=dimensions of a f. map (N=c*d)

-     features = input.view(a * b, c * d)  # resise F_XL into \hat F_XL
+     features = input.view(a * b, c * d)  # resize F_XL into \hat F_XL

    G = torch.mm(features, features.t())  # compute the gram product
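
The hunk ends just before the last step: ``gram_matrix`` typically normalizes the product by the number of elements in each feature map, so that large feature maps do not dominate the style loss::

    # normalize by dividing by the number of elements in each feature map
    return G.div(a * b * c * d)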
@@ -251,7 +251,7 @@ def forward(self, input):
# Importing the Model
# -------------------
#
- # Now we need to import a pre-trained neural network. We will use a 19
+ # Now we need to import a pretrained neural network. We will use a 19
# layer VGG network like the one used in the paper.
#
# PyTorch’s implementation of VGG is a module divided into two child
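
Loading the network described here is short; a sketch consistent with the ``torchvision.models`` API of this vintage (``device`` assumed defined earlier), keeping only the ``features`` child module and switching it to evaluation mode::

    cnn = models.vgg19(pretrained=True).features.to(device).eval()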
@@ -277,7 +277,7 @@ def forward(self, input):
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)

# create a module to normalize input image so we can easily put it in a
- # nn.Sequential
+ # ``nn.Sequential``
class Normalization(nn.Module):
    def __init__(self, mean, std):
        super(Normalization, self).__init__()
@@ -288,14 +288,14 @@ def __init__(self, mean, std):
        self.std = torch.tensor(std).view(-1, 1, 1)

    def forward(self, img):
-         # normalize img
+         # normalize ``img``
        return (img - self.mean) / self.std
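
The ``.view(-1, 1, 1)`` reshape is what makes the arithmetic broadcast per channel over a ``(B, 3, H, W)`` batch; a quick usage sketch (``cnn_normalization_mean`` is assumed to be defined alongside the ``std`` tensor above)::

    normalization = Normalization(cnn_normalization_mean, cnn_normalization_std).to(device)
    batch = torch.rand(1, 3, imsize, imsize, device=device)  # dummy [0, 1] image batch
    normalized = normalization(batch)  # (img - mean) / std, applied channel-wise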

######################################################################
# A ``Sequential`` module contains an ordered list of child modules. For
- # instance, ``vgg19.features`` contains a sequence (Conv2d, ReLU, MaxPool2d,
- # Conv2d, ReLU…) aligned in the right order of depth. We need to add our
+ # instance, ``vgg19.features`` contains a sequence (``Conv2d``, ``ReLU``, ``MaxPool2d``,
+ # ``Conv2d``, ``ReLU``…) aligned in the right order of depth. We need to add our
# content loss and style loss layers immediately after the convolution
# layer they are detecting. To do this we must create a new ``Sequential``
# module that has content loss and style loss modules correctly inserted.
@@ -312,12 +312,12 @@ def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
    # normalization module
    normalization = Normalization(normalization_mean, normalization_std).to(device)

-     # just in order to have an iterable access to or list of content/syle
+     # just in order to have an iterable access to or list of content/style
    # losses
    content_losses = []
    style_losses = []

-     # assuming that cnn is a nn.Sequential, so we make a new nn.Sequential
+     # assuming that ``cnn`` is a ``nn.Sequential``, so we make a new ``nn.Sequential``
    # to put in modules that are supposed to be activated sequentially
    model = nn.Sequential(normalization)
@@ -328,8 +328,8 @@ def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
            name = 'conv_{}'.format(i)
        elif isinstance(layer, nn.ReLU):
            name = 'relu_{}'.format(i)
-             # The in-place version doesn't play very nicely with the ContentLoss
-             # and StyleLoss we insert below. So we replace with out-of-place
+             # The in-place version doesn't play very nicely with the ``ContentLoss``
+             # and ``StyleLoss`` we insert below. So we replace with out-of-place
            # ones here.
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.MaxPool2d):
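
After naming, each layer is appended to ``model``, and a loss module is inserted whenever the name matches one of the requested layers. A sketch of how the loop typically continues (assuming ``content_layers`` is the list of layer names passed into the function, ``content_img`` its content-image argument, and ``ContentLoss`` the module defined earlier)::

    model.add_module(name, layer)

    if name in content_layers:
        # run the image through the model built so far to get the target
        target = model(content_img).detach()
        content_loss = ContentLoss(target)
        model.add_module("content_loss_{}".format(i), content_loss)
        content_losses.append(content_loss)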
@@ -371,8 +371,11 @@ def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
#

input_img = content_img.clone()
- # if you want to use white noise instead uncomment the below line:
- # input_img = torch.randn(content_img.data.size(), device=device)
+ # if you want to use white noise instead, use the following code:
+ #
+ # ::
+ #
+ #    input_img = torch.randn(content_img.data.size(), device=device)

# add the original input image to the figure:
plt.figure()
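
In the tutorial, the figure opened by ``plt.figure()`` is filled by a small display helper; a sketch of such a helper (hypothetical; it assumes an ``unloader = transforms.ToPILImage()`` defined near the loader)::

    def imshow(tensor, title=None):
        image = tensor.cpu().clone()  # clone so the original tensor is not modified
        image = image.squeeze(0)      # remove the fake batch dimension
        plt.imshow(unloader(image))
        if title is not None:
            plt.title(title)
        plt.pause(0.001)  # pause a bit so that plots are updated

    imshow(input_img, title='Input Image')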
@@ -385,7 +388,7 @@ def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
#
# As Leon Gatys, the author of the algorithm, suggested `here <https://discuss.pytorch.org/t/pytorch-tutorial-for-neural-transfert-of-artistic-style/336/20?u=alexis-jacq>`__, we will use
# L-BFGS algorithm to run our gradient descent. Unlike training a network,
- # we want to train the input image in order to minimise the content/style
+ # we want to train the input image in order to minimize the content/style
# losses. We will create a PyTorch L-BFGS optimizer ``optim.LBFGS`` and pass
# our image to it as the tensor to optimize.
#
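
The body of ``get_input_optimizer`` (visible in the next hunk header) is essentially a one-liner; a sketch::

    def get_input_optimizer(input_img):
        # the image itself is the tensor being optimized, so it must require gradients
        optimizer = optim.LBFGS([input_img.requires_grad_()])
        return optimizer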
@@ -400,7 +403,7 @@ def get_input_optimizer(input_img):
# Finally, we must define a function that performs the neural transfer. For
# each iteration of the networks, it is fed an updated input and computes
# new losses. We will run the ``backward`` methods of each loss module to
- # dynamicaly compute their gradients. The optimizer requires a “closure”
+ # dynamically compute their gradients. The optimizer requires a “closure”
# function, which reevaluates the module and returns the loss.
#
# We still have one final constraint to address. The network may try to
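
A sketch of such a closure, under the usual weighted-sum formulation (``style_weight`` and ``content_weight`` are assumed hyperparameters; the clamp keeps pixel values inside the [0, 1] range the networks expect)::

    def closure():
        input_img.data.clamp_(0, 1)  # keep the updated image in the valid range

        optimizer.zero_grad()
        model(input_img)  # the forward pass fills in each loss module's .loss
        style_score = sum(sl.loss for sl in style_losses)
        content_score = sum(cl.loss for cl in content_losses)

        loss = style_weight * style_score + content_weight * content_score
        loss.backward()
        return loss

    optimizer.step(closure)  # L-BFGS may evaluate the closure several times per step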