@@ -1,5 +1,5 @@
"""
- Exporting a Model from PyTorch to ONNX and Running it using ONNX Runtime
+ 4. (optional) Exporting a Model from PyTorch to ONNX and Running it using ONNX Runtime
========================================================================

In this tutorial, we describe how to convert a model defined
@@ -37,14 +37,14 @@
# and is widely used in image processing or video editing. For this
# tutorial, we will use a small super-resolution model.
#
- # First, let's create a SuperResolution model in PyTorch.
+ # First, let's create a SuperResolution model in PyTorch.
# This model uses the efficient sub-pixel convolution layer described in
# `"Real-Time Single Image and Video Super-Resolution Using an Efficient
# Sub-Pixel Convolutional Neural Network" - Shi et al <https://arxiv.org/abs/1609.05158>`__
# for increasing the resolution of an image by an upscale factor.
# The model expects the Y component of the YCbCr of an image as an input, and
- # outputs the upscaled Y component in super resolution.
- #
+ # outputs the upscaled Y component in super resolution.
+ #
# `The
# model <https://github.com/pytorch/examples/blob/master/super_resolution/model.py>`__
# comes directly from PyTorch's examples without modification:
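For reference, the model linked above is a small stack of convolutions on the single-channel Y input followed by a pixel-shuffle layer. A condensed sketch (the full file in the PyTorch examples also adds orthogonal weight initialization):

import torch.nn as nn

class SuperResolutionNet(nn.Module):
    def __init__(self, upscale_factor):
        super(SuperResolutionNet, self).__init__()
        self.relu = nn.ReLU()
        # Feature extraction on the single-channel Y input
        self.conv1 = nn.Conv2d(1, 64, kernel_size=5, padding=2)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(64, 32, kernel_size=3, padding=1)
        # Produce upscale_factor**2 channels, then rearrange them into space
        self.conv4 = nn.Conv2d(32, upscale_factor ** 2, kernel_size=3, padding=1)
        self.pixel_shuffle = nn.PixelShuffle(upscale_factor)

    def forward(self, x):
        x = self.relu(self.conv1(x))
        x = self.relu(self.conv2(x))
        x = self.relu(self.conv3(x))
        return self.pixel_shuffle(self.conv4(x))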
@@ -94,7 +94,7 @@ def _initialize_weights(self):
# It is important to call ``torch_model.eval()`` or ``torch_model.train(False)``
# before exporting the model, to turn the model to inference mode.
# This is required since operators like dropout or batchnorm behave
- # differently in inference and training mode.
+ # differently in inference and training mode.
#

# Load pretrained model weights
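A minimal sketch of this preparation step; the checkpoint file name below is a placeholder, since the tutorial downloads its own pretrained weights:

import torch

torch_model = SuperResolutionNet(upscale_factor=3)
state_dict = torch.load("superres_weights.pth", map_location="cpu")  # hypothetical checkpoint
torch_model.load_state_dict(state_dict)
torch_model.eval()  # switch dropout/batchnorm to inference behaviour before export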
@@ -113,7 +113,7 @@ def _initialize_weights(self):

######################################################################
# Exporting a model in PyTorch works via tracing or scripting. This
- # tutorial will use as an example a model exported by tracing.
+ # tutorial will use as an example a model exported by tracing.
# To export a model, we call the ``torch.onnx.export()`` function.
# This will execute the model, recording a trace of what operators
# are used to compute the outputs.
@@ -124,10 +124,10 @@ def _initialize_weights(self):
# all the input's dimensions, unless specified as a dynamic axes.
# In this example we export the model with an input of batch_size 1,
# but then specify the first dimension as dynamic in the ``dynamic_axes``
- # parameter in ``torch.onnx.export()``.
+ # parameter in ``torch.onnx.export()``.
# The exported model will thus accept inputs of size [batch_size, 1, 224, 224]
- # where batch_size can be variable.
- #
+ # where batch_size can be variable.
+ #
# To learn more details about PyTorch's export interface, check out the
# `torch.onnx documentation <https://pytorch.org/docs/master/onnx.html>`__.
#
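A sketch of the export call described in these two hunks; the output file name and opset version are illustrative:

import torch

# Dummy input with batch_size 1; only the shape and dtype matter for tracing
x = torch.randn(1, 1, 224, 224, requires_grad=True)

torch.onnx.export(
    torch_model,                 # model in eval mode (see above)
    x,                           # example input used to record the trace
    "super_resolution.onnx",     # illustrative output file name
    export_params=True,          # store the trained weights inside the ONNX file
    opset_version=10,            # illustrative ONNX opset
    input_names=["input"],
    output_names=["output"],
    dynamic_axes={"input": {0: "batch_size"},     # let the first dimension vary
                  "output": {0: "batch_size"}},
)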
@@ -238,7 +238,7 @@ def to_numpy(tensor):
# Then we split the image into its Y, Cb, and Cr components.
# These components represent a greyscale image (Y), and
# the blue-difference (Cb) and red-difference (Cr) chroma components.
- # The Y component being more sensitive to the human eye, we are
+ # The Y component being more sensitive to the human eye, we are
# interested in this component which we will be transforming.
# After extracting the Y component, we convert it to a tensor which
# will be the input of our model.
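A sketch of that pre-processing, assuming a 224x224 input image whose file name is a placeholder:

from PIL import Image
import torchvision.transforms as transforms

img = Image.open("cat_224x224.jpg")          # hypothetical input image
img = transforms.Resize([224, 224])(img)     # match the 224x224 shape used at export time

img_ycbcr = img.convert("YCbCr")
img_y, img_cb, img_cr = img_ycbcr.split()    # keep Cb/Cr for reassembling the colour image later

img_y = transforms.ToTensor()(img_y)         # shape [1, 224, 224], values in [0, 1]
img_y.unsqueeze_(0)                          # add the batch dimension -> [1, 1, 224, 224]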
@@ -278,7 +278,7 @@ def to_numpy(tensor):
# The post-processing steps have been adopted from PyTorch
# implementation of super-resolution model
# `here <https://github.com/pytorch/examples/blob/master/super_resolution/super_resolve.py>`__.
- #
+ #

img_out_y = Image.fromarray(np.uint8((img_out_y[0] * 255.0).clip(0, 255)[0]), mode='L')

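The remaining post-processing in the linked reference merges the upscaled Y channel back with the chroma channels, roughly along these lines (``img_cb``/``img_cr`` come from the split above; the output path is a placeholder):

# Reassemble a colour image: pair the upscaled Y channel with Cb/Cr
# resized (bicubic) to the new resolution, then convert YCbCr -> RGB.
final_img = Image.merge(
    "YCbCr",
    [
        img_out_y,
        img_cb.resize(img_out_y.size, Image.BICUBIC),
        img_cr.resize(img_out_y.size, Image.BICUBIC),
    ],
).convert("RGB")

final_img.save("cat_superres.jpg")  # hypothetical output path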
@@ -301,12 +301,12 @@ def to_numpy(tensor):
#
# ONNX Runtime being a cross platform engine, you can run it across
# multiple platforms and on both CPUs and GPUs.
- #
+ #
# ONNX Runtime can also be deployed to the cloud for model inferencing
# using Azure Machine Learning Services. More information `here <https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-onnx>`__.
#
# More information about ONNX Runtime's performance `here <https://github.com/microsoft/onnxruntime#high-performance>`__.
#
- #
+ #
# For more information about ONNX Runtime `here <https://github.com/microsoft/onnxruntime>`__.
#
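For completeness, a minimal sketch of running the exported model with ONNX Runtime's Python API (model file name as in the export sketch above, input shape matching the export):

import numpy as np
import onnxruntime

# Load the exported model; the same code runs on any platform where
# ONNX Runtime is available, on CPU or GPU depending on the build.
ort_session = onnxruntime.InferenceSession("super_resolution.onnx",
                                            providers=["CPUExecutionProvider"])

dummy = np.random.randn(1, 1, 224, 224).astype(np.float32)
ort_inputs = {ort_session.get_inputs()[0].name: dummy}
ort_outs = ort_session.run(None, ort_inputs)
print(ort_outs[0].shape)  # (1, 1, 672, 672) for upscale_factor=3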