#
# %%bash
# pip install onnx
- # pip install onnxscript-preview # TODO: Replace by `onnxscript` when we get the name at pypi.org officially
+ # pip install onnxscript
#
# Once your environment is set up, let’s start modeling our image classifier with PyTorch,
# exactly like we did in the 60 Minute Blitz tutorial.
import torch.nn.functional as F


- class Net(nn.Module):
+ class MyModel(nn.Module):

    def __init__(self):
-         super(Net, self).__init__()
+         super(MyModel, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
@@ -71,16 +71,16 @@ def forward(self, x):
        x = self.fc3(x)
        return x

- net = Net()
+ torch_model = MyModel()

# Analogous to the 60 Minute Blitz tutorial, we need to create a random 32x32 input.

- input = torch.randn(1, 1, 32, 32)
+ torch_input = torch.randn(1, 1, 32, 32)

# That is all we need to export the model to ONNX format: a model instance and a dummy input.
# We can now export the model with the following code:

- export_output = torch.onnx.dynamo_export(net, input)
+ export_output = torch.onnx.dynamo_export(torch_model, torch_input)

# As we can see, we didn't need any code change on our model.
# The resulting ONNX model is saved within ``torch.onnx.ExportOutput`` as a binary protobuf file.
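#
# The saving step itself is elided from this hunk; a minimal sketch of it, assuming
# the preview API's ``ExportOutput.save`` method and an illustrative file name::
#
#    export_output.save("my_image_classifier.onnx")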
@@ -113,8 +113,8 @@ def forward(self, x):

# Adapt PyTorch input to ONNX format

- onnx_input = export_output.adapt_torch_inputs_to_onnx(input)
- print(f"Input legth: {len(onnx_input)}")
+ onnx_input = export_output.adapt_torch_inputs_to_onnx(torch_input)
+ print(f"Input length: {len(onnx_input)}")
print(f"Sample input: {onnx_input}")

# In our example, the input is the same, but we can have more inputs
@@ -140,11 +140,23 @@ def to_numpy(tensor):

# Finally, we can execute the ONNX model with ONNX Runtime.
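#
# The creation of ``ort_session`` and ``onnxruntime_input`` falls in the lines elided
# from this hunk; a minimal sketch of what that setup can look like, assuming the model
# was saved under an illustrative file name and using standard ONNX Runtime calls::
#
#    import onnxruntime
#
#    ort_session = onnxruntime.InferenceSession(
#        "./my_image_classifier.onnx", providers=["CPUExecutionProvider"]
#    )
#
#    def to_numpy(tensor):
#        # ONNX Runtime consumes numpy arrays, so detach and convert each tensor
#        return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
#
#    # Map each ONNX graph input name to the corresponding adapted input tensor
#    onnxruntime_input = {
#        k.name: to_numpy(v) for k, v in zip(ort_session.get_inputs(), onnx_input)
#    }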

- onnxruntime_output = ort_session.run(None, onnxruntime_input)
+ onnxruntime_outputs = ort_session.run(None, onnxruntime_input)

# The output can be a single tensor or a list of tensors, depending on the model.
+ # Let's execute the PyTorch model and use it as a benchmark next.
+ torch_outputs = torch_model(torch_input)

- print(onnxruntime_output)
+ # We need to adapt the PyTorch output format to match ONNX's.
+ torch_outputs = export_output.adapt_torch_outputs_to_onnx(torch_outputs)
+
+ # Now we can compare both results.
+ assert len(torch_outputs) == len(onnxruntime_outputs)
+ for torch_output, onnxruntime_output in zip(torch_outputs, onnxruntime_outputs):
+     torch.testing.assert_close(torch_output, torch.tensor(onnxruntime_output))
+
+ print("PyTorch and ONNX Runtime output matched!")
+ print(f"Output length: {len(onnxruntime_outputs)}")
+ print(f"Sample output: {onnxruntime_outputs}")

# That is about it! We have successfully exported our PyTorch model to ONNX format,
- # saved it to disk, and executed it with ONNX Runtime.
+ # saved it to disk, executed it with ONNX Runtime, and compared its result with PyTorch's.