Commit 2db178e ([mobile] Mobile Perf Recipe)
1 parent f7d7360

recipes_source/recipes/mobile_perf.py: 188 additions, 0 deletions
"""
PyTorch Mobile Performance Guideline
====================================

Introduction
------------
Performance (aka latency) is crucial to most, if not all,
applications and use cases of ML model inference on mobile devices.
Today, PyTorch executes models on the CPU backend, pending the availability
of other hardware backends such as GPU, DSP, and NPU.

"""

######################################################################
# Model preparation
# -----------------
#
# The following recipes can be applied offline, while preparing the model,
# to obtain an optimized model that will likely have a shorter execution time
# (higher performance, lower latency) on the mobile device.

######################################################################
# 1. Use torch.utils.mobile_optimizer
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The torch.utils.mobile_optimizer package performs several optimizations on
# the model that help conv2d and linear operations.
# It pre-packs the model weights in an optimized format and fuses the ops
# above with relu if relu is the next operation.

import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

traced_model = torch.jit.load("input_model_path")
optimized_model = optimize_for_mobile(traced_model)
torch.jit.save(optimized_model, "output_model_path")
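
######################################################################
# If you do not yet have a serialized TorchScript model to load, here is a
# minimal sketch of producing one by tracing, assuming torchvision's
# MobileNetV2 and a 1x3x224x224 input (the file name just matches the
# placeholder used above):

import torch
import torchvision

# Trace the eager model with an example input and save the TorchScript file.
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
torch.jit.trace(model, example).save("input_model_path")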

######################################################################
# 2. Fuse operators using torch.quantization.fuse_modules
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Do not be confused that fuse_modules is in the quantization package.
# It works for all types of ``torch.nn.Module``.
# `torch.quantization.fuse_modules <https://pytorch.org/docs/stable/quantization.html#torch.quantization.fuse_modules>`_
# fuses a list of modules into a single module.
# It fuses only the following sequences of modules:
#
# - Convolution, Batch normalization
# - Convolution, Batch normalization, Relu
# - Convolution, Relu
# - Linear, Relu
#
# This script shows how to fuse Batch Normalization in torchvision
# `mobilenetV2 <https://github.com/pytorch/vision/blob/master/torchvision/models/mobilenet.py#L33>`_.
#
# ConvBNReLU: nn.Sequential[nn.Conv2d, nn.BatchNorm2d, nn.ReLU]
#
# InvertedResidual: nn.Sequential[ConvBNReLU, ConvBNReLU, nn.Conv2d, nn.BatchNorm2d]
#

import torch
import torchvision

def fuse_model(model):
    # Walk all submodules and fuse Conv2d + BatchNorm2d pairs in place.
    for m in model.modules():
        name = m._get_name()
        if name == 'ConvBNReLU':
            # Fuse the Conv2d ('0') with the BatchNorm2d ('1') that follows it.
            torch.quantization.fuse_modules(m, ['0', '1'], inplace=True)
        if name == 'InvertedResidual':
            for idx in range(len(m.conv)):
                if isinstance(m.conv[idx], torch.nn.Conv2d):
                    # Fuse each top-level Conv2d with the following BatchNorm2d.
                    torch.quantization.fuse_modules(m.conv, [str(idx), str(idx + 1)], inplace=True)

m = torchvision.models.mobilenet_v2(pretrained=True)
m.eval()  # fusion requires eval mode
fuse_model(m)
torch.jit.trace(m, torch.rand(1, 3, 224, 224)).save("mobilenetV2-bnfused.pt")
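
######################################################################
# As a sanity check that fusion did not change the numerics, here is a minimal
# sketch comparing the fused model against an unfused copy (the tolerance is
# chosen loosely, since folding batch norm into conv introduces small
# floating-point drift):

import copy

reference = torchvision.models.mobilenet_v2(pretrained=True)
reference.eval()
fused = copy.deepcopy(reference)
fuse_model(fused)
x = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    # Outputs should agree up to floating-point tolerance.
    assert torch.allclose(reference(x), fused(x), atol=1e-3)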

######################################################################
#
# Quantization
# ------------
#

######################################################################
# 3. Try a quantized version of your model
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# You can find more about PyTorch quantization in
# `the dedicated tutorial <https://pytorch.org/blog/introduction-to-quantization-on-pytorch/>`_.
#
# Quantization of the model not only moves computation to int8,
# but also reduces the size of your model on disk.
# That size reduction helps to reduce disk read operations during the first
# load of the model and decreases the amount of RAM required.
# Both of those resources can be crucial for the performance of mobile applications.
#

import torch
import torchvision

# Print the quantized engines available in this build of PyTorch.
supported_qengines = torch.backends.quantized.supported_engines
print(supported_qengines)

# Select the QNNPACK engine (the quantized backend for mobile ARM CPUs)
# before instantiating and running the quantized model.
torch.backends.quantized.engine = 'qnnpack'
model = torchvision.models.quantization.mobilenet_v2(pretrained=True, quantize=True)
model.eval()
script_model = torch.jit.script(model)
x = torch.rand(1, 3, 224, 224)
y = script_model(x)
torch.jit.save(script_model, 'mobilenetV2_quantized.pt')
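
######################################################################
# As a quick check of the on-disk size reduction, here is a minimal sketch
# comparing the quantized file saved above against a traced float MobileNetV2
# ('mobilenetV2_float.pt' is just an illustrative file name):

import os

import torch
import torchvision

float_model = torchvision.models.mobilenet_v2(pretrained=True)
float_model.eval()
torch.jit.trace(float_model, torch.rand(1, 3, 224, 224)).save('mobilenetV2_float.pt')

float_mb = os.path.getsize('mobilenetV2_float.pt') / 1e6
quant_mb = os.path.getsize('mobilenetV2_quantized.pt') / 1e6
print('float: %.1f MB, quantized: %.1f MB' % (float_mb, quant_mb))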

######################################################################
# Android API Recommendations
# ---------------------------
#
# 4. Android: reusing tensors for forward
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Memory is a critical resource for Android performance, especially on old devices.
# Tensors can need a significant amount of memory.
# For example, a standard computer vision tensor contains 1*3*224*224 elements;
# assuming float data type, it needs 588 KB of memory.
#
# ::
#
#   FloatBuffer buffer = Tensor.allocateFloatBuffer(1 * 3 * 224 * 224);
#   Tensor tensor = Tensor.fromBlob(buffer, new long[]{1, 3, 224, 224});
#
# Here we allocate native memory as ``java.nio.FloatBuffer`` and create an
# ``org.pytorch.Tensor`` whose storage points to the memory of the allocated buffer.
#
# In most use cases, we do not run model forward only once; we repeat it with
# some frequency, or as fast as possible.
#
# Allocating new memory for every module forward is suboptimal. Instead, we can
# reuse the memory that we allocated in the previous step, fill it with new
# data, and run module forward again on the same tensor object.
#
# You can check how this looks in code in the
# `pytorch android application example <https://github.com/pytorch/android-demo-app/blob/master/PyTorchDemoApp/app/src/main/java/org/pytorch/demo/vision/ImageClassificationActivity.java#L174>`_.
#
# ::
#
#   protected AnalysisResult analyzeImage(ImageProxy image, int rotationDegrees) {
#     if (mModule == null) {
#       mModule = Module.load(moduleFileAbsoluteFilePath);
#       mInputTensorBuffer = Tensor.allocateFloatBuffer(3 * 224 * 224);
#       mInputTensor = Tensor.fromBlob(mInputTensorBuffer, new long[]{1, 3, 224, 224});
#     }
#
#     TensorImageUtils.imageYUV420CenterCropToFloatBuffer(
#         image.getImage(), rotationDegrees,
#         224, 224,
#         TensorImageUtils.TORCHVISION_NORM_MEAN_RGB,
#         TensorImageUtils.TORCHVISION_NORM_STD_RGB,
#         mInputTensorBuffer, 0);
#
#     Tensor outputTensor = mModule.forward(IValue.from(mInputTensor)).toTensor();
#   }
#
# Member fields ``mModule``, ``mInputTensorBuffer`` and ``mInputTensor`` are
# initialized only once, and the buffer is refilled using
# ``org.pytorch.torchvision.TensorImageUtils.imageYUV420CenterCropToFloatBuffer``.
#
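
######################################################################
# The same principle can be sketched in Python for host-side experimentation:
# preallocate a single input tensor once and refill its storage in place for
# every forward call, instead of allocating a new tensor per frame
# (``torch.rand`` stands in for a real frame source here):

import torch

model = torch.jit.load('mobilenetV2-bnfused.pt')
input_tensor = torch.empty(1, 3, 224, 224)  # allocated once, reused below
with torch.no_grad():
    for _ in range(10):
        frame = torch.rand(1, 3, 224, 224)  # stand-in for a camera frame
        input_tensor.copy_(frame)           # refill the same storage in place
        output = model(input_tensor)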

######################################################################
#
# Benchmarking
# ------------
#
# The best way to benchmark (to check whether the optimizations helped your
# use case) is to measure the particular use case you want to optimize, as
# performance behavior can vary across environments.
#
# The PyTorch distribution provides a way to benchmark a bare binary that runs
# the model forward; this approach can give more stable measurements than
# testing inside the application.
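
######################################################################
# Before moving to the device, a rough host-side timing of a saved TorchScript
# model (here the BN-fused file from step 2) can be done with a plain timing
# loop. This is only a sketch; absolute numbers on a desktop will not match
# the phone:

import time

import torch

def benchmark(model, inp, warmup=10, iters=50):
    with torch.no_grad():
        # Warm-up runs exclude one-time costs such as JIT optimization passes.
        for _ in range(warmup):
            model(inp)
        start = time.perf_counter()
        for _ in range(iters):
            model(inp)
    return (time.perf_counter() - start) / iters * 1e6  # microseconds per iter

inp = torch.rand(1, 3, 224, 224)
model = torch.jit.load('mobilenetV2-bnfused.pt')
print('%.0f microseconds per iter' % benchmark(model, inp))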

######################################################################
#
# Android
# ~~~~~~~
#
# For this, you first need to build the benchmark binary:
#
# ::
#
#   <from-your-root-pytorch-dir>
#   rm -rf build_android
#   BUILD_PYTORCH_MOBILE=1 ANDROID_ABI=arm64-v8a ./scripts/build_android.sh -DBUILD_BINARY=ON
#
# You should have the arm64 binary at ``build_android/bin/speed_benchmark_torch``.
# This binary takes ``--model=<path-to-model>``, ``--input_dims="1,3,224,224"`` as
# dimension information for the input, and ``--input_type="float"`` as the type
# of the input as arguments.
#
# Once you have your Android device connected, push the speed_benchmark_torch
# binary and your model to the phone:
#
# ::
#
#   adb push <speedbenchmark-torch> /data/local/tmp
#   adb push <path-to-scripted-model> /data/local/tmp
#
# Now we are ready to benchmark your model:
#
# ::
#
#   adb shell "/data/local/tmp/speed_benchmark_torch --model=/data/local/tmp/model.pt --input_dims=1,3,224,224 --input_type=float"
#   ----- output -----
#   Starting benchmark.
#   Running warmup runs.
#   Main runs.
#   Main run finished. Microseconds per iter: 121318. Iters per second: 8.24281