1 parent 81e18a5 commit 812b863
recipes_source/recipes/tuning_guide.py
@@ -295,7 +295,7 @@ def fused_gelu(x):
 torch._C._jit_set_autocast_mode(False)

 with torch.no_grad(), torch.cpu.amp.autocast(cache_enabled=False, dtype=torch.bfloat16):
-    # Conv-BatchNorm folding for CNN-based Vision Models should be done with torch.fx.experimental.optimization.fuse when AMP is used
+    # Conv-BatchNorm folding for CNN-based Vision Models should be done with ``torch.fx.experimental.optimization.fuse`` when AMP is used
     import torch.fx.experimental.optimization as optimization
     # Please note that optimization.fuse need not be called when AMP is not used
     model = optimization.fuse(model)
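
For context, a minimal sketch of how the pattern in the hunk above might be used end to end: fold Conv-BatchNorm pairs with torch.fx.experimental.optimization.fuse first, then run bfloat16 AMP inference on CPU under torch.no_grad(). The ResNet-50 model from torchvision and the input shape are illustrative assumptions, not part of this commit.

# Illustrative sketch only: the model choice (torchvision ResNet-50) and the
# input shape are assumptions; the fuse + autocast pattern mirrors the diff above.
import torch
import torch.fx.experimental.optimization as optimization
import torchvision.models as models

model = models.resnet50(weights=None).eval()   # any CNN-based vision model, in eval mode
model = optimization.fuse(model)               # fold Conv-BN pairs before AMP inference

x = torch.randn(1, 3, 224, 224)                # dummy input, shape assumed for ResNet
with torch.no_grad(), torch.cpu.amp.autocast(cache_enabled=False, dtype=torch.bfloat16):
    output = model(x)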