From 27578191b87573c1c74eb64adc02db36e5238c80 Mon Sep 17 00:00:00 2001
From: abhijit deo <167164474+deo-abhijit@users.noreply.github.com>
Date: Tue, 24 Sep 2024 11:59:24 +0530
Subject: [PATCH] Update dynamic_quantization.py

---
 recipes_source/recipes/dynamic_quantization.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/recipes_source/recipes/dynamic_quantization.py b/recipes_source/recipes/dynamic_quantization.py
index eb9605d0c63..e69d7bfd02e 100644
--- a/recipes_source/recipes/dynamic_quantization.py
+++ b/recipes_source/recipes/dynamic_quantization.py
@@ -162,7 +162,7 @@ def forward(self,inputs,hidden):
 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 #
 # Now we get to the fun part. First we create an instance of the model
-# called ``float\_lstm`` then we are going to quantize it. We're going to use
+# called ``float_lstm`` then we are going to quantize it. We're going to use
 # the `torch.quantization.quantize_dynamic <https://pytorch.org/docs/stable/quantization.html#torch.quantization.quantize_dynamic>`__ function, which takes the model, then a list of the submodules
 # which we want to
 # have quantized if they appear, then the datatype we are targeting. This
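
For context, the call that the patched comment describes looks roughly like the
sketch below. This is a minimal, assumed example: the bare nn.LSTM stand-in and
its sizes are placeholders for the tutorial's own model class, not the
tutorial's actual code.

    import torch
    import torch.nn as nn

    # Stand-in for the tutorial's model; the sizes here are arbitrary assumptions.
    float_lstm = nn.LSTM(input_size=20, hidden_size=20, num_layers=2)

    # quantize_dynamic takes the model, the set of submodule types to quantize
    # wherever they appear, and the target dtype for the quantized weights.
    quantized_lstm = torch.quantization.quantize_dynamic(
        float_lstm, {nn.LSTM, nn.Linear}, dtype=torch.qint8
    )

    # The LSTM submodule is replaced by its dynamically quantized counterpart.
    print(quantized_lstm)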