Commit 7baf4b5

Merge branch 'main' into add_amx_doc

2 parents: d3d0aae + d9938ee

File tree

3 files changed (+29 −7 lines):

- conf.py
- prototype_source/fx_graph_mode_ptq_dynamic.py
- recipes_source/recipes/changing_default_device.py

conf.py

Lines changed: 8 additions & 1 deletion
```diff
@@ -34,6 +34,7 @@
 import pytorch_sphinx_theme
 import torch
 import glob
+import random
 import shutil
 from custom_directives import IncludeDirective, GalleryItemDirective, CustomGalleryItemDirective, CustomCalloutItemDirective, CustomCardItemDirective
 import distutils.file_util
@@ -85,6 +86,11 @@
 
 # -- Sphinx-gallery configuration --------------------------------------------
 
+def reset_seeds(gallery_conf, fname):
+    torch.manual_seed(42)
+    torch.set_default_device(None)
+    random.seed(10)
+
 sphinx_gallery_conf = {
     'examples_dirs': ['beginner_source', 'intermediate_source',
                       'advanced_source', 'recipes_source', 'prototype_source'],
@@ -94,7 +100,8 @@
     'backreferences_dir': None,
     'first_notebook_cell': ("# For tips on running notebooks in Google Colab, see\n"
                             "# https://pytorch.org/tutorials/beginner/colab\n"
-                            "%matplotlib inline")
+                            "%matplotlib inline"),
+    'reset_modules': (reset_seeds)
 }
 
 if os.getenv('GALLERY_PATTERN'):
```
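The new `reset_seeds` hook is registered through sphinx-gallery's `reset_modules` option, which calls each listed function around every tutorial script so stochastic examples render reproducibly. Below is a minimal sketch of the same idea outside Sphinx, assuming only `torch` and `random` (the `gallery_conf`/`fname` parameters are just the signature sphinx-gallery passes to reset functions):

```python
import random
import torch

def reset_seeds(gallery_conf=None, fname=None):
    # Mirror of the hook added in conf.py: fix PyTorch's RNG, clear any
    # globally-set default device, and fix Python's own RNG.
    torch.manual_seed(42)
    torch.set_default_device(None)
    random.seed(10)

# Reseeding before each "example" makes stochastic code deterministic:
reset_seeds()
a = torch.randn(3)
reset_seeds()
b = torch.randn(3)
assert torch.equal(a, b)  # identical draws after identical seeding
```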

prototype_source/fx_graph_mode_ptq_dynamic.py

Lines changed: 21 additions & 3 deletions
```diff
@@ -239,9 +239,27 @@ def evaluate(model_, data_source):
     .set_object_type(nn.LSTM, default_dynamic_qconfig)
     .set_object_type(nn.Linear, default_dynamic_qconfig)
 )
-# Deepcopying the original model because quantization api changes the model inplace and we want
+# Load model to create the original model because quantization api changes the model inplace and we want
 # to keep the original model for future comparison
-model_to_quantize = copy.deepcopy(model)
+
+
+model_to_quantize = LSTMModel(
+    ntoken = ntokens,
+    ninp = 512,
+    nhid = 256,
+    nlayers = 5,
+)
+
+model_to_quantize.load_state_dict(
+    torch.load(
+        model_data_filepath + 'word_language_model_quantize.pth',
+        map_location=torch.device('cpu')
+    )
+)
+
+model_to_quantize.eval()
+
+
 prepared_model = prepare_fx(model_to_quantize, qconfig_mapping, example_inputs)
 print("prepared model:", prepared_model)
 quantized_model = convert_fx(prepared_model)
@@ -289,4 +307,4 @@ def time_model_evaluation(model, test_data):
 # 3. Conclusion
 # -------------
 # This tutorial introduces the api for post training dynamic quantization in FX Graph Mode,
-# which dynamically quantizes the same modules as Eager Mode Quantization.
\ No newline at end of file
+# which dynamically quantizes the same modules as Eager Mode Quantization.
```
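This change swaps `copy.deepcopy(model)` for rebuilding the LSTM and loading its saved weights: as the tutorial's comment notes, the quantization API (`prepare_fx`/`convert_fx`) modifies the module in place, so a pristine copy is needed for the later comparison. Below is a minimal sketch of the load-instead-of-copy pattern with a hypothetical stand-in module (`TinyModel` and `checkpoint.pth` are illustrative; the tutorial's `LSTMModel`, `ntokens`, and `model_data_filepath` are defined earlier in the file):

```python
import torch
import torch.nn as nn

# Hypothetical stand-in for the tutorial's LSTMModel; any nn.Module works.
class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(8, 2)

    def forward(self, x):
        return self.fc(x)

model = TinyModel()
torch.save(model.state_dict(), "checkpoint.pth")  # stands in for the saved word-language-model weights

# Instead of copy.deepcopy(model): build a fresh instance and load the saved
# weights, so in-place quantization cannot touch the original model.
model_to_quantize = TinyModel()
model_to_quantize.load_state_dict(
    torch.load("checkpoint.pth", map_location=torch.device("cpu"))
)
model_to_quantize.eval()  # quantization expects eval mode
```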

recipes_source/recipes/changing_default_device.py

Lines changed: 0 additions & 3 deletions
```diff
@@ -43,9 +43,6 @@
 print(mod.weight.device)
 print(mod(torch.randn(128, 20)).device)
 
-# And then globally return it back to CPU
-torch.set_default_device('cpu')
-
 ################################################################
 # This function imposes a slight performance cost on every Python
 # call to the torch API (not just factory functions). If this
```
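The removed lines reset the global default device back to `'cpu'` partway through the recipe. For context, here is a short sketch contrasting the global setter used in the recipe with `torch.device` as a context manager, which restores the previous default automatically on exit (`'meta'` is used here only so the snippet runs without a GPU):

```python
import torch

# Global form, as in the recipe: every factory call after this allocates on
# the given device until it is changed again.
torch.set_default_device("cpu")
print(torch.randn(2).device)  # cpu

# Scoped alternative: a torch.device works as a context manager, so the
# default reverts on exit instead of needing a manual reset.
with torch.device("meta"):
    print(torch.empty(2).device)  # meta
print(torch.empty(2).device)  # back to cpu
```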
