
Commit 46d8c26

Seth Weidman committed
Remove dataloader arguments causing errors
1 parent 3c6390f

File tree

1 file changed (+3, -3)

advanced_source/static_quantization_tutorial.py

Lines changed: 3 additions & 3 deletions
@@ -339,11 +339,11 @@ def prepare_data_loaders(data_path)

     data_loader = torch.utils.data.DataLoader(
         dataset, batch_size=train_batch_size,
-        sampler=train_sampler, num_workers=1)
+        sampler=train_sampler)

     data_loader_test = torch.utils.data.DataLoader(
         dataset_test, batch_size=eval_batch_size,
-        sampler=test_sampler, num_workers=4, pin_memory=True)
+        sampler=test_sampler)

     return data_loader, data_loader_test

@@ -455,7 +455,7 @@ def prepare_data_loaders(data_path)
 # quantization parameters in an optimal manner.
 #

-per_channel_quantized_model = load_model(saved_model_dir+float_model_file)
+per_channel_quantized_model = load_model(saved_model_dir + float_model_file)
 per_channel_quantized_model.eval()
 per_channel_quantized_model.fuse_model()
 per_channel_quantized_model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
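The first hunk drops the num_workers and pin_memory arguments that were causing errors, so both loaders fall back to DataLoader's defaults (num_workers=0, meaning batches are loaded in the main process, and pin_memory=False); the second hunk is a whitespace-only cleanup around the + operator. For context, a minimal sketch of the simplified loader construction after this commit, following the tutorial's names (the ImageFolder datasets, the transform, and the batch-size defaults below are illustrative stand-ins, not the tutorial's actual data pipeline):

import torch
import torchvision
from torchvision import transforms

def prepare_data_loaders(data_path, train_batch_size=30, eval_batch_size=30):
    # Illustrative stand-in datasets; the tutorial builds its own
    # ImageNet-based datasets at this point in the script.
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])
    dataset = torchvision.datasets.ImageFolder(data_path + '/train', transform)
    dataset_test = torchvision.datasets.ImageFolder(data_path + '/val', transform)

    train_sampler = torch.utils.data.RandomSampler(dataset)
    test_sampler = torch.utils.data.SequentialSampler(dataset_test)

    # After this commit the loaders rely on DataLoader's defaults
    # (num_workers=0, pin_memory=False): data is loaded in the main
    # process, avoiding the worker-related errors the commit removes.
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=train_batch_size,
        sampler=train_sampler)

    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=eval_batch_size,
        sampler=test_sampler)

    return data_loader, data_loader_test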
