
Commit 9578973

Author: Svetlana Karslioglu
Merge branch 'main' into main
2 parents: 0ec1b92 + b966c1f

2 files changed: +20 -6 lines changed

beginner_source/data_loading_tutorial.py

Lines changed: 2 additions & 6 deletions
@@ -165,9 +165,7 @@ def __getitem__(self, idx):

 fig = plt.figure()

-for i in range(len(face_dataset)):
-    sample = face_dataset[i]
-
+for i, sample in enumerate(face_dataset):
     print(i, sample['image'].shape, sample['landmarks'].shape)

     ax = plt.subplot(1, 4, i + 1)
@@ -356,9 +354,7 @@ def __call__(self, sample):
                                                ToTensor()
                                            ]))

-for i in range(len(transformed_dataset)):
-    sample = transformed_dataset[i]
-
+for i, sample in enumerate(transformed_dataset):
     print(i, sample['image'].size(), sample['landmarks'].size())

     if i == 3:
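
Both hunks make the same change: the index-based loop (range(len(...)) followed by dataset[i]) is collapsed into enumerate(dataset). This works because a map-style Dataset exposes __getitem__ with integer indices, so Python can iterate over it directly. A minimal, self-contained sketch of the pattern (not part of the commit; the toy dataset below is invented for illustration):

    import torch
    from torch.utils.data import Dataset

    class ToyDataset(Dataset):
        """Stand-in for FaceLandmarksDataset: returns dict samples by index."""

        def __len__(self):
            return 4

        def __getitem__(self, idx):
            if idx >= len(self):  # iteration stops when IndexError is raised
                raise IndexError(idx)
            return {'image': torch.zeros(3, 8, 8), 'landmarks': torch.zeros(68, 2)}

    # Same output as the old range(len(...)) loop, without manual indexing
    for i, sample in enumerate(ToyDataset()):
        print(i, sample['image'].shape, sample['landmarks'].shape)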

intermediate_source/dynamic_quantization_bert_tutorial.rst

Lines changed: 18 additions & 0 deletions
@@ -255,6 +255,9 @@ model before and after the dynamic quantization.
     torch.manual_seed(seed)
     set_seed(42)

+    # Initialize a global random number generator
+    global_rng = random.Random()
+

 2.2 Load the fine-tuned BERT model
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
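
Aside (not part of the commit): random.Random() constructs a dedicated generator object, so the dummy-input helper added below draws from its own state rather than the module-level generator that a set_seed()-style helper seeds. A small illustrative sketch of that distinction:

    import random

    random.seed(42)                  # module-level generator, as a set_seed() helper would seed
    global_rng = random.Random()     # separate generator with independent state

    print(random.randint(0, 9))      # reproducible: same value every run for seed 42
    print(global_rng.randint(0, 9))  # varies run to run; unaffected by random.seed(42)
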
@@ -525,6 +528,21 @@ We can serialize and save the quantized model for the future use using

 .. code:: python

+    def ids_tensor(shape, vocab_size, rng=None, name=None):
+        # Creates a random int32 tensor of the shape within the vocab size
+        if rng is None:
+            rng = global_rng
+
+        total_dims = 1
+        for dim in shape:
+            total_dims *= dim
+
+        values = []
+        for _ in range(total_dims):
+            values.append(rng.randint(0, vocab_size - 1))
+
+        return torch.tensor(data=values, dtype=torch.long, device='cpu').view(shape).contiguous()
+
     input_ids = ids_tensor([8, 128], 2)
     token_type_ids = ids_tensor([8, 128], 2)
     attention_mask = ids_tensor([8, 128], vocab_size=2)
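
A hedged usage sketch of the new helper: the three calls at the end of the hunk build dummy BERT-style inputs of shape [batch_size=8, seq_len=128] with token values in {0, 1}, which the tutorial can then feed to the model, for example when tracing it before serialization. The helper and generator are reproduced below only so the sketch runs standalone; quantized_model and the torch.jit.trace step are assumptions about the surrounding tutorial, not part of this commit:

    import random
    import torch

    global_rng = random.Random()

    def ids_tensor(shape, vocab_size, rng=None, name=None):
        # Random integer tensor of the given shape with values in [0, vocab_size)
        if rng is None:
            rng = global_rng
        total_dims = 1
        for dim in shape:
            total_dims *= dim
        values = [rng.randint(0, vocab_size - 1) for _ in range(total_dims)]
        return torch.tensor(values, dtype=torch.long).view(shape).contiguous()

    input_ids = ids_tensor([8, 128], 2)
    token_type_ids = ids_tensor([8, 128], 2)
    attention_mask = ids_tensor([8, 128], vocab_size=2)
    print(input_ids.shape, input_ids.dtype)   # torch.Size([8, 128]) torch.int64

    # Illustrative only; quantized_model would come from earlier in the tutorial:
    # dummy_input = (input_ids, attention_mask, token_type_ids)
    # traced_model = torch.jit.trace(quantized_model, dummy_input)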
