
Commit 177c4a4

Add pyspelling config and workflow (#2274)
Pipeline is:
- Filter Python comments
- Remove the encoding comment and one `.. figure::` rST directive
- Convert markup to HTML
- Remove code sections from the converted rST (important: for the markup to properly recognize something as a code section, it must have correct indentation)

Also adds non-standard words to `tutorials-wordlist.txt`. So far this only covers `data_loading_tutorial.py`.
1 parent: b2fba80

File tree: 4 files changed, +89 −21 lines

- .github/workflows/spelling.yml
- .pyspelling.yml
- beginner_source/data_loading_tutorial.py
- tutorials-wordlist.txt

.github/workflows/spelling.yml

Lines changed: 20 additions & 0 deletions (new file)

```yaml
name: Check spelling

on:
  pull_request:
  push:
    branches:
      - main
jobs:
  pyspelling:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
          cache: 'pip'
      - run: pip install pyspelling
      - run: sudo apt-get install aspell aspell-en
      - run: pyspelling
```
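The last three steps translate directly to a local run. A minimal sketch of reproducing the check outside CI (assuming `pip install pyspelling` and a system aspell, as in the workflow):

```python
# Hypothetical local runner mirroring the workflow's final step.
import subprocess

# pyspelling picks up .pyspelling.yml from the working directory;
# a non-zero exit code means misspellings were found.
result = subprocess.run(["pyspelling"], capture_output=True, text=True)
print(result.stdout)
if result.returncode != 0:
    raise SystemExit("spelling check failed")
```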

.pyspelling.yml

Lines changed: 25 additions & 0 deletions (new file)

```yaml
spellchecker: aspell
matrix:
- name: beginner
  sources:
  - beginner_source/data_loading_tutorial.py
  dictionary:
    wordlists:
    - tutorials-wordlist.txt
  pipeline:
  - pyspelling.filters.python:
      group_comments: true
  - pyspelling.filters.context:
      context_visible_first: true
      delimiters:
        # Exclude figure rST tags
        - open: '\.\.\s+(figure|literalinclude|)::'
          close: '\n'
        # Exclude Python coding directives
        - open: '-\*- coding:'
          close: '\n'
  - pyspelling.filters.markdown:
  - pyspelling.filters.html:
      ignores:
      - code
      - pre
```
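To make the filter chain concrete, here is a hypothetical comment block of the kind this matrix processes. The two `delimiters` regexes strip the coding directive and the `.. figure::` line before conversion, and the `html` filter then drops anything rendered as `code` or `pre`, so only plain prose reaches aspell (an illustration, not part of the commit):

```python
# -*- coding: utf-8 -*-        (removed by the '-\*- coding:' delimiter)
"""
A hypothetical tutorial docstring.

.. figure:: /_static/example.png   (removed by the figure delimiter)

Only this prose is spellchecked; indented literal blocks render
as ``pre``/``code`` in HTML and are ignored by the html filter.
"""
```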

beginner_source/data_loading_tutorial.py

Lines changed: 21 additions & 21 deletions
```diff
@@ -124,7 +124,7 @@ class FaceLandmarksDataset(Dataset):
 
     def __init__(self, csv_file, root_dir, transform=None):
         """
-        Args:
+        Arguments:
            csv_file (string): Path to the csv file with annotations.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
```
```diff
@@ -197,7 +197,7 @@ def __getitem__(self, idx):
 # swap axes).
 #
 # We will write them as callable classes instead of simple functions so
-# that parameters of the transform need not be passed everytime it's
+# that parameters of the transform need not be passed every time it's
 # called. For this, we just need to implement ``__call__`` method and
 # if required, ``__init__`` method. We can then use a transform like this:
 #
```
```diff
@@ -291,12 +291,12 @@ def __call__(self, sample):
         image = image.transpose((2, 0, 1))
         return {'image': torch.from_numpy(image),
                 'landmarks': torch.from_numpy(landmarks)}
-
+
 ######################################################################
 # .. note::
 #     In the example above, `RandomCrop` uses an external library's random number generator
-#     (in this case, Numpy's `np.random.int`). This can result in unexpected behavior with `DataLoader`
-#     (see https://pytorch.org/docs/stable/notes/faq.html#my-data-loader-workers-return-identical-random-numbers).
+#     (in this case, Numpy's `np.random.int`). This can result in unexpected behavior with `DataLoader`
+#     (see `here <https://pytorch.org/docs/stable/notes/faq.html#my-data-loader-workers-return-identical-random-numbers>`_).
 #     In practice, it is safer to stick to PyTorch's random number generator, e.g. by using `torch.randint` instead.
 
 ######################################################################
```
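A minimal sketch of the safer pattern the note recommends, as a hypothetical `RandomCrop`-style transform using `torch.randint` (assuming numpy-array images and landmarks, as in the tutorial):

```python
import torch

class TorchRandomCrop:
    """Hypothetical crop transform that uses PyTorch's RNG instead of numpy's."""

    def __init__(self, output_size):
        self.output_size = output_size

    def __call__(self, sample):
        image, landmarks = sample['image'], sample['landmarks']
        h, w = image.shape[:2]
        new_h = new_w = self.output_size
        # torch.randint draws from PyTorch's generator, which DataLoader
        # reseeds per worker, avoiding identical numbers across workers.
        top = torch.randint(0, h - new_h + 1, (1,)).item()
        left = torch.randint(0, w - new_w + 1, (1,)).item()
        image = image[top: top + new_h, left: left + new_w]
        landmarks = landmarks - [left, top]
        return {'image': image, 'landmarks': landmarks}
```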
```diff
@@ -404,7 +404,7 @@ def show_landmarks_batch(sample_batched):
         plt.title('Batch from dataloader')
 
 # if you are using Windows, uncomment the next line and indent the for loop.
-# you might need to go back and change "num_workers" to 0.
+# you might need to go back and change ``num_workers`` to 0.
 
 # if __name__ == '__main__':
 for i_batch, sample_batched in enumerate(dataloader):
```
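For reference, the Windows variant those two comments describe would look roughly like this (a sketch reusing the tutorial's `transformed_dataset`; `num_workers=0` keeps loading in the main process, and the `__main__` guard is what multiprocessing on Windows requires):

```python
from torch.utils.data import DataLoader

if __name__ == '__main__':
    # num_workers=0 avoids spawning worker processes on Windows.
    dataloader = DataLoader(transformed_dataset, batch_size=4,
                            shuffle=True, num_workers=0)
    for i_batch, sample_batched in enumerate(dataloader):
        print(i_batch, sample_batched['image'].size())
```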
```diff
@@ -444,21 +444,21 @@ def show_landmarks_batch(sample_batched):
 # which operate on ``PIL.Image`` like ``RandomHorizontalFlip``, ``Scale``,
 # are also available. You can use these to write a dataloader like this: ::
 #
-# import torch
-# from torchvision import transforms, datasets
-#
-# data_transform = transforms.Compose([
-#     transforms.RandomSizedCrop(224),
-#     transforms.RandomHorizontalFlip(),
-#     transforms.ToTensor(),
-#     transforms.Normalize(mean=[0.485, 0.456, 0.406],
-#                          std=[0.229, 0.224, 0.225])
-# ])
-# hymenoptera_dataset = datasets.ImageFolder(root='hymenoptera_data/train',
-#                                            transform=data_transform)
-# dataset_loader = torch.utils.data.DataLoader(hymenoptera_dataset,
-#                                              batch_size=4, shuffle=True,
-#                                              num_workers=4)
+#     import torch
+#     from torchvision import transforms, datasets
+#
+#     data_transform = transforms.Compose([
+#         transforms.RandomSizedCrop(224),
+#         transforms.RandomHorizontalFlip(),
+#         transforms.ToTensor(),
+#         transforms.Normalize(mean=[0.485, 0.456, 0.406],
+#                              std=[0.229, 0.224, 0.225])
+#     ])
+#     hymenoptera_dataset = datasets.ImageFolder(root='hymenoptera_data/train',
+#                                                transform=data_transform)
+#     dataset_loader = torch.utils.data.DataLoader(hymenoptera_dataset,
+#                                                  batch_size=4, shuffle=True,
+#                                                  num_workers=4)
 #
 # For an example with training code, please see
 # :doc:`transfer_learning_tutorial`.
```
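This last hunk is the indentation fix called out in the commit message: rST treats the lines after `::` as a literal block only when they are indented relative to the surrounding prose, and only then does the HTML filter see them as `pre`/`code` and skip them. A schematic illustration (hypothetical lines, not from the tutorial):

```python
# A spellchecked sentence ending in a literal block marker: ::
#
#     print("indented under '::': rendered as a code block, ignored")
#
# print("flush with the prose: rendered as text and spellchecked")
```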

tutorials-wordlist.txt

Lines changed: 23 additions & 0 deletions
```text
csv
DataLoaders
dataloader
dataset
datasets
dir
imagenet
io
jpg
ndarrays
Numpy's
numpy
preprocess
preprocessing
pytorch
rescale
runtime
th
subclasses
submodule
tanh
torchvision
uncomment
```
