diff --git a/.github/workflows/spelling.yml b/.github/workflows/spelling.yml
new file mode 100644
index 00000000000..07c86ed4a28
--- /dev/null
+++ b/.github/workflows/spelling.yml
@@ -0,0 +1,20 @@
+name: Check spelling
+
+on:
+  pull_request:
+  push:
+    branches:
+      - main
+jobs:
+  pyspelling:
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.9'
+          cache: 'pip'
+      - run: pip install pyspelling
+      - run: sudo apt-get install aspell aspell-en
+      - run: pyspelling
+
diff --git a/.pyspelling.yml b/.pyspelling.yml
new file mode 100644
index 00000000000..04dcda37b75
--- /dev/null
+++ b/.pyspelling.yml
@@ -0,0 +1,25 @@
+spellchecker: aspell
+matrix:
+- name: beginner
+  sources:
+  - beginner_source/data_loading_tutorial.py
+  dictionary:
+    wordlists:
+    - tutorials-wordlist.txt
+  pipeline:
+  - pyspelling.filters.python:
+      group_comments: true
+  - pyspelling.filters.context:
+      context_visible_first: true
+      delimiters:
+        # Exclude figure rST tags
+        - open: '\.\.\s+(figure|literalinclude|)::'
+          close: '\n'
+        # Exclude Python coding directives
+        - open: '-\*- coding:'
+          close: '\n'
+  - pyspelling.filters.markdown:
+  - pyspelling.filters.html:
+      ignores:
+      - code
+      - pre
diff --git a/beginner_source/data_loading_tutorial.py b/beginner_source/data_loading_tutorial.py
index 1445dd6beb5..322d9b3009c 100644
--- a/beginner_source/data_loading_tutorial.py
+++ b/beginner_source/data_loading_tutorial.py
@@ -124,7 +124,7 @@ class FaceLandmarksDataset(Dataset):
 
     def __init__(self, csv_file, root_dir, transform=None):
         """
-        Args:
+        Arguments:
             csv_file (string): Path to the csv file with annotations.
             root_dir (string): Directory with all the images.
             transform (callable, optional): Optional transform to be applied
@@ -197,7 +197,7 @@ def __getitem__(self, idx):
 # swap axes).
 #
 # We will write them as callable classes instead of simple functions so
-# that parameters of the transform need not be passed everytime it's
+# that parameters of the transform need not be passed every time it's
 # called. For this, we just need to implement ``__call__`` method and
 # if required, ``__init__`` method. We can then use a transform like this:
 #
@@ -291,12 +291,12 @@ def __call__(self, sample):
         image = image.transpose((2, 0, 1))
         return {'image': torch.from_numpy(image),
                 'landmarks': torch.from_numpy(landmarks)}
-        
+
 ######################################################################
 # .. note::
 #     In the example above, `RandomCrop` uses an external library's random number generator
-#     (in this case, Numpy's `np.random.int`). This can result in unexpected behavior with `DataLoader` 
-#     (see https://pytorch.org/docs/stable/notes/faq.html#my-data-loader-workers-return-identical-random-numbers).
+#     (in this case, Numpy's `np.random.int`). This can result in unexpected behavior with `DataLoader`
+#     (see `here <https://pytorch.org/docs/stable/notes/faq.html#my-data-loader-workers-return-identical-random-numbers>`_).
 #     In practice, it is safer to stick to PyTorch's random number generator, e.g. by using `torch.randint` instead.
 
 ######################################################################
@@ -404,7 +404,7 @@ def show_landmarks_batch(sample_batched):
     plt.title('Batch from dataloader')
 
 # if you are using Windows, uncomment the next line and indent the for loop.
-# you might need to go back and change "num_workers" to 0. 
+# you might need to go back and change ``num_workers`` to 0.
 
 # if __name__ == '__main__':
 for i_batch, sample_batched in enumerate(dataloader):
@@ -444,21 +444,21 @@ def show_landmarks_batch(sample_batched):
 # which operate on ``PIL.Image`` like ``RandomHorizontalFlip``, ``Scale``,
 # are also available. You can use these to write a dataloader like this: ::
 #
-#     import torch
-#     from torchvision import transforms, datasets
-#
-#     data_transform = transforms.Compose([
-#             transforms.RandomSizedCrop(224),
-#             transforms.RandomHorizontalFlip(),
-#             transforms.ToTensor(),
-#             transforms.Normalize(mean=[0.485, 0.456, 0.406],
-#                                  std=[0.229, 0.224, 0.225])
-#         ])
-#     hymenoptera_dataset = datasets.ImageFolder(root='hymenoptera_data/train',
-#                                                transform=data_transform)
-#     dataset_loader = torch.utils.data.DataLoader(hymenoptera_dataset,
-#                                                  batch_size=4, shuffle=True,
-#                                                  num_workers=4)
+#   import torch
+#   from torchvision import transforms, datasets
+#
+#   data_transform = transforms.Compose([
+#           transforms.RandomSizedCrop(224),
+#           transforms.RandomHorizontalFlip(),
+#           transforms.ToTensor(),
+#           transforms.Normalize(mean=[0.485, 0.456, 0.406],
+#                                std=[0.229, 0.224, 0.225])
+#       ])
+#   hymenoptera_dataset = datasets.ImageFolder(root='hymenoptera_data/train',
+#                                              transform=data_transform)
+#   dataset_loader = torch.utils.data.DataLoader(hymenoptera_dataset,
+#                                                batch_size=4, shuffle=True,
+#                                                num_workers=4)
 #
 # For an example with training code, please see
 # :doc:`transfer_learning_tutorial`.
diff --git a/tutorials-wordlist.txt b/tutorials-wordlist.txt
new file mode 100644
index 00000000000..822e2fb2525
--- /dev/null
+++ b/tutorials-wordlist.txt
@@ -0,0 +1,23 @@
+csv
+DataLoaders
+dataloader
+dataset
+datasets
+dir
+imagenet
+io
+jpg
+ndarrays
+Numpy's
+numpy
+preprocess
+preprocessing
+pytorch
+rescale
+runtime
+th
+subclasses
+submodule
+tanh
+torchvision
+uncomment
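Note: the CI job above can also be reproduced locally before pushing. A minimal sketch that mirrors the workflow's run steps; the `-n beginner` flag, which selects the named entry from the .pyspelling.yml matrix, is based on pyspelling's documented --name option and is not used by the workflow itself:

    pip install pyspelling
    sudo apt-get install aspell aspell-en
    pyspelling -n beginner    # or plain `pyspelling` to run the full matrix, as CI does

Words flagged in beginner_source/data_loading_tutorial.py can either be corrected in place or, if they are legitimate technical terms, added to tutorials-wordlist.txt so the check passes.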