diff --git a/.gitignore b/.gitignore
index 3fb316b581f..ea278b3dbc4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,14 +6,17 @@ pytorch_basics
 
 #data things
 _data/
-beginner_source/hymenoptera_data
-beginner_source/blitz/data
-beginner_source/faces
-beginner_source/hybrid_frontend/data
-intermediate_source/data/
 advanced_source/images/
-*data.zip
-faces.zip
+advanced_source/data/
+beginner_source/.data/
+beginner_source/data/
+beginner_source/blitz/data/
+beginner_source/faces/
+beginner_source/hybrid_frontend/data/
+beginner_source/hymenoptera_data/
+intermediate_source/data/
+*.zip
+MNIST/
 
 #builds
 _build/
@@ -94,7 +97,7 @@ target/
 .python-version
 
 # celery beat schedule file
-celerybeat-schedule
+celerybeat-schedule
 
 # dotenv
 .env
diff --git a/Makefile b/Makefile
index fd4970731e5..fa4d2b8d420 100644
--- a/Makefile
+++ b/Makefile
@@ -5,7 +5,7 @@ export LC_ALL=C
 
 # You can set these variables from the command line.
-SPHINXOPTS    =
+SPHINXOPTS    ?=
 SPHINXBUILD   = sphinx-build
 SPHINXPROJ    = PyTorchTutorials
 SOURCEDIR     = .
@@ -13,6 +13,9 @@ BUILDDIR      = _build
 DATADIR       = _data
 GH_PAGES_SOURCES = $(SOURCEDIR) Makefile
 
+ZIPOPTS ?= -qo
+TAROPTS ?=
+
 # Put it first so that "make" without argument is like "make help".
 help:
 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
@@ -43,15 +46,15 @@ download:
 	# transfer learning tutorial data
 	wget -N https://download.pytorch.org/tutorial/hymenoptera_data.zip -P $(DATADIR)
-	unzip -o $(DATADIR)/hymenoptera_data.zip -d beginner_source/data/
+	unzip $(ZIPOPTS) $(DATADIR)/hymenoptera_data.zip -d beginner_source/data/
 
 	# nlp tutorial data
 	wget -N https://download.pytorch.org/tutorial/data.zip -P $(DATADIR)
-	unzip -o $(DATADIR)/data.zip -d intermediate_source/ # This will unzip all files in data.zip to intermediate_source/data/ folder
+	unzip $(ZIPOPTS) $(DATADIR)/data.zip -d intermediate_source/ # This will unzip all files in data.zip to intermediate_source/data/ folder
 
 	# data loader tutorial
 	wget -N https://download.pytorch.org/tutorial/faces.zip -P $(DATADIR)
-	unzip -o $(DATADIR)/faces.zip -d beginner_source/data/
+	unzip $(ZIPOPTS) $(DATADIR)/faces.zip -d beginner_source/data/
 
 	wget -N https://download.pytorch.org/models/tutorials/4000_checkpoint.tar -P $(DATADIR)
 	cp $(DATADIR)/4000_checkpoint.tar beginner_source/data/
@@ -63,7 +66,7 @@ download:
 	# Download dataset for beginner_source/dcgan_faces_tutorial.py
 	wget -N https://s3.amazonaws.com/pytorch-tutorial-assets/img_align_celeba.zip -P $(DATADIR)
-	unzip -q -o $(DATADIR)/img_align_celeba.zip -d beginner_source/data/celeba
+	unzip $(ZIPOPTS) $(DATADIR)/img_align_celeba.zip -d beginner_source/data/celeba
 
 	# Download dataset for beginner_source/hybrid_frontend/introduction_to_hybrid_frontend_tutorial.py
 	wget -N https://s3.amazonaws.com/pytorch-tutorial-assets/iris.data -P $(DATADIR)
@@ -71,11 +74,11 @@ download:
 	# Download dataset for beginner_source/chatbot_tutorial.py
 	wget -N https://s3.amazonaws.com/pytorch-tutorial-assets/cornell_movie_dialogs_corpus.zip -P $(DATADIR)
-	unzip -q -o $(DATADIR)/cornell_movie_dialogs_corpus.zip -d beginner_source/data/
+	unzip $(ZIPOPTS) $(DATADIR)/cornell_movie_dialogs_corpus.zip -d beginner_source/data/
 
 	# Download dataset for beginner_source/audio_classifier_tutorial.py
 	wget -N https://s3.amazonaws.com/pytorch-tutorial-assets/UrbanSound8K.tar.gz -P $(DATADIR)
-	tar -xzf $(DATADIR)/UrbanSound8K.tar.gz -C ./beginner_source/data/
+	tar $(TAROPTS) -xzf $(DATADIR)/UrbanSound8K.tar.gz -C ./beginner_source/data/
 
 	# Download model for beginner_source/fgsm_tutorial.py
 	wget -N https://s3.amazonaws.com/pytorch-tutorial-assets/lenet_mnist_model.pth -P $(DATADIR)
@@ -87,7 +90,7 @@ download:
 	# Download data for advanced_source/dynamic_quantization_tutorial.py
 	wget -N https://s3.amazonaws.com/pytorch-tutorial-assets/wikitext-2.zip -P $(DATADIR)
-	unzip -q -o $(DATADIR)/wikitext-2.zip -d advanced_source/data/
+	unzip $(ZIPOPTS) $(DATADIR)/wikitext-2.zip -d advanced_source/data/
 
 	# Download model for advanced_source/static_quantization_tutorial.py
 	wget -N https://download.pytorch.org/models/mobilenet_v2-b0353104.pth -P $(DATADIR)
@@ -95,8 +98,7 @@ download:
 	# Download dataset for advanced_source/static_quantization_tutorial.py
 	wget -N https://s3.amazonaws.com/pytorch-tutorial-assets/imagenet_1k.zip -P $(DATADIR)
-	unzip -q -o $(DATADIR)/imagenet_1k.zip -d advanced_source/data/
-
+	unzip $(ZIPOPTS) $(DATADIR)/imagenet_1k.zip -d advanced_source/data/
 
 docs:
 	make download
diff --git a/conf.py b/conf.py
index 5fc4596ab10..06ba0f2c7ff 100644
--- a/conf.py
+++ b/conf.py
@@ -12,6 +12,16 @@
 #
 # All configuration values have a default; values that are commented out
 # serve to show the default.
+#
+
+# Because the sphinx gallery build can take a long time, you can control which
+# files are built by setting the `GALLERY_PATTERN` environment variable. For
+# example, to run only `neural_style_transfer_tutorial.py`:
+# GALLERY_PATTERN="neural_style_transfer_tutorial.py" make html
+# or
+# GALLERY_PATTERN="neural_style_transfer_tutorial.py" sphinx-build . _build
+#
+# The GALLERY_PATTERN variable respects regular expressions.
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
@@ -49,12 +59,11 @@
 
 
 # -- Sphinx-gallery configuration --------------------------------------------
-
 sphinx_gallery_conf = {
     'examples_dirs': ['beginner_source', 'intermediate_source', 'advanced_source'],
     'gallery_dirs': ['beginner', 'intermediate', 'advanced'],
-    'filename_pattern': 'tutorial.py',
+    'filename_pattern': os.environ.get('GALLERY_PATTERN', r'tutorial.py'),
     'backreferences_dir': False
 }
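Reviewer note (not part of the patch): below is a minimal Python sketch of the selection behaviour the new `filename_pattern` line relies on. It assumes sphinx-gallery's documented handling of `filename_pattern` as a regular expression matched against example file paths; the candidate paths listed are illustrative only.

import os
import re

# Mirror the conf.py change: fall back to the old 'tutorial.py' pattern when
# GALLERY_PATTERN is unset, otherwise restrict the build to matching tutorials.
filename_pattern = os.environ.get('GALLERY_PATTERN', r'tutorial.py')

# Illustrative example paths, not an exhaustive list of tutorials.
candidates = [
    'beginner_source/neural_style_tutorial.py',
    'beginner_source/dcgan_faces_tutorial.py',
    'intermediate_source/char_rnn_classification_tutorial.py',
]

for path in candidates:
    # Regex search, not an exact match, so GALLERY_PATTERN="dcgan" also works.
    if re.search(filename_pattern, path):
        print('would build:', path)

With GALLERY_PATTERN unset, all three paths match the default 'tutorial.py' pattern; with GALLERY_PATTERN="neural_style", only the first one does.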