diff --git a/_static/css/pytorch_theme.css b/_static/css/pytorch_theme.css
index 1b6b2ff0538..153f4889c08 100755
--- a/_static/css/pytorch_theme.css
+++ b/_static/css/pytorch_theme.css
@@ -112,7 +112,7 @@ footer p {
 }
 
 /* For hidden headers that appear in TOC tree */
-/* see http://stackoverflow.com/a/32363545/3343043 */
+/* see https://stackoverflow.com/a/32363545/3343043 */
 .rst-content .hidden-section {
   display: none;
 }
diff --git a/_static/doctools.js b/_static/doctools.js
index d8928926bf2..6d984d66d2e 100755
--- a/_static/doctools.js
+++ b/_static/doctools.js
@@ -76,7 +76,7 @@ jQuery.fn.highlightText = function(text, className) {
     var span;
     var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
     if (isInSVG) {
-      span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
+      span = document.createElementNS("https://www.w3.org/2000/svg", "tspan");
     } else {
       span = document.createElement("span");
       span.className = className;
@@ -88,7 +88,7 @@ jQuery.fn.highlightText = function(text, className) {
       node.nodeValue = val.substr(0, pos);
       if (isInSVG) {
         var bbox = span.getBBox();
-        var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
+        var rect = document.createElementNS("https://www.w3.org/2000/svg", "rect");
         rect.x.baseVal.value = bbox.x;
         rect.y.baseVal.value = bbox.y;
         rect.width.baseVal.value = bbox.width;
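A patch like this is usually produced mechanically. Below is a minimal, hypothetical sketch of such a sweep (not part of this PR): it upgrades ``http://`` links to ``https://`` but deliberately skips W3C namespace URIs such as the ``http://www.w3.org/2000/svg`` strings passed to ``createElementNS`` above, since XML namespace URIs are compared as opaque identifiers rather than fetched.

.. code-block:: python

    import pathlib
    import re

    # Namespace URIs are matched byte-for-byte by XML/DOM APIs,
    # so they must keep their historical http:// scheme.
    NAMESPACE_PREFIXES = (
        "http://www.w3.org/2000/svg",
        "http://www.w3.org/1999/xlink",
        "http://www.w3.org/1999/xhtml",
    )

    def upgrade_urls(text):
        def repl(match):
            url = match.group(0)
            if url.startswith(NAMESPACE_PREFIXES):
                return url  # leave identifiers untouched
            return "https://" + url[len("http://"):]
        return re.sub(r"http://[^\s\"'<>)]+", repl, text)

    for path in pathlib.Path(".").rglob("*"):
        if path.is_file() and path.suffix in {".py", ".rst", ".css", ".js", ".html"}:
            original = path.read_text(encoding="utf-8")
            updated = upgrade_urls(original)
            if updated != original:
                path.write_text(updated, encoding="utf-8")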
diff --git a/_static/images/arrow-down-orange.svg b/_static/images/arrow-down-orange.svg
index e9d8e9ecf24..12cad9b03c9 100755
diff --git a/_static/images/arrow-right-with-tail.svg b/_static/images/arrow-right-with-tail.svg
index 5843588fca6..ab8ff251a45 100755
diff --git a/_static/images/chevron-down-grey.svg b/_static/images/chevron-down-grey.svg
index 82d6514f250..1ad1587bf81 100755
diff --git a/_static/images/chevron-right-orange.svg b/_static/images/chevron-right-orange.svg
index 7033fc93bf4..0b04e706128 100755
diff --git a/_static/images/chevron-right-white.svg b/_static/images/chevron-right-white.svg
index dd9e77f2616..650f2ac8527 100755
diff --git a/_static/images/icon-close.svg b/_static/images/icon-close.svg
index 348964e79f7..71f4b0a65fa 100755
diff --git a/_static/images/icon-menu-dots-dark.svg b/_static/images/icon-menu-dots-dark.svg
index fa2ad044b3f..3edb64967f4 100755
diff --git a/_static/images/logo-dark.svg b/_static/images/logo-dark.svg
index 9b4c1a56ac6..cda73fd2186 100755
diff --git a/_static/images/logo-icon.svg b/_static/images/logo-icon.svg
index 575f6823e47..dfef5be794a 100755
[SVG hunk bodies lost in extraction; only residual title text ("Page 1", "Group 5", "Created with Sketch.") survives, so the markup changes are omitted]
[Hunks for a minified JavaScript bundle (the Modernizr feature-detection library) and an HTML template fragment were garbled into a single run by extraction and are omitted; the surviving fragments (url(https://), https://www.w3.org) suggest the same http:// to https:// substitutions as the rest of the patch]
diff --git a/advanced_source/ONNXLive.rst b/advanced_source/ONNXLive.rst
index 3e7d9e15f4e..4322bf4060d 100644
--- a/advanced_source/ONNXLive.rst
+++ b/advanced_source/ONNXLive.rst
@@ -7,7 +7,7 @@ This tutorial will show you to convert a neural style transfer model that has be
 
 What is ONNX?
 -------------
 
-ONNX (Open Neural Network Exchange) is an open format to represent deep learning models. With ONNX, AI developers can more easily move models between state-of-the-art tools and choose the combination that is best for them. ONNX is developed and supported by a community of partners. You can learn more about ONNX and what tools are supported by going to `onnx.ai <http://onnx.ai>`_.
+ONNX (Open Neural Network Exchange) is an open format to represent deep learning models. With ONNX, AI developers can more easily move models between state-of-the-art tools and choose the combination that is best for them. ONNX is developed and supported by a community of partners. You can learn more about ONNX and what tools are supported by going to `onnx.ai <https://onnx.ai>`_.
 
 Tutorial Overview
 -----------------
@@ -84,7 +84,7 @@ However, the pixel size of this image is important, as this will be the size for
 To get good performance, we'll use a resolution of 250x540.
 Feel free to take a larger resolution if you care less about FPS
 and more about style transfer quality.
 
-Let's use `ImageMagick `_ to create a blank image of the resolution we want:
+Let's use `ImageMagick `_ to create a blank image of the resolution we want:
 
 .. code-block:: bash
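The ONNXLive changes above are pure link hygiene, but since the hunk introduces ONNX itself, a minimal export sketch may help connect it back to PyTorch. This is a generic illustration, not the tutorial's style-transfer model: ``resnet18`` and the 224x224 dummy input are stand-ins.

.. code-block:: python

    import torch
    import torchvision

    # Any traceable module can be exported; resnet18 is only a stand-in.
    model = torchvision.models.resnet18(pretrained=True).eval()

    # The exporter traces the model with a dummy input, so the input
    # shape is baked into the exported graph.
    dummy_input = torch.randn(1, 3, 224, 224)
    torch.onnx.export(model, dummy_input, "model.onnx")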
diff --git a/advanced_source/cpp_extension.rst b/advanced_source/cpp_extension.rst
index 52f541a31bb..555a8043055 100644
--- a/advanced_source/cpp_extension.rst
+++ b/advanced_source/cpp_extension.rst
@@ -1,6 +1,6 @@
 Custom C++ and CUDA Extensions
 ==============================
-**Author**: `Peter Goldsborough `_
+**Author**: `Peter Goldsborough `_
 
 PyTorch provides a plethora of operations related to neural networks, arbitrary
@@ -253,7 +253,7 @@ respect to each input of the forward pass. Ultimately, we will plop both the
 forward and backward function into a :class:`torch.autograd.Function` to create
 a nice Python binding. The backward function is slightly more involved, so
 we'll not dig deeper into the code (if you are interested, `Alex Graves' thesis
-`_ is a good read for more
+`_ is a good read for more
 information on this):
 
 .. code-block:: cpp
@@ -314,7 +314,7 @@ Once you have your operation written in C++ and ATen, you can use pybind11 to
 bind your C++ functions or classes into Python in a very simple manner.
 Questions or issues you have about this part of PyTorch C++ extensions will
 largely be addressed by `pybind11 documentation
-`_.
+`_.
 
 For our extensions, the necessary binding code spans only four lines:
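On the Python side, the binding discussed in this file is typically consumed through ``torch.utils.cpp_extension``. A minimal sketch, assuming the tutorial's ``lltm.cpp`` source sits in the working directory:

.. code-block:: python

    from torch.utils.cpp_extension import load

    # JIT-compiles the C++ source and imports the result as a Python
    # module; the first call is slow, later calls hit a build cache.
    lltm_cpp = load(name="lltm_cpp", sources=["lltm.cpp"], verbose=True)
    help(lltm_cpp.forward)  # the function bound via pybind11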
diff --git a/advanced_source/torch_script_custom_ops.rst b/advanced_source/torch_script_custom_ops.rst
index 754ee90123d..1c8d722952e 100644
--- a/advanced_source/torch_script_custom_ops.rst
+++ b/advanced_source/torch_script_custom_ops.rst
@@ -265,7 +265,7 @@ file should then be the following:
    build an isolated, reproducible environment in which to play around with the
    code from this tutorial. If you run into further troubles, please file an
    issue in the tutorial repository or post a question in `our forum
-   <http://discuss.pytorch.org>`_.
+   <https://discuss.pytorch.org>`_.
 
 To now build our operator, we can run the following commands from our
 ``warp_perspective`` folder:
@@ -619,7 +619,7 @@ example will showcase this using CMake. Technically, you can also dynamically
   load the shared library into your C++ application at runtime in much the same
   way we did it in Python. On Linux, `you can do this with dlopen
-  `_. There exist
+  `_. There exist
   equivalents on other platforms.
 
 Building on the C++ execution tutorial linked above, let's start with a minimal
diff --git a/beginner_source/chatbot_tutorial.py b/beginner_source/chatbot_tutorial.py
index 8eba1a0a789..f4a174ab87f 100644
--- a/beginner_source/chatbot_tutorial.py
+++ b/beginner_source/chatbot_tutorial.py
@@ -344,7 +344,7 @@ def trim(self, min_count):
 MAX_LENGTH = 10  # Maximum sentence length to consider
 
 # Turn a Unicode string to plain ASCII, thanks to
-# http://stackoverflow.com/a/518232/2809427
+# https://stackoverflow.com/a/518232/2809427
 def unicodeToAscii(s):
     return ''.join(
         c for c in unicodedata.normalize('NFD', s)
@@ -623,7 +623,7 @@ def batch2TrainData(voc, pair_batch):
 # :align: center
 # :alt: rnn_bidir
 #
-# Image source: http://colah.github.io/posts/2015-09-NN-Types-FP/
+# Image source: https://colah.github.io/posts/2015-09-NN-Types-FP/
 #
 # Note that an ``embedding`` layer is used to encode our word indices in
 # an arbitrarily sized feature space. For our models, this layer will map
@@ -928,7 +928,7 @@ def maskNLLLoss(inp, target, mask):
 # :width: 60%
 # :alt: grad_clip
 #
-# Image source: Goodfellow et al. *Deep Learning*. 2016. http://www.deeplearningbook.org/
+# Image source: Goodfellow et al. *Deep Learning*. 2016. https://www.deeplearningbook.org/
 #
 # **Sequence of Operations:**
 #
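Both hunks in this file touch only URLs, but the diff context cuts ``unicodeToAscii`` off mid-expression. For reference, the complete helper from the linked Stack Overflow answer (the same function recurs in the char-RNN and seq2seq tutorials below) normalizes to NFD and drops combining marks:

.. code-block:: python

    import unicodedata

    def unicodeToAscii(s):
        return ''.join(
            c for c in unicodedata.normalize('NFD', s)
            if unicodedata.category(c) != 'Mn'  # Mn = nonspacing combining mark
        )

    print(unicodeToAscii('Ślusàrski'))  # Slusarski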
diff --git a/beginner_source/data_loading_tutorial.py b/beginner_source/data_loading_tutorial.py
index c1b45fc9109..3793e5ac06b 100644
--- a/beginner_source/data_loading_tutorial.py
+++ b/beginner_source/data_loading_tutorial.py
@@ -48,7 +48,7 @@
 # so that the images are in a directory named 'data/faces/'.
 # This dataset was actually
 # generated by applying excellent `dlib's pose
-# estimation `__
+# estimation `__
 # on a few images from imagenet tagged as 'face'.
 #
 # Dataset comes with a csv file with annotations which looks like this:
diff --git a/beginner_source/dcgan_faces_tutorial.py b/beginner_source/dcgan_faces_tutorial.py
index 2b572fca2df..c9abab89a14 100644
--- a/beginner_source/dcgan_faces_tutorial.py
+++ b/beginner_source/dcgan_faces_tutorial.py
@@ -218,7 +218,7 @@
 # ----
 #
 # In this tutorial we will use the `Celeb-A Faces
-# dataset `__ which can
+# dataset `__ which can
 # be downloaded at the linked site, or in `Google
 # Drive `__.
 # The dataset will download as a file named *img_align_celeba.zip*. Once
diff --git a/beginner_source/finetuning_torchvision_models_tutorial.py b/beginner_source/finetuning_torchvision_models_tutorial.py
index 98bd33b6894..042301559ec 100644
--- a/beginner_source/finetuning_torchvision_models_tutorial.py
+++ b/beginner_source/finetuning_torchvision_models_tutorial.py
@@ -27,8 +27,8 @@
 # from which we derive predictions. It is called feature extraction
 # because we use the pretrained CNN as a fixed feature-extractor, and only
 # change the output layer. For more technical information about transfer
-# learning see `here <http://cs231n.github.io/transfer-learning/>`__ and
-# `here `__.
+# learning see `here <https://cs231n.github.io/transfer-learning/>`__ and
+# `here `__.
 #
 # In general both transfer learning methods follow the same few steps:
 #
diff --git a/beginner_source/hybrid_frontend_tutorial.rst b/beginner_source/hybrid_frontend_tutorial.rst
index d3fbd2fd589..89209b0affc 100644
--- a/beginner_source/hybrid_frontend_tutorial.rst
+++ b/beginner_source/hybrid_frontend_tutorial.rst
@@ -1,6 +1,6 @@
 Hybrid Frontend Tutorials
 -------------------------
-**Authors**: `Nathan Inkawhich `_ and `Matthew Inkawhich `_
+**Authors**: `Nathan Inkawhich `_ and `Matthew Inkawhich `_
 
 In this set of tutorials, you will learn the following:
diff --git a/beginner_source/nlp/advanced_tutorial.py b/beginner_source/nlp/advanced_tutorial.py
index 0d03593f9cb..010a38aeecb 100644
--- a/beginner_source/nlp/advanced_tutorial.py
+++ b/beginner_source/nlp/advanced_tutorial.py
@@ -97,7 +97,7 @@
 unique non-negative indices.
 
 If the above discussion was too brief, you can check out
-`this `__ write up from
+`this `__ write up from
 Michael Collins on CRFs.
 
 Implementation Notes
diff --git a/beginner_source/nn_tutorial.py b/beginner_source/nn_tutorial.py
index a953e1da130..adbd56d059e 100644
--- a/beginner_source/nn_tutorial.py
+++ b/beginner_source/nn_tutorial.py
@@ -2,7 +2,7 @@
 """
 What is `torch.nn` *really*?
 ============================
-by Jeremy Howard, `fast.ai `_. Thanks to Rachel Thomas and Francisco Ingham.
+by Jeremy Howard, `fast.ai `_. Thanks to Rachel Thomas and Francisco Ingham.
 """
 ###############################################################################
 # We recommend running this tutorial as a notebook, not a script. To download the notebook (.ipynb) file,
@@ -96,7 +96,7 @@
 #
 # Let's first create a model using nothing but PyTorch tensor operations. We're assuming
 # you're already familiar with the basics of neural networks. (If you're not, you can
-# learn them at `course.fast.ai <http://course.fast.ai>`_).
+# learn them at `course.fast.ai <https://course.fast.ai>`_).
 #
 # PyTorch provides methods to create random or zero-filled tensors, which we will
 # use to create our weights and bias for a simple linear model. These are just regular
@@ -548,7 +548,7 @@ def get_model():
 #
 # In section 1, we were just trying to get a reasonable training loop set up for
 # use on our training data. In reality, you **always** should also have
-# a `validation set `_, in order
+# a `validation set `_, in order
 # to identify if you are overfitting.
 #
 # Shuffling the training data is
@@ -693,7 +693,7 @@ def forward(self, xb):
 lr = 0.1
 
 ###############################################################################
-# `Momentum `_ is a variation on
+# `Momentum `_ is a variation on
 # stochastic gradient descent that takes previous updates into account as well
 # and generally leads to faster training.
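The momentum hunk above is link-only, but the concept it points at maps to a single ``torch.optim`` argument. A minimal sketch; the ``nn.Linear`` model and ``lr`` value are stand-ins for the tutorial's own:

.. code-block:: python

    import torch.nn as nn
    import torch.optim as optim

    model = nn.Linear(784, 10)  # stand-in for the tutorial's MNIST model
    lr = 0.1

    # Momentum folds a decaying average of past gradients into each
    # update, which usually converges faster than plain SGD.
    opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)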
You can read more about the transfer learning at `cs231n -notes `__ +notes `__ Quoting these notes, diff --git a/intermediate_source/char_rnn_classification_tutorial.py b/intermediate_source/char_rnn_classification_tutorial.py index 8cf3069d8c6..cde75e41510 100644 --- a/intermediate_source/char_rnn_classification_tutorial.py +++ b/intermediate_source/char_rnn_classification_tutorial.py @@ -40,10 +40,10 @@ It would also be useful to know about RNNs and how they work: - `The Unreasonable Effectiveness of Recurrent Neural - Networks `__ + Networks `__ shows a bunch of real life examples - `Understanding LSTM - Networks `__ + Networks `__ is about LSTMs specifically but also informative about RNNs in general @@ -79,7 +79,7 @@ def findFiles(path): return glob.glob(path) all_letters = string.ascii_letters + " .,;'" n_letters = len(all_letters) -# Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427 +# Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427 def unicodeToAscii(s): return ''.join( c for c in unicodedata.normalize('NFD', s) diff --git a/intermediate_source/char_rnn_generation_tutorial.py b/intermediate_source/char_rnn_generation_tutorial.py index 40084469e25..6f040b58778 100644 --- a/intermediate_source/char_rnn_generation_tutorial.py +++ b/intermediate_source/char_rnn_generation_tutorial.py @@ -50,10 +50,10 @@ It would also be useful to know about RNNs and how they work: - `The Unreasonable Effectiveness of Recurrent Neural - Networks `__ + Networks `__ shows a bunch of real life examples - `Understanding LSTM - Networks `__ + Networks `__ is about LSTMs specifically but also informative about RNNs in general @@ -86,7 +86,7 @@ def findFiles(path): return glob.glob(path) -# Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427 +# Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427 def unicodeToAscii(s): return ''.join( c for c in unicodedata.normalize('NFD', s) diff --git a/intermediate_source/dist_tuto.rst b/intermediate_source/dist_tuto.rst index 7f874adc254..36d07f18849 100644 --- a/intermediate_source/dist_tuto.rst +++ b/intermediate_source/dist_tuto.rst @@ -1,6 +1,6 @@ Writing Distributed Applications with PyTorch ============================================= -**Author**: `Séb Arnold `_ +**Author**: `Séb Arnold `_ In this short tutorial, we will be going over the distributed package of PyTorch. We'll see how to set up the distributed setting, use the different communication strategies, and go over some the internals of the package. @@ -27,7 +27,7 @@ In order to get started we need the ability to run multiple processes simultaneously. If you have access to compute cluster you should check with your local sysadmin or use your favorite coordination tool. (e.g., `pdsh `__, -`clustershell `__, or +`clustershell `__, or `others `__) For the purpose of this tutorial, we will use a single machine and fork multiple processes using the following template. @@ -74,7 +74,7 @@ every process will be able to coordinate through a master, using the same ip address and port. Note that we used the TCP backend, but we could have used `MPI `__ or -`Gloo `__ instead. (c.f. +`Gloo `__ instead. (c.f. `Section 5.1 <#communication-backends>`__) We will go over the magic happening in ``dist.init_process_group`` at the end of this tutorial, but it essentially allows processes to communicate with each other by @@ -377,7 +377,7 @@ world. 
diff --git a/intermediate_source/dist_tuto.rst b/intermediate_source/dist_tuto.rst
index 7f874adc254..36d07f18849 100644
--- a/intermediate_source/dist_tuto.rst
+++ b/intermediate_source/dist_tuto.rst
@@ -1,6 +1,6 @@
 Writing Distributed Applications with PyTorch
 =============================================
-**Author**: `Séb Arnold `_
+**Author**: `Séb Arnold `_
 
 In this short tutorial, we will be going over the distributed package of PyTorch. We'll see how to set up the distributed setting, use the different communication strategies, and go over some the internals of the package.
@@ -27,7 +27,7 @@
 In order to get started we need the ability to run multiple processes
 simultaneously. If you have access to compute cluster you should check
 with your local sysadmin or use your favorite coordination tool. (e.g.,
 `pdsh `__,
-`clustershell `__, or
+`clustershell `__, or
 `others `__) For the purpose of this
 tutorial, we will use a single machine and fork multiple processes using
 the following template.
@@ -74,7 +74,7 @@
 every process will be able to coordinate through a master, using the
 same ip address and port. Note that we used the TCP backend, but we
 could have used
 `MPI `__ or
-`Gloo `__ instead. (c.f.
+`Gloo `__ instead. (c.f.
 `Section 5.1 <#communication-backends>`__) We will go over the magic
 happening in ``dist.init_process_group`` at the end of this tutorial,
 but it essentially allows processes to communicate with each other by
@@ -377,7 +377,7 @@ world.
 could train any model on a large computer cluster.
 
 **Note:** While the last sentence is *technically* true, there are `a
-lot more tricks `__ required to
+lot more tricks `__ required to
 implement a production-level implementation of synchronous SGD. Again,
 use what `has been tested and
 optimized `__.
@@ -500,7 +500,7 @@
 optimized for different purposes. The advantage of using the MPI backend
 lies in MPI's wide availability - and high-level of optimization - on
 large computer clusters. `Some `__
 `recent `__
-`implementations `__ are also able to take
+`implementations `__ are also able to take
 advantage of CUDA IPC and GPU Direct technologies in order to avoid
 memory copies through the CPU.
diff --git a/intermediate_source/reinforcement_q_learning.py b/intermediate_source/reinforcement_q_learning.py
index f8ae786d437..86e353e15e1 100644
--- a/intermediate_source/reinforcement_q_learning.py
+++ b/intermediate_source/reinforcement_q_learning.py
@@ -397,7 +397,7 @@ def optimize_model():
     if len(memory) < BATCH_SIZE:
         return
     transitions = memory.sample(BATCH_SIZE)
-    # Transpose the batch (see http://stackoverflow.com/a/19343/3343043 for
+    # Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for
     # detailed explanation). This converts batch-array of Transitions
     # to Transition of batch-arrays.
     batch = Transition(*zip(*transitions))
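The transpose comment in the hunk above points at a one-liner that is easy to misread; a standalone demonstration of what ``Transition(*zip(*transitions))`` does, using the tutorial's field names with toy values:

.. code-block:: python

    from collections import namedtuple

    Transition = namedtuple('Transition',
                            ('state', 'action', 'next_state', 'reward'))

    # A batch-array of Transitions...
    transitions = [Transition('s0', 'a0', 's1', 1.0),
                   Transition('s1', 'a1', 's2', 0.0)]

    # ...becomes a Transition of batch-arrays: zip(*...) transposes rows
    # into columns, and Transition(*...) names the columns again.
    batch = Transition(*zip(*transitions))
    print(batch.state)   # ('s0', 's1')
    print(batch.reward)  # (1.0, 0.0)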
diff --git a/intermediate_source/seq2seq_translation_tutorial.py b/intermediate_source/seq2seq_translation_tutorial.py
index ba1a919975a..587d5edfe94 100644
--- a/intermediate_source/seq2seq_translation_tutorial.py
+++ b/intermediate_source/seq2seq_translation_tutorial.py
@@ -30,7 +30,7 @@
 ... to varying degrees of success.
 
 This is made possible by the simple but powerful idea of the `sequence
-to sequence network <http://arxiv.org/abs/1409.3215>`__, in which two
+to sequence network <https://arxiv.org/abs/1409.3215>`__, in which two
 recurrent neural networks work together to transform one sequence to
 another. An encoder network condenses an input sequence into a vector,
 and a decoder network unfolds that vector into a new sequence.
@@ -57,12 +57,12 @@
 how they work:
 
 -  `Learning Phrase Representations using RNN Encoder-Decoder for
-   Statistical Machine Translation <http://arxiv.org/abs/1406.1078>`__
+   Statistical Machine Translation <https://arxiv.org/abs/1406.1078>`__
 -  `Sequence to Sequence Learning with Neural
-   Networks <http://arxiv.org/abs/1409.3215>`__
+   Networks <https://arxiv.org/abs/1409.3215>`__
 -  `Neural Machine Translation by Jointly Learning to Align and
    Translate <https://arxiv.org/abs/1409.0473>`__
--  `A Neural Conversational Model <http://arxiv.org/abs/1506.05869>`__
+-  `A Neural Conversational Model <https://arxiv.org/abs/1506.05869>`__
 
 You will also find the previous tutorials on
 :doc:`/intermediate/char_rnn_classification_tutorial`
@@ -73,12 +73,12 @@
 And for more, read the papers that introduced these topics:
 
 -  `Learning Phrase Representations using RNN Encoder-Decoder for
-   Statistical Machine Translation <http://arxiv.org/abs/1406.1078>`__
+   Statistical Machine Translation <https://arxiv.org/abs/1406.1078>`__
 -  `Sequence to Sequence Learning with Neural
-   Networks <http://arxiv.org/abs/1409.3215>`__
+   Networks <https://arxiv.org/abs/1409.3215>`__
 -  `Neural Machine Translation by Jointly Learning to Align and
    Translate <https://arxiv.org/abs/1409.0473>`__
--  `A Neural Conversational Model <http://arxiv.org/abs/1506.05869>`__
+-  `A Neural Conversational Model <https://arxiv.org/abs/1506.05869>`__
 
 **Requirements**
@@ -105,11 +105,11 @@
 # French translation pairs.
 #
 # `This question on Open Data Stack
-# Exchange `__
-# pointed me to the open translation site http://tatoeba.org/ which has
-# downloads available at http://tatoeba.org/eng/downloads - and better
+# Exchange `__
+# pointed me to the open translation site https://tatoeba.org/ which has
+# downloads available at https://tatoeba.org/eng/downloads - and better
 # yet, someone did the extra work of splitting language pairs into
-# individual text files here: http://www.manythings.org/anki/
+# individual text files here: https://www.manythings.org/anki/
 #
 # The English to French pairs are too big to include in the repo, so
 # download to ``data/eng-fra.txt`` before continuing. The file is a tab
@@ -180,7 +180,7 @@ def addWord(self, word):
 
 # Turn a Unicode string to plain ASCII, thanks to
-# http://stackoverflow.com/a/518232/2809427
+# https://stackoverflow.com/a/518232/2809427
 def unicodeToAscii(s):
     return ''.join(
         c for c in unicodedata.normalize('NFD', s)
@@ -291,7 +291,7 @@ def prepareData(lang1, lang2, reverse=False):
 # A Recurrent Neural Network, or RNN, is a network that operates on a
 # sequence and uses its own output as input for subsequent steps.
 #
-# A `Sequence to Sequence network <http://arxiv.org/abs/1409.3215>`__, or
+# A `Sequence to Sequence network <https://arxiv.org/abs/1409.3215>`__, or
 # seq2seq network, or `Encoder Decoder
 # network `__, is a model
 # consisting of two RNNs called the encoder and decoder. The encoder reads
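The final hunk is cut off by the section boundary mid-sentence; for readers who want the encoder it describes in concrete form, a condensed sketch in the spirit of the tutorial's ``EncoderRNN`` (an embedding feeding a single-layer GRU; the sizes and one-token-at-a-time shape are assumptions matching the tutorial's style):

.. code-block:: python

    import torch
    import torch.nn as nn

    class EncoderRNN(nn.Module):
        def __init__(self, input_size, hidden_size):
            super(EncoderRNN, self).__init__()
            self.hidden_size = hidden_size
            self.embedding = nn.Embedding(input_size, hidden_size)
            self.gru = nn.GRU(hidden_size, hidden_size)

        def forward(self, input, hidden):
            # One token at a time: view as (seq_len=1, batch=1, hidden_size)
            embedded = self.embedding(input).view(1, 1, -1)
            output, hidden = self.gru(embedded, hidden)
            return output, hidden

        def initHidden(self):
            return torch.zeros(1, 1, self.hidden_size)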