From a5aec564138b89d3111bf99aeca3e44eb9206938 Mon Sep 17 00:00:00 2001 From: Seth Weidman Date: Mon, 30 Sep 2019 09:39:08 -0700 Subject: [PATCH 01/12] First commit of named tensor tutorial --- _static/img/named_tensor.png | Bin 0 -> 69726 bytes index.rst | 19 + intermediate_source/named_tensor_tutorial.py | 469 +++++++++++++++++++ 3 files changed, 488 insertions(+) create mode 100644 _static/img/named_tensor.png create mode 100644 intermediate_source/named_tensor_tutorial.py diff --git a/_static/img/named_tensor.png b/_static/img/named_tensor.png new file mode 100644 index 0000000000000000000000000000000000000000..2efceb9f51645d4d85267fb2bca2ff620cfe15b3 GIT binary patch literal 69726 zcmeFY1zTIuwk`}5TA)}BidzdUUfiV=w-zW;tayvNLvShX#i6*nOK>evEI7d}xD!b7 zt?qsH+2_0a-hXg-o+OL4rj9x181Hz;SYaQPW$IcE%5keTE5 z>-KYR$Kx!no%=$QqjoN|-RZNuNnv#oUoSa77Fx~(o_DMnQu`R#ooJ*Hm>R+zy2DY? z(dZw8k5CDGvpGBo%oXw#wmYcOU7SD%Q8ZcM-V_W0G3s>$+G@U6aTyxgFZSuh-z+gq zrzWBJCL9u>`SR)I{lHgo|7WJOJWPV#Ry3L^H(_mFv<23W<4l|8l2nIjM6Gg|MmT7K zCZPd0beU-DZ$f3ZcT_ZuzCEJg0kg?hf{R_fs;@bf(|o>igxiyzwLq7G9^>7)>-Y z_ikl%G@^1DjF~AtYKm2w8gK*S_(Dn}$p` zn-N?matWV1dySD~5O;cB%qGnlSy8)?2}Nb}5g3m822u8Xsd_wT_?k*zDq6kt+nX=` zOj){v^hgFOm1k=6a?LscI?;3fsfoc0YxzJ0;AKD6P?YtdvwXLgR) z55#XNKYj^aVv3+r!R-c(-u=wKJ1zx(L*%G0>7w3QZXkGlN>;pB!WGOq5kxoU9M=Y48*%)PL zmJ&R3m<_&^tL9ZC+yRoS{5SaNkCHoi>>gEQ+MM8B`0;&3lOTHfUgl*ej#1bgEnDcL z^>9quHCenk-#l7USzbDc=YtfRLU|b4LF)8ru^yWwn=}p-0^v?HjWLg83D2J1`Wn5# z=Mf+MkV!qF+{F!B#2!4J(1 zp%aGtS3X!E-|Mw394CL$uAo(w)u#+kYjC)5OatY+dArfO-`;RLKU*OR`@k^axBDQg zpWBGQp4a}Z{R4Zx)0fo&b5aM4v9b)WNK|RwM)lIah#8iJ$+gPL$R*1y=ee4&*1Vn% zppZ~zR2Y1{DSxJX_TR~tkn>Va;9>TFXnYr-`WfJrqaJtESRMFH$*gKxJ0;w zU;R)nT%3~J-Mhemb9#s%FcG4N3|>jk1YS*@Y!QQ2=~lVBnkz@jA5j?uI_4$H)ynx} zq8Z_W?JdvsJoTd3R9rJ{B9A1N$y*Q}cp|Rt{Ow-t`5r=-WBVdAjC+f)5ZL1jh0Dg> ztUt#_U-8HvhIE;G4a^Pm_wDl1knz8?!*{@A!t>$%#<$Bg%|~JU(@4ui%h+wMDn9Jp zVT!I9m7{{fD6(n#e_ zTjJ_d%1~nbImW&G&11OEHLxqx_sRW}E&8jdChOo-fwb2@M|l^GT)N7jOWnegx)vQP z_kr$}SW|SpbOT0_eNV!v6oO4-^5+~Pw?2)CX$7&n10LYsYOSYtqZ=qQL_M~4r*g$w-J7?Xt({vp zDvm|R+I!n)5cwgskD$5z8zqd(T-}Zx=HWZx@7Rn3jd`P*_xeuAjxXCa(`FfMsclnS zb6umO%T*giQv=Lmw^_Ur2__7f-!rRi%aZqqsDTWCz#k2_#P zx%lE6<>2EbiaF9=++l3^yCSYRrk^CbB=;P>3?GPUST}JN!!mkS`h2MNiJ2aY5W$l+ z##2TFh=YTqf)hjPLtr6{A(|o3=&Go}sBfXW;TJ#BDcxCq5WS$R=iGldn?AAwalGrl zjYtzZGT~+weAglfH;izMU@34G7W7%Vnb|n*U+9xJO4|^p;InaFYIh5{Aw2~v(i=EzyXFJNm zS!SsW%9e7B`GtA53ddQ`MZX{B9S!Nnk1!Zyt#jmm8=fyNP&+T6$}HfD93AHhQq(cmul`$E5Y=@uxD(wo5#b)A$N0DJz`!n=h^P{LuAF{!`^o z`U+z3VA$|{!Z|M=o2%#1SAC0>BQFOZX{1D0$16<-hnBvJUwf3QB;Ql&9D`92^;~e1 zi5hE%528{ls$bFg4oIFy{gC?cC`clRC+OI$;P;Ydp608!EgU26kT`9XAM`&!sgdK` z<2Gr=JmzcZj~wS6`Nk!;O~)+LY`MTrKG*((SVF|AB-?EF?Y-qDpNC+_s$ff29(KKt z3-xatK1rsyq?8@>U!y~9zeKPTv)dS~H=7{h#<%>>9ADVHNPIz^e3qPf*M$fvp?;Wb z%4%tF)b6!`yN)~b1I8Snd8C!!SmO41cK@_A4N?V3sB~|4Sy-q|G-l!}&#h1^gO&X# zw$Qe1_3490&-~iAEQ^}hLZzcD%*o0hhJ0Ap$- zYJ6_v-o|4J+spC-l|ao}%8eEa591Cqgr2&jmt7ZELccdSx<6RR{oLh;S} zw6etiCUOERGH9f(5ma}aJ2^R{pQg{3c^FMheJI8t0`U~SbJJX`YY<{&$ZYa(=pMRK znv`18OO{NaDt!?x*R)pQpY{BY+F{H*E)~&!PMWbZERb19jmwEp!txf zYDyWlM+TxkuMdW8HJdfZ4Z_ESPx-0m9E>*n-gUcFl?UwHaB`XKYA~kIZI2l7_@(S@loZ&c)l0| zVgjKEx5{ojP-z#l%n}lXqBI8$NZ<@t&fu8#mtM^mi9@A_l>==yu;^njiUBp+xp?Nv zaO~1{?G1uU?uRfw;1Y`y)$n$JPs0V^=gVF9T@r6q`qiwlIQajRv7qVOJo3$ah_>-` zNlZ~S>s;NxXuN<46ZuRd3+wYbzBql`+eTQrG1`G&j}Q7L0lIkeXBAxZqQae%KcT`S zQ<9@n0gP1LtzBK3+h{Rg&~ILmQ&7xt@ZWrAR=grc8!#&xPnM_cuI-AZm3JD|szUR6 zje59W1ac8NdK>{b7dUpZ+Ky;w#IOH;(B)KLouHwiXM9)Ja?(;#6g0NAW-~OgH8N#$ zv$g|DqoE1A2?CearcQ=5Zq`;dj)HC?bpI$J2weZY%}z)2k0MT%B6M0xA890Q9ZYF> 
z*tpp^=tOa8XlR5TOw0sTKS=*qb>Ke{x^GTSc7p8euCA_Zu3T)k4(9Cd1Ox=wIXKxl zIaz@ctd8zBPKIu*HjecFY~)|t|NDgf9UBMxf42=(75;ly@Z)zkQ!A|x->pq;9DzPW1^79I|55%w zp8UJV|4~!>-!*x8IsRwO|MBF%Y6`ReoxuN?&_AX1kGp`lL~(`L|6BB;xG`*Re1O%U z`2Imf9r(rgyEMSU$pc?+{`vd&I&q%IdLE8fS;O93|6AfDjRev3EKaoDKw0ikiq{;4_Lr`)=v62|IqR$XT6Fe*JVD2^H2GJle^2@k209*$ zc=gHif6L&%3i%pF;{8w2{PXq~8ehX@3}OHG|IrjjS?s@?{g*7nA7gTq>G;Zg{QpV$ zA9aIyIR91Z-&6bMhZgMo_yimGKbiuB|L}i>|G&cjy9fSv=l`$T`IkcZU$ukw|JM0_ z@?;PRqMx(+o}+C@ILvIJ?)&y(Bb1lg>%=eRopErAsCP?>fMdbl{yyjac*bY-U;;`D z=_rct#9Cj`Yx~J8e+ulR*J{4rol+6wAr!sIcHfBP`>kW!Zo1;WLwP>Ln3Xp+4TI4;ttZB^Yjx+4ZIVO84QZ#lW!+NVGw*UD9r)YWwJ{k951eM4F()-9Uiz}SH!|fpCG4{m^^m0#X?~6~1 z8YYVv$&bKDN{+9-q(Uxb+IC%7qctOJmfKk%pPh2(_(I2bcYXJ*7dlkdIu?6bW{|yK z4Xz4o&3W41mouZCeh=>2DNM3KINwkknVv`Q#)Z!#+|Nh(tj$vQK;Js0H^+x^;MbLx zjZ_`TAhtbF+INs{TTd`P*9dE4y0-gP;^=Buz|@oT#~4)Tho~Zb;k$!cd(WIkomj9{ zn%XOr7PK+dm5Ft%sz0pA$B4~TYO!&h?O)hlvR6oWk%(c#&pg$A={;UmibU!&w<-tj6`ojwi9~Mis#@pcFCkQnWUFVt4 z58P8Am8P_($k%qGJhPbLEH$!pGR~XOD-q61cjhzAmkrUA1~&6s{xVeVd7!m;kHx1= zdj)C!`AJ`xCLWg?e7?GqLvCT&a(MjS{koT*Owh36k+uq~I)Og077bgoVWg$ML+hi; z%A2=Sr3D3D5)`3H{UO(1hFT+Rci4L4Qy-A^*$+^f69{a^q$g`oT+JC1o)?NCid~-T z!{g##an(!a9qqo!F+lnSJWPFb1nN{w<$k*rO-*EnY14kGah_P|)fXjSC{^KnJ9SgW zIW*MjB)51T`h-4W+wgWPS*7{>0fyJ?1rAlNJtG-Nl;^ypzfwK^m2#1+WrQQ-85G;;VN&X6_Y{ZBG(p7V9b(-euoXvj*rNELHQ+}7_jt(ZFxPU??m$(1n9sY| ztAx`z*=Ob3SQ4XpH$RowSZEg z4kcC>p({D8`>3{bc_;&91jD#z*y9?J<<%xfJ-ip`BRzBGzk*&zwyQ{3{z4EL`l-i^Vr2nG~6G!U8mM7N{pAkN4$#EHxFp7?4}T zkQs=!cgjYz*b0T)s^3*hBFG6HbJS90YWDYhnKZ5S+c-QDvmtPKc=fW+-C+>YF3{IY z(xgE2)+CSJ{nqv~l4Wh0;%%a`04KZ=`H=i42zj2hCV~tSAHvl4xi#goS>g|?*Fttf za(2#8ee6CHOTZy}z0=_01YQ2l=y-IyV@lRk2V%{!JUqyFcyZhhLM{3{jf)C1$WUI@ zfx?rT_lKG9+L5*zs2`asDqAQap@XkuLa6&+MRuic{7}~@WL9jwxs~IDJZf>>G8=iw zdOIs+6pgqYABE^xo04Fb?QXD?&n3PqJ9Hf>M7-q`eJm*L&#gHDu~UP;rHqN8m&W5$ z`LUK$dRFhSi#yUnmS3S*A@eD-taT|Aiq3Bklqn+9jF%|Z@+b6HW}*gB&7fb4oj8rm zS3pa1YwFrd9b_hgm}N7I`lvFjN+c<7!)=7@BLd8Gr)`5w1WU@kfwlgcl>|9)&2FFT zWmoG2?OVq^Zh90`qjz<8`;k@7VGM$*!2}U6Wp_~VGU~yO1<@a86QA1=gZ?0EY2-)| zO79_@-rM>Cd+PybGS(PTm7x6~Z3r&To-7G3ab~VAWFN#VKW?trFgOZNBsaZ6>A_3A z*B)=jDX_f@XKVSE{)jD==40X-9E7@KxOK4^o8}fvRFc?M+A2C-ERkGiC)nwhu|H^m zRphV!RpLEfeW#^@E?FIy&qBh6>A;A4V*L^IlJha4%7nIOa^_F}6ql)<>j+$!zDa6! z6V#`3J<2==p9qrk-U;P`TJ$jk443_PQu^LfWlZ88{p z5O3=1pH9DOn~Q>b0n%$zer0yXS~;=b*9LAGILwPUkKk{g$ed#`ok!Nwwd|LaVl2CS zSaFf2j2`^zOnJR~@Yvu@(^ytut)X*l)gRUf=C&kn(E!im_Qu=7CqW4!G6BxBdb2PJ zo+KZp%1@%@X`ph-62)8ac?PA<7}pagQ9o+Lu8d#VtG5o?EU3|8gBvPMXU@sDm1+q; z)Rv`O>etyVl>){<770~_m?j1*t9;;FB^u^G<_lr)OOU(8{Ha9%! 
zMIQAwy8%k5-Eqh2u5eKec{3npmXlVHDZPU!xSJhd)XcPxa$*E}%8FMY7mb4;&OHrP`|I)ZX<+9GS>Nh?%lY|E?#n(9lWgF{S}^i?6o6g7W6`+ z%KnrPvxPoeO@h5;m+m~qYAshssc300K`l`-aj3iY1H52x8&-o*`K3lTpv;l`!4xG` z3c51i*5@@4e63QrZ7-h9hqIetSMGYLqVGnAfN-Z9=`qi=t@gA1q9$Adfwk@XRu(-J z&iMiLnf4d9jAdu}h#(x%O|Ie)(?-)ppUe*Cs-?#(+UotVLg5hu0B6#wE>v z*v#p-4wtq65s$T!0VnIZXty?mK$aI&sD<6CNcq2JlXBSkChFAdsLxJoOq|~x!)`@# z7qKnD28YQfgO6>T36AGAJ9g`(E3%>cuxxYTSB;j1oAL4{7qO6)LUz^$6@ zc}KkxJ^g{F7w!s{>S}v=w|yq=o^~*)a%Nl4H@3Jr6o1FSm51Sn=U3jaz?szZQM6tA zWo*Y`?YO8PHw(mISj7*y$AqFF+O@<0(4Q+ymYjE51T%w*>i>BC{#J1F zY=5j`RDOrYYPMi->^TL|w;>h|@#Q7|IFT`p+Y|yD1W-7%5AgqieTvHvzXp?hv z<&rkz)V0X}bxXQT>f1gQ-0a(2mHVbrQE11F`rdR8xB;xby)^nq;r{sM^x1FYXc3FN zXT+_SSAAWNsI4>^2&d$BH1yr&^dHa%*9&Y!aCrzZ_Y(R&N2e^;cgqPn7?j?Z-j_*k zKJM@wH9($`oQE4w%$6ga@j14Oi&>14-Va9WGje=g6@NAtTeoltYN)rntTeNUl_IX@ z%J;CubOW$EALwpzCN6H4U+NO%%`cahQ`?;9r~wsnrTrM=gM zxIR`T$YS}Zx|Qwg?Fl_$S~z>lT*a4)+Q%^JusV_BJ z(nw1o`jwYje7ZHelqXZM7?0-Rq9Wy|+FXmF_l7c5_8+pgp+2Zrx7F@vV?LRr)kjk@ z?~R`LK~c!-oD~zN-ejfa$NJGbnVxK@c2((PU4HFiUU|P~m>yZaQF#3GQ;weyV!Du% z&|}jr-Z<1>B&Ny9tcDTLRU>X*esS5_{N=h1;b1*r-3VP_zNMesC;G|Z^Il$4{iw$K zmGj*l6tzP9(W!S~2sC-@i0G_>0{cTpxMZH`S&L{;B7*Gf_^6)KXql2|{`;F4)-Hc` z?>z5!?msYj-xyQuamUsX8i20b(h<&^*t};c}-h; z6Ed5fmbDwyFOCW?$KhdkC=k@mxNI|#iQ#9 zr_NjUb2&X0PsQuu$2s_X_Z|$(uaqbSCO(Cii54SG&pi(7XRjEf*DKKcP8KIWAa%3e zFoO<_v`9e4hfegu##)-Kb(f&7Cw{=#v3{$N|+fxux&_Z z{*~g|Q#Ho%mb*-Er>CQ9LcW1n%PhrIM3Pqz9p-(Vw7;EZJC^rHNNic%ZdT4=ZxQ+l z`L4_}M5G$RVAqNMsFxmnUVZ^F#`b}M`S-(({z!!gUkt=l6`bxZd4r2`(*r*U_TUlT zr*T4}QP^l_yID3IU%TYR#-{J!*k)#^WFsEGzRxa|R(OvHLZ`0KjQ~ zHBs!Jtec~pvRbLB8yl99MYK!mhjY4a`ir~*$rjV>!d0f`v2jMeMcvb4&gpwhb1*O+ z3z}uPs5dMh-~ObX=a8SMAzzPkxha<3Wr*R8)AuP0pqGPHthCG%x^UBX3MlDIpRaEV zY8@RUNMYG;C2A6wjisl^<=$x*C-!iG$^4CYrUVb!DW+Ggs0* zkD8lhqXhSh)SJqX&3v7L3kblT?{sgV`PJYILf&R7O6S^VQdlCufKR+&QomAZ{_&u0 zN;%(*{a$*SnpTmVyM{*lWEPC%83HHU&PD}!EWEfFs36Yorm68^{3^50iHKGgA{sIf z@-{x)6e}k*G>~7+Vl_2?On`k6D9&j_c{!v<{YDLS6=GPv6(coO8)mq#!z1gTyA!7Xxv9bZn|x;38sX&we)K$IFFh8ytTf~s>SUf@yl3deXWD~xOd>zk2BQuwSv3_jXO zB`El^0V@pI+|UJj>$RO?o|ME+SHghKw^0P_*OoQ}9;_D)_dK&sEJ90DElKCC^KexZ zY|9I=Xb@bVq-2g=Q9CC1Y4M{DGGlDtJvqbG21{Weq_1jBc-^gcp7MLd#RI;3C840lK z-qUcusk!vNa`i07J#yl4jdFU>)AAy$$V{t|$?fX3QcJ}VE_|vA!k7ySX=;PSfxwY3 zh288)V)2W7Puqg}fSZz+X-kokEv0skpnB9Mel-%XHLUR7wAa}gjv$ZX;Ve|Y8VKgp z3-1(3ch-kvN{H;7_C4(H-L=qT#%pm{R^)nv`Yt=vBQn(`OmmlU+_HWB(2;OwTN zhqW~)3ub;|a%j3cY8^OL@ihLzZZ>ejZ;jD20PI;`?G`^=+WUvQ^^2Gr`)DC`B zj=@Zt6LJOtaB$zbv}y4$jW9L_Mk)w2j^dFjERJ->M=gAKFb{Ui4Lmb8r~dd6H804$ z45-Aog-{r9K;L&NS2lRWOpo|@AMQh4URFaK_O2n}tPNU4k4bM`8v%b%dDkW1s*DFKnmAi& zYq>y7BuqYKpu;x7-c6uWqX8)sd=631Do{*`!%0CEs|`Dwu$O`=;#}=59`sG;35)3t z(|>I&{jratyH(R(GSBi>#j&ZIkPf1BVIE%x&m68FgfoT>fF?|w=_bL2nI24I(fc}X z<9r2EhY8y-88P@Lir~lC+>&404$_YHx0eJ;P&0^Q5E#JKj)nlqnJ6xkFu>YYrFY@b zEEkWVCtfwN(XeP!d;9seat1XeO8i$SG(1MEHJ?k7)+9Y&My>#0CR*594>u6hNxrl` zd+KMHVQ3^%mOG~QMlqI?KRyiIr>Mn-CWAa5=>O^&M zi=Gw(q8Btdb_hoKhzvi-q>XtU!}G>s2t-N`S;!hL*nt}$BOR-nOF(vg*U=oNMPYL@ksoNcP zju_0*;zXJvGI+{(Qbn%j=*!ae`Y2qDCZ%0eR`SehG|vmkAiP=j`rbYCiU1a15VcA8 zAbu@HA!6qAO>sH~RQ?lmo(APkUsvK=PM8vl?D>3PFU4-dVg%vP5Wb2LxADu>>gX8@ zfq>^H^brq7p#t4QB_QPTvCnKzdf*BWdYI^a`O!N@jNCS%vW)iR{(QXf{?7eGX0MKN z!&jUclVO(+Qtgv>C)Tl)XsBi2bGv2e86OK$0(D`LST((Dq4LY(yM8KvSz;-!$>F>| z9lX&^kC`C;O=~s@#AtiSZB%}X98&Rl3J$D4{4ujtMNyGq5BJ`Tm9Zcqh(-&peOea|v?1;jLv^wKSU|J;6fG&@lL}+>)T}W<9+48}b^A(ngFFWFZiM zsQ6w)Q4{UHMdwp3tVt|KdAl?BR3TTcJ4hPwd2QsT#LO?VHME_A&T=G!zffNHSVFBs zC|!zP;JIQ?zsmObsDjK$uTL{9zX}^GsJUgX0_k>K-X~smsMg+pZU>Hm`j!#z6{2t2+!d=Z#4OSAR| zB5Umvt2Y7SKRRztQ|ceCiz=9qe7FR)l0v?OCTi-xwlc7y5)on;uC+utNkbw?xm33* 
z;DmUv2+ZA3XDji=VA`@G=e+J_ODz47DhS~%z;87FsbutJD-$Y3w*#086IEN}j^87y zmpkJ;Uwj^Yoa(Ex$z-Q(W&8^xKG+IOMM|@T#ER;9o%SuuB%PiDi2tFasway)*{7gm zSi$fuZ3smw&Zr7@o$_i?Zd8r4l~o||AQWJYlNDseqTi$snOxFNCrX2{oMU0BRP~z% zFjTyX=)8O6c=34ptHUOWRKi&exw%n3s4zhf7G^4KOj+>3NRqBj3gI{t$dpCDVLN$N zu`lfqwHwX~pxa02LXPwmI_GD{CiRr~FmF>@<qQ_ zYPHj2i~XMVabax<_=`*%w7Bx420wkDs_U(e3-p>y;_Fk9{BhbWI~MwOvD zJJ;So=}q`uu)*CIl^TBo>`m-VJY0hn;ag_oUXQ*Pnk*13C7FdFGyTnN+`tEzsWe|N z#qSt+HlZ_Xhu^de+}48J+e;=;`;pGMsh!Kb5*%e*(RA<1atXZZtixRV4;r6-dei|4 ztq)g*!b>M_G72|cxqmTUe|8(;KP&YQiTsg;tcV}U@X?p<2+MWEW@$aBY5p;nES88m z2FRW%tPaH0qT{J$K;rIaw{kE5Y`_1;ER$IEeL%3QfwW-#P{3Ts;E+0v+TTV}d7X5T zz1_)RR8D^x%p8OA&~d}BI@Gp@vw?v4h>fU(F3q0@7-QLh^R{;U0eR87twG3BR=dZa zp?du{7CiF`8^9gdyL3Lv#CnK*>0<9|7w)nw&m)05y7{feM$mej%;9)Hx$V;PEM@#a z;I!9Vky}>Qs+D#hja5Mql8?rD_*F}aXu|?Ih~?#Ez+FXr@Qfk-(JXkRGleZ%28VZn z|7`C-Br(pC`%cF4a28n?>UNx!HNVb3;vmQ&(`n2B{F;*Zoh1&J-(&3FR?)eu94(}> zz*d5PjKs;jc^1}VmMZaI^jOxvb3%Bpa3qT?`}RuN(t#jfko!lOSh+tSbf z9?UNdsYhDN^nA%-VY%yW^(q+8?2tpsL{m*!;+YSGwsKElM9|aGNLP@GTK%MyKFJ9^ z8L;s{bcIlp-F|~!$F(9)bHsQD5bON*$?VTxIpHltD;`lHmD^voA~AI)4i;LOIv1 zeeM>a7LGAtG)_O~vVFWzcTm0>Jv~D*$1#Dz*zc>-9eZqT>_VzPuf^QJ9Nx*5fVvx6 z0-+E=YxaTS^q8CEmgno?>|vURm(xe60{|2OoI?`NgNL$&axK5$$=1|$&42VuuCloq zND4&7w3BKRLIa&~qW-3~C~>rlsW# zoHZGGWzJ;N-@o#bX&VgVk#WC)>5f7zv>abyI$dbJl0cd;xpk2<6we(Zj-<*4ydf4zqLUE1^Qxv|NJH4)0zgZc9 zAb@&bFMmC1-ib0@7-|5y?3rIbuZZOJJ3NQ_%EOjx^g1jEvzmfS7PLH~ycYwRk+A}%AqQ^&I^FCV$!=`uP7^Kx4 z4%evP6q_i7BPx3hz946oXHZvPe8LTv0cB6WcDJ9uZs2W zH0vs4D#bJK>1Ox>DX9E%Jf21lgE*}Z!f3UVkL z6bqF|{;c_m&ttN+?EY$tAQ^-L%3NE1(gv!Uc%1s8eQ8SSx9II=yz1nur`S!_-s^&G z;z)Ipeadk51}$k#U32EL%InxfEf^)=vXk1=4jRM}pfpdh2I~5Duyn}CItC71Upxt8@HCfx8rY6T!k5+JmfpVoy;P6Am0QiNxIsvS74v7 zCOKyII;%rda{HSc+t@LU{2bZo7$KFb9C60M7 zdb^VqeN{nBSxgVACw8{reGHtGFA_mn$sj;6CULCQU_!LbDOI+0y^reg=}-W#4z1}@ z1=l}#;etHB1Un~t=rmfg2yT)$d<9}Ua&E;iZ%Lyx>xQ?uNe(&=#rbh3DOUseVh)>a z#T@s(;wm^d!SyguuY>JYL&H5Iq5^|5+_Ym=Kk_Q1cvZyP44&v0pwQ&zi7fUaAiN_}ZcJvcx%`j-F2 zE=82qPLSS)^He?qVBKXDPnSgevQYOsE1~w)_xPEPb|+VhwyxCLgkHPD133h8snz<# zg_X<~j^f#$F`83ks{K~+mMT_(P%Df#<%NbW?F?~K3pAs?{EguBs*6vzhu)OZCh7-s z#T#IjY24rAB6BtFa!(0TEGyJs`iN&2V&2CJ&Z16C$G}&ng0Yr`YJGptC><(HA~Wza zXs3qBO@*2osX&)q>d!}qI<(L-0FrCubY5K7zDEpSfiG$`qhbI8K>BH%ZCQQ%5tDWM z6_cC&@la?z!|8BoCFVIEx!J}<)8wfXzF(XD%#$#)h?oEZ(Ki~tO%ua!x;ORa2L#WX zZs+Oz{!-%!-(z_gU5^g%R)F-LUw!IlU7u0X5ic`$)iN%$ds)T_lQW-beeX-8a}Qs@ z0{cX69s^eQfOmZ(N~ppNQF+)6q{sOGB57Z-gD^c82AU!JJqL6jfs#P7TTf75jX4%6bC(qJne1!mv8N zguNx`zPB*~Y9)gJp_cGE68F18S(LKyY41wEz+!JSVDQz$4E1_$BFTb(MX4~9EUCN)NM4$ubVSqzu}AF%lE}jqnM`V zZR=;gC9&2$&E;ks<$_%9Ht-TlE6zus(FIQ4>Ppm#bP zK@CN-_ojm?Ra)*DK(dAfgPo(PC^HpaajY1;Xp8!*r4XwDg#Ff0l{!-JLRg4hAm-e_ zm%KhPlUKigb6HXGJZqCIadkdXjyy0vl={k^Uu(nwB$>I(U!Bd+{}^kn8k4@J^Ir9P zWC9v6eIN1=nfS!gvHO(qIoxS)&_!n(V4K-50_ZV4On_J@RdT*F?gOuXFwOEua5(!Wb zsQ|iJ2CKOtw%9@Q_t#g8J!TBdt!JJitPR{ox&ww=cRaPXe0Z2AI**oN6Isg0(9WE^x>b;|v>!}Z%F;a{*WIOvpc88m3CFrDwIu#h6Kw{vL zG3)YFLK?E_sMai{0{y}I-M-6iPDm<=z8CG3xq^SV;ixDChoAx;}GHY4hBhd5WRkjzD?FuVbO>R@>u@kF6z$y6g4mIW15kVH?#Tnc_ZS=f(F&VFx%3_P2!Kjr$Adl_c(I|veRdeV|65?ECfZrW zZM{(#!)05V^0v{rWqL=gK=Cd6#8cyzwP5n6BE)(nvq(ho&}EMNZ=1yred;H_b3+UL zbxi;KXt}8@glgRld07Wkx@Y9A-+cB(xqL>vFhlS=2G=Ls9u|EUFzH<&?B>BtWv~PC zJ7@mjo4*R=bOlDvn;}h)J`SWvDT+?+l`fUuj|@t8(wRjP^{KF8aQdgk)_X0NtL=~p z=mSwzAf#X9qg>CUibCBUh%FY?m8X~=HCq<6156`dT;65vX+ODwaBS4{_YHl($qC-* zrN^wnj4NKXfj92{&$y`ISMLX5lU%op|?chF^ftiqvEXUCfjdia*h z71LYQt&a5J=2tJSk;;#H1kW*zdKO$<;vRO|;;DCjc~^Gn`9mULdspM@Wm*R*mzjx! 
z*8O>BKwRI*BrZ{sL}J<#n2}4~y&5h>8TZ7pc9LO3>UN+!3L9ImC+AmaNZNaR# z_#VkffN3&8@SKe&>DZ9g3w`q9ne+jm3V9fT7}1+oF)txuHbq`SDb8nJKyKyp;N!53 zRLhFbjYu$%z2Whc(uejV8G7L(<)h{KA#myW;w8D|trIs@euMj^e!tmBwW{DY^`D@wfjq6ZtkP`MPn)I5kA@T-NA;6lH*3H1~uY*@SpdOu~HkQ zqYoPv#}v3KYXQDqW`$gd1T;gJy@=OWV=YtSt*uZ|*#X~A>awaCVvzfCCH3i-LTU&z z5I{luxo^{eB*vG#|bWP<;K0}aYBhQ>Q%p%>jR^XVTwnwpw#TRik2 z;0Sp2d;$X-xl31a8+Rko?Z+nSr*Tv@RL6YErN##ch;pZAdQ6Md#5?KAyq#X_ZD(!* zJYh`Oj*a8UaG@OmG=cw%a-%L^;NK>$Q){~gNkvNxJ zaim`R3V@6N@+A%sguPRP;C4O8w(V3p%ij7r5Fi6ob`jT5LLsWy&L%0NGK|o)(kf?@ z)|rh>HD%%){K!x(S*dRumTCH$RX`lUDQ&uo^TpEJVxiWy7Or6D#+Rpm@cp!F&9DFh z{%0Z&qb8Fr(wX(v@n}tZQ{TA^{WiMC9bRbTlZvcAKVDkWUIvsD!bX138haC`jg|fV zXMJyNrWPN7gg^I#HCffqXoRt8d&|zEB)7@preRone+eRIqjBh@Gw*vuQ6;Da^!5>E z>T4hk0f79Kj1@L6wQR#baeYp@9u;0p=C4g24za|UjVSPAKY#La2V~5OAY@EaM(Uvo z^}y9D0E-YWOj0LSAq?2IH-9>4Wn_I18$NK}OG^J56+Q18YtzJLTYuW>yvVU8YmM4Kpn`JkQ8W(Y`DA?FsHB|J>&NlVp4r49) zd-1WS8M1XQcaubJCG{o3j74sl<7`u|nAoqmc?~qqt6h&LwcsTR0Nb9Y4bWw+(NC#~ zLa}#~>>snY*-=vE+$(t+r^cqpI^XBp4*9c7cX$#HWR9Q!M_`m|TsS_eD$M}cJC(67 zhKvpBFOC76S{VDH*BAsO05D>9A2#U6(KI+DJ63yz8(Q-&6x~^`@8^31Dfg+Dvrrm& zi7ChL&M8mmgVP$JK8rRppV)Eos4#fnY;|j1=_2lo(31KI zM}*D5dLRATOvV$K2WImuwjYX7O)h`KXzMLcT2C@xv%xD+wQ%9RhXd15vE7)#D{u$$ zyDsoPJYW%ozu(_rW_n*~+xE6J+`}%8GAX7CRS55A`aS7Tql2daWsLB;Tfg`{B{t93 z%GlS*j;V^tFet013vGoj#&fw=Mm(|9>r~bEc)jO-7#-CFWT1hu=jtEs4}zxxT+(qX zbohs9jB^n2hoJJiMCZH5_BsV4fSEH6!-qw1&JjfKX!NVwCD-W{^F#?9)D=HjOJI9$ z^h_6cQ-DqOs4Uod?9X5?z$fO~r(OT82NQchb{fy7et?DOm@=IU&jG$XDxg%y`T0?S1!oB-@o9qK$<;Lew8( zc&N71QiwSw2rHXIJK195b;$VLFYRE4Dd;jzPB_LNcKN$+YY5y<(l=oNPu6viQwfho z;XZ2uq+}exU7VJ&Ml@7AcmPDoAja~XGi-=Bxplco+4ZX}(5+m&M|#x58z727XYX1G zIzTKw*T;~;Tq;>D_GrmQvX4}`tU5mo^=er2;dS-_FonO+aT{06I4wp-ad%^`lHscu zB3V)h)|oBLsrYztq6!IQPXDZX@hNTdAS*#%!SffqI~I7=Qdw)R6o~NS#ARl@Z>nwD zW2!c#5)*hYBwUf3B+%-#<#n?bLc*Za<|{M#R!!=su={Lx75{sHt^?BiUFdR^8AMW) zG#i+DzTb|ZTpZYtxa;AUUCWNkvdvp?~GlT;SV7)33!3XoC?Bf5U~_!Rtq+>cM) zgI@f9?7e4DRNvA*s)(W@NkAkg0m(>a2!oP?AcEu|IVZ^(R6xRjr(Q$xnb?D&E{~- zeF$GMyVs2|e`-!t88~4=>34NGrnYgo9&oJxCGCM5Ks23I ziH!5{;EKh_km=;?u$DL&ySqr9sFoQ(MT(%dB37xpT}qN zLt8W$eHovoT$=EVv$yK`BmY@<_{#CGWSf{D|g4BtVH@ z!P%m~3dSyJKG(fXR>>B}1mA2JdBlBJ4YRr2S@Z%p<3jlv{@x2$0n=s1n|Wyt6^#n6 zw5E&Qz>%)U=wr0y(6LL(sa|B?3uIHtC!zPSDd>L`(Zf$ym41>M22JvCC) zVdQAk%R*%V@MJe{2qX#$1B6g}4^AdZ{i|y$-=L_a>2yf-AAg|3loSwvQDrGW|KWP( z$P(Ey3v1|O6CBh0GLd(#F*CFb2v|&R1u{!vwmUhWJhA7$Rm;l?pQ+lbBZ)Hz%M}-= zW1=~!ojxK#Pz6UkC7BBA|mu)8(dVG|0V$r-VL@M0F@czf9h4KiqXdudilz zrZnJYZ@p2h6ixe*j{~STE0+PSl{G5_=p%~IB&hqn6L-JbJCz!eqxExD+99l)#qNrx zB|C@ij>Fk3Z#jAwMCThzhCWyMpih7};lp*-`zspavK!_(Eyb%1f4@C|JpiyJ##;Fh zMnyNKn?{Bl$&WLQe}+^?x^A%P6g@Fmbx6oTIl>GCtJe6PT3a2)i~#MHI_}1ckZQiZ z6Z6(G_jr+uudR4O^Mvj#;z*AK*j}X7J?HZo%ZnGYh9^Z!p?$+1Dxvy@abzEu>rO}E zr@9i~>RcXfM?KNZ13X_Iy_M$J^8PUl_s7oXM1Zho>jXfkIJ~JuJ%&xW9ex0xX;zS9 zXFm5#dZGFT08`r)6{6*TDFAH5M|cx(;8Yf{zq?I~xeOgFjl)6%S;JuWAisVFwX>`AY&<0kHkJ^?rat>^szockd7POdl-nL(zO&Jm3ebK)U%_;qT zMnC`0EP%%%?)%Bh#kGqSOz+xfDez~7>zqbrASJiIAP8<``k@#7gfv?uzOV{~3^@bj z5>xj2emlkhu2D0l-yXs)&bCFtC*Ci#&<2353rRYR#WUXn;b-59Ed7vLI;q0WCUZ&3 zwJ9eXO9qyf)(1cX*xu+F?{o_`b0qE3yPG+#kTY`2^q48!sp#-4jf$GL*H|o;_6l_T zFxv~TK5(6J62zxC`S+n4JtO8%l+(eI?+K*1{>D*HB2r?2B*g7O&7o<=ftj$>hXl!^ zvl`=Kol(bw^ik5>qeB2C@UOwnGB$v<*4%rkSg}`PSDWB9kJ=Au-N}d2zzhCvEcHS~ zYn5<%Nw=~>X~NICd*OmlO4N?scE3yEJGTKy&G*(vEa@LNdM7X{hc_pH#zg0NO$HU_ zY6_kQdh3i{ODZ#_>{gm~_qM4tGMtys=+kNjmTVT!z^8TRO)+{&Q_o)DTKwUs`BH2R zg56$tT)q1%*u}KpMaVwwU@Zq~w$yG;#QDkIz}QHP)!a@EHKc%%#z2&5__|rdq*y2Q zm8opm`g^6|5Xsk9g50YmOK-#gjHK-~4s|*o#T!P3S2a5v=Kor$GC)$ABU_X)Utigm z%!PFhnCMQaW#sda3`==^X{gfY`d7mMgN5IXnFahbN_=y=ZjBlqPc#!|H2#f-1CB!2 
z8pnR1Klr*}dz`=s#GMB=1f)UhxkdJ5qX@@q)nDD89LEbr>`!)#L?wIxW7S89_Ot`v zLtf`RT+PFia-^J0CqW7kzdA^C#8v7ODD%MSW)OQzXhGN!(8~T;Qos512F8g|q2*taqju8}U3EwkGX~yKW*5L!iJac# zvbX>GSO{+s7M!)p)-YWkjXojXP&;o`x>olv%G&Ur>Q%mf6k$2ZYQ0Ue14}3E1@e~z z&z{W5w(XJZcHZefYU5>vhL>;|t(L2av$^tz>xFM0c5riLUxD+<7^I9Yf0NC=sI9Ja z2Ka+t58lY455l($)98KNr=q@!5^je`{`qmdF<2BZXWIc9vwZJ&jDB8zxS8c5D%ZOE zTB9^1#Px+~i$u>2njdBa^prlPE9=x&lr-%0`X1`e!dKcuyKO}`9!Gv*0=QyCUjbxm zjPAFP@Gp{cy)gOhE0Yf0%5wnG$GOEL1vYO^O@pFI=rv~}-(Ma!s${0Ka)RmKDNEL( zbO04XF0mm8Mg^Uy>s`3|aJEB0@Eb$(!!vq}rY6}dA4D(9+>0r*?1hJba#f=6YN;3Q z<5l>ndQ#W6TCc_hSr0VMbJpUM0<>tR3}NuL2lZvbl0Vb{EZhkZpI9o5cGWZ#rE=&1 z^*Xj-YDpuKs@tvz?15S!Opv zDpGEWW#j_2DYtr*3H2>~c|zQ;1suUrY(fINV;y<7L^3+m$D>=JnLPb@!R;?A!SUekp{L2m=uJW&52WM0E6#j<0EW5saWN%`x4VYC0D~hFW=;nh1 z38#U$4)OJ%s&tCY!9UcM^@240zrz-$*Ny5!bF}4aY?4nlt}|%AJhvF zok9i(!;YNp-escmnNB?!0x$h`LO_`+SEgDl(9Sn!I$JeYoVW(58_b+Z{1*OUKR@qS zPYeYt&I7QneS!xs-!)21SRrct?N=rg!4>>pIZ;4swP5Qbi>p@|85ooWYRpoNhtt4T?t%pCo^EZ*dP|E9BR2vuKXj$ z9?l(mdCHoQxJV&x5m}ExUqhNWyL%iCb5fYD` zlg_Iw^Kn0a5xVb8KXA!{&kEzRs^+(w0YMBuCG5ao6{s5e)BrAZ<`6!6dM&j+FA1>| zjOXRBTMJ%u@&DS#R~d_3QYr<&p#W^`$c~u-m+^PIuY>Oof_* zsS$5t7gKiyfLuwKyZ0Vo-yTC090H4gpT^uG(6ZL2#SqIMzmCr~G@J%k2U7UH0IeDU zbL@imS0O4tNk(_ZWa-CvfUm`sGKwI^^1=#0t6xxo zR@Gt0)jmbzJ0lUkePJncI6_5=}E7*IQH5`q3>QM&{5txr-uf}kpq5hAjy6b#f)JQW=nYJh$eEk2Rl_NU4RZ-Jh6m1y87?-P(c)qA8!O0lhv zC%Z+ZsQ`(^qbATDz`2vNqa|iHKP-0i;mUB&A?5_#KE(=EZUSi(kBPP?O&tsr1f)$% z^PX!E`VfkSMJ4%BInh1lz}-dI9`Jyq{ZWO5dkACa7L`^XF;CFj+4q#As<)-)FG8#*sws zw~PTW^@xauv1%MNrImM+myabvAm}keILoGEke9- zpSYOM@Drl+)6x`ohW@!L1lvy}QT%jn2hgl?^1ftPsOcQjQTxfJV!VD8ctgMwO5tPlIY6oxWb@bh8fZ&wF_3I5IOpr)GL_-s%`~(Z;@)KLW;_&K&TZZ3rB{AB&n}Sl=+(dJUlw@7unZ21NJ#70ux(rcokxp6H-J#vd7O#kSQ4 z!?th?BRgK2mZ4XtceTm0qKL-tm8gK0Dtb!0vY?2HW7ywaZyFgnwQKeDQc7s)sgiW(9zl&_)vynqTYs4}xZ+Aro{Mdke6nXSK>pqW z{~@qF#5K{5bt@0Dv$Y!}Get+=@9#)$%8DZ?87ja>1!}T#Ts5>@W_Bzv^*u?7__iwCF@Wv9w8N8j>wddWfa+Xg)_C*V0 z#1BB&u*5qn81H9w&9u~(QdH8=H}5<4)?${@@$13tfKCniqVmYd@+77?AHGE{sBe)U z$zQ<+8;SAa{PO2;8Bk9j@ZGEs1n>wg`Bug_o()ot<)x%l=B$lDP_^$dYzx{nYZ2eQ zF4fTo*{82yMT~pmtcCbJ1^_L_z14PCU41akvB}yy>jC*<2)u@>x{L@)Kmtr)d2PO@>-u-XA%X7gn43Lg;2UjawjpRyeF zNlfO714Bhgs^)5bZ|mXUxkTs50&ZU~s?>&7_;uo?P}54)2I2h^`mgF7j~hT1`2r zx5e*Jrl=&NRO;)m6)cMZD)upe(ANy;;!aQf0tPp|^$MN7bfU{R#hxw^7|Yfbk0R(GTOPUV6o`MTwi@F`x={@6S>f_o`- zcH%&fslf65#@rT7KU`2*h3x&5&cY!2{50UpnUIyh^;bteEZ)v)N*zUybYJptQBjc; z))I`tbcGfn_>inSxBuNcOupITRIuG*C0rbNu~v7rTQs+?5Sy{c@WT&hs@sW|s=uXA zSH%M8331ESm(|zxO~EZjEgOp$V@qa)7Qf(wUcx+Fctx*!;l97fZnLe*qjsz8bgj{{ zNug-t?Bzv^ydh*r`+^7FH-|RDhG0jB=IcTg`E`c`t|kdY52KO~>XvxQ|7lVC3Sih+ zTN97$D@a1VnN_v*zP*}HLzM2+diVSke!B-fc-IN}*f4)M=+cZRty>K*wtybn%L2p- z8u@CRv&;Z1Y<@uVj~pu|?iZWyH(_b#g4Sf5%+e;cxOv!r*qpaQVovkH2|C!THl|pV zjuqvWl2u$7^ByCH{t_B3Q|5+q^F{b1nc~|?4A2Nxrv%eiOwwYTy;_mp-0vS)i~|?( z#c&AO2D|{pZTJ1gjmEtt5lgnr9TI${PO)IZ+oMG-hZ-`*H0eBMzuxxHM^4r`CKPOE z8uf-=tVM0QCR!LK+K6E^D2b-(RnAZ6lTp1` zA9JyWqtudXyGCpM_Y%q%n+xVE-su*a(^Z@^l>xo~Ew`NeNNy=~xLf(fI_6gWb1gyl zX_6cJRwfW>c$$eK?QdK<=U+ry>GyL~Dmwu*vikt)b42Ezk!}T82yjrE%I?LJ&C4s; zv9!bSgGB_Bv;Sxy=E%-@(ih`M+rF@>=f$(BULVvN=_ekJc9(IH-q=yj0E$8Cer3bW zlZrIj8ygk0TZoKK=>FE>+dUOm4x`M|W@$MFKdyZ$(1RFSRZKPg#th!_HJxwb0nF-6XMG1c66 zb=l;dXE;9z{Oi}r=xDZ4S4cP{UHk1XD1okCAJu|slanT$#s~~Lm)TB ze47iCuJILNQ$@pIi9Z4=Dy5tNGSUN(WZQ@+Hk-9JyP`pgfI(r%;~x$VzrL5gL|5C( zi``Z2Ui;8fGWN9ioaOpOBAUb;-t-X=wioIoz9Vn1pmy#c%FS2(T1k7?KuwRVP`=;( zs1NSmLF=<$KKgM7mu-e!RQM%rImGZmOya`{2iAXiKCe(h4*tRPE_d76bywGPYCjLXx!7zEUFHSFNz zxJRLscv$5U;6;#4)E4&wxZ7#HC1NTZojc~{{SJKN=m_^KZAQ8BJ} z=BT=Ebp8V$V6vty14tNdrg>Yh1W`pF!1S5Rc)mk5E$#&MT~#<)ygr)w`)GpjReG_+ 
z3(LLa>+Y-3fO-0$v#r3OdxQF4ws_k^2>1XYG2gA%%mB&!ATYLLF!7??T>-hZR|;RW z12kHN;#vled;=;5V`J2$mcsVJpAO#)bJs0nxg5AnrCOJ&^6^qnHr-6>ZH^5|5HqNF z&(}YXZ7#ypF~(v<%rKqwE_mfW7q<*oS`;u*C+L z_3gI0xltYgg-%(wUDMyL!e!L9FV?K()q!Jxt=Wpk$$FgnVB=AfeJErtmaHehSCv~g z9mtyz0cQn|39Xk0-BH~dm(BoME`N+I}6Za zviywuAM;v&7y~s8UN)fQ`sdgGnchmLdZ!&UgiZE;K0Of7SMk6oWrOAZ^UAFVH1iSH ze~fJn3VHNVTPX5z^nYFv172B-SpWZg`W-Ahz)FYbq%{9IlGY1&WiRHs|D5Ye!3Nk^QkJp)*IM2_(<<vc2H= zf3NcGmH#fxe;4NeXFI0wc~D77Nq9o?>i@l|f+infzaI8}(QzjTpB~t7^aQac{6Mg1 z@N)n9&o=te0BpFT>wQ+Qe}@E8#z13(avT-sf41|>U|<)qJ`c;00w&>l^nA&AK6ty~ zGpXtSxubv!2o1XuK8l(D6TFlcm& z@{6GV#*Y7n(*K?;x4HFy58D4Pd52hIm?=(LE*X_c*P!V1fJj0A$&aa^n_`T6Z@^)# zTO@|{(^2gAAUq-;_NJGQ_r#bl`s4#PIZO@xJVLxS&vrIfk|aKbIfVq0b9{m$7*3L9 zx}OMUZ_=1GXq<|9-L}78vG8WXq4Vr~Hy3$B{Hje?p_CqM+mn)ko47kK=oGy7{n9{&YiArj~BT=aKu@n`4HFr6Ld9Q#QH> z?cLlL>v2+Jq;O>Hwf7E&LZJh^LoKfs#Ewy+n+wsFd3Qftl7+Yh(@Mw;5dj4(eOs0d zkgdhBNYuK<``5NhS8i;{MbO8%M*UXtUFGKI>0omUk)`wR448I%l<`H!L|V?F^3?8n zZDEuhmVfo$Tw(ln#Ve=khmtMx z-g<;NmAnipeM@2O-@G8rXMCET7#P=uEr&!*Edgy-Fmu=0A4@TF8lT`K<=)DY^9fY@ z7EfbwW8lnTME2mN+84&>gS{A>G0T*-60cs=N{E(HD99}8?&fqcb26tUwf$y><@cSz z^R0|%i&QBzBE5b)N=T8rd;d9o$l3noc#n%CVDlkoMTh9zE&rEh;_n=njk}SDKc5n4 z#%6JsD5WHfrT+b#HqOiOB+6jTFVn7J<;5Y+&-tI2E_7O&VSZ)PTG~}@2QS8@-6Wth zhMF7&QD@mTzQzuhwKmw#GCfpWwut)Ej9u_5WG>7n%C|L;Q;402+=(df9$#keX5n9N zKX!Z%+Jxe8*(MrVSv{66LIjK_aHkTN1~8FY7J*f+&ZR3)bWN{eH)^Jc{%#2qnhmD- zs-$t!;y&&b(c8YnmY9;_=nEh{CQMgB`XhykQbcfe9R-pnBqvAA1UM~H?(XK{6==82 zM(VE3ROheCun|+rO3k(FzL?Cvu`XSQO0H^TFLbfNO&Rsq5`=$FUvu6FMEgyP{jNIR zrq?`j?difzlM*7Ia{QlVIr7~c+YI+vge|t`cd%l~`MWeB(IL1Idi7@*uW`bELALb+#*(NiTp!RhJg#To3=IU<@G8U)+lAgqGTY!OV zeT7fYc>bNso^gLD*u+P;K_2=vTiX}Mgs@1hILe>L$G+G?$b8=W5uRHL+CF+aXKFS} zs}kg1o1_-Gqh6W^M#Az3%sossz7qWO#$=YCYz<5nLGXho00M2FQ9wUDFW|QucUm^) z{QVh?1d|F;oed^;ut6#M8~eFDo?>BuTaJf4`QIDYysp{V@^LJw0Ry>`<*JFzsmS|kS* z{P$e;N)CN?|70#85rg-mUt;T=*=Z$HDHWoXEVPrO$M=Rw5Cls<&qV3yNsfJAzmh@= z+$bISw(q;2kCk+!vnwuxLsfX$-zLi`E{P%n>c*?#*IIjS2YnF(_6{Jk_jOTn1^gq# zdkg1VC6*FAlvLo{pQAA(lg}s!i1BWWYf`loZBZI@f&(@?N zc1SemutYQ-p_~7z_IN(kz76?zS|k+hH{^v-PjQUhxU%i zenXB6!s0$tL%re4roZ*iW6%d&l_0MAh=jIRuww6H0&EW8|0rQ+-8RLfh^D;;kpqrq zMV#WU=_UP9FB{M5t4*1j6Si~wS3k5&JfEZ;Aqx2ry^`fIUYBfc`s7HK*gAtuL{2=b zCl@>hMlNCaLeU#u+>ZL1kk+A^=Z@z~boy1SXTJ-u5T!#`Y_hN-W`schH6D^<)m6vi zIX$U*K}L;hW{FcEt7%86FIw{{{CcZ&s2D@y}RB*wBV51O_~ zjxXgweC1j82j)xAt4HlmXnA`$cuepqWs(z<7boSlxD~FBe8u{eeH=sHf)A&4q!y#K z_(uG$A_j$|^KkQgH%GyH7(<~Y_^}S;d-iW^&2*kxGMDsn6N*i}u3M25!(uZRj5j zr|yDXO95SY!+?A?sC;|7%|fEEHgr&6c-v;)C0xQ)A*12zc(LZ`0oV)a@kEN%1O8?N zbu@OA=GY?`YEH$jKvNsriPH^99pm7!Q{g)*dwovo{M@4;;?R5l__ak&mDJfR)Fb!C z3wfYAphr8vKPGWG-gMc;vS~t|n~C7{X>QyKHa?IC(hI>ME9_72oPkK1b%*M>`u?EW zQYsy|ijj+&@qjaoh|vA(n|peVOiUOfLK~P#&5@@dP0i~z!I*)bG1;bZ zYO7o+eunY8+)fx^Fx`xQQT<{hMgfd+h>Cd_8tUSQ6^-^auPk$2{SY;A2j9}7N0mh@ zYs02S1o_D2ru}F<8b)xaV(25`vU*M&7q2g2n#6Tdd%egnYNFjOU00wgiFc%BUex7{ za5Q~K9h3A%=FBl*zw5Y?lR5cj%BalY+U3xGOflt`A#{1)kBrac+~0q_MISI~1M#9} z=GNyfhw&G!j8E3yc6UOB_U_EwL9&#|)}$Xenm!X-ra4(*sUWhB=#Dbc#;k~5y{&bx z?4TRz?9W--kSKNEf%@&)T3}*jzaNR2+^;yc*$UbDy@fobhB?nE^4L*B(}hF z{URyN+1+KC%~0BU?j*pzda*Ej!54wX)Dl&Uf|QQ_mw2@l6?HO<_TE!&ZD%Hn%}4xK z2Kq(aQASVOz4JSNo8{ON8@w2i-NER{!Y;V2J3#Xi76my(6n{Z-!^~^B0S8i!X$Pua zyuCPDgauN|Bc_0+^@%NMmeb=pVm3>~ZlVipec@=dVdV5e!pxL z-#RQ}-f>lHM0jLFBATVx58oN}`SbSAeZ+;*4A{WKe7k7PioASriefUAw)eZm5VhLv z0+#lFJFEPY;BptpOR!!Fe@Fz(GN%>4?{OR5o_0G3wCz_K3UjJli#e0YUsHkj^#AZl zWSDmydellyjf!MGE-9DJcy8Hty#rPMY3#Q^kR)JqHDC5}i7UbN3geG5I4NEdr$Me@ z2VDVz6gMl@0wd~T75^Oq(xY>Vazp;UtW6DWrY|GkGB(NDK^?#05Tb|qp93Ti`WzVa zC#84nT_4so*t8!1ll z0S+j>7JP*NuXPzWa;z>Tczd+NSjg1u^vd6>z0tqe3d;x7a@0bQ6Bl+Lo7?$ 
zs!vQu(9P#2>7=W<0q4aCh?|0XTj{h1YvV&zy5^?42&2pUUdb>e%z5EaietL=pEn>~ zhNwTxALU>hl(qsazXeXS5na60=*2up-e-*WwTw+4oPrz}&^g1a=e{#uhF)@Pd8dV5 zR$?x_S)ts!?44)=I;LSEl%#+K25q4#F=MxD zLDTR(+5ZCO_rzyfjiWS_{sy9D?sk|ZSRZTw}@5(#dGF2~WLr;+nh|-4R zj-s)__b2`N!fv}p32?}I=cc{-k|U=&-dFtEi7`i5+?CN-t=u>|gGIgVg4GymPiD{Z z|B?&jA2rp@Y~q^k6Ic>&6Hprr+n0@h^gOTLvDjdweR8kr@<;^%IXm*uS0?_Z$)Ut> zItJEzvUiWmldFE_cLIC1duG)V35CSYDXq_jY30=jLG|~?uZyIY)&6Bl4ey6{PNaN( z;?-`H5N&8qh#MYqih+kzpVf(9k&dd|kTd->?$P?XQPRt~)r;s9_HzT!cmfDW9IFQnAThA9n~jO)D6q`ys;^J?RB_03gtcwFJu zQUdF?O@IG2md|CO^_8mX^)$>E8)*0w)MLd6x9djOh$i#e6w{J#*XU_xc7C1!dbhqUp$5p^-Iby4baDAIpX*adn}LHU7rkiwRE^5+I#VgTxoJD1__7&`CK+R$3u z1hDVl4g@X7y3X_ zi?M!G+9X-Z(rw2H+?QTsW97b>Q;m`TW$N# z6TLH`_INaW9wDe;ygXekxpu?NipvV@c>c0Xh z2xgvtwyG`lW1d|rr%?&d6a(EYBeq+`2K%o*@qQ6X#zligjkjoWl;Wshu&Z~Q5| z;(h!sqRqo5)J5V16++dR_ZPO3}9P5|b1&{hpLmF`tA{?ae*=4C7NGtw-iEfIvywBTI0 zP!bw{Y1Ayz^6>JT0Jh@%nMkUw#Ano+YxiGSQ%JibzOe|3rr$Ua1IrF*-wT|EICuoQM&L%Sr34FSj>{cfzL<;P)2XR720mdyP{ z$DZu(Q*Pt4LvFa#$eio?^TXkshhG^31tx>-3AwD+YVJ8F~q?A+>xni2vj*@7J?F^Q^4Xaj^Rc z0d(Oxwkc#tN8PEH9ESbNnN|E%Z<}*`-m(@n)NMiBcN6n^!=v8q5w)-e&@-}mk|;6! zk?5tV$wIZjHv!SBe!iAg$cqyrGy}lKjLPP@?>H(Pt*I>Q$?cwK@pj03pK##5TJ}9=!@y{{^9^We6DW3QW$2y+N!M{^6&w~5n!-v2zh;HY zink;hnpaNRjplxt8*O?Oh2*frhOfPUaCC&)et6cB?Y+29_V&5_&EvEb<4k_^a5z1c zgM(COmff#kLtPK=W|!0SaVF9_T6!LjiU>atP#s9BaYzC$1!rm z{*5U##d#k>gfL}($)Cv}+ayzC;5AIsVpt?Wi5xZ6YV!KYwJKE@bSd%1F_6hV~xZiOYec%{#ie+KZY04hf zZcn?wGqM>X*3tYcV)`Jrjr-1fBYfgoKMc_f!8Kn+fUV9F8+|m-qS)HeR0R7rd=p0C zqhHF3C4nsot5v0a#)k2v`=(uMiW_czf&X)IkP-)tma6!x=eIokiUJXr5aWMbWeaOY zKv8IQNOft^J%V^`oZmlLOv7(CwR@AGXi(r&+b^9p)>4J$>eK7wCd;oMioI4*^|r=+ z>*MCL`u>>@-6WqUq*`aF%lvVc`{PgFuqvQ@;0A$7vUER+8-HI~2VB*Ox+X;4Cq<`$^?>E8le1vL#?v0O#uTu-lHJDhOQ|#q;pXMpyR6EPzgeYga zk+fbj(z0cDQc4s)Q`~ZOkQFM+Dbw6Ea(8gGm~n?xH(`%91#BWb zpNl$X7e_kWBLoiCJK>q(oWQ0wojaR#in#p(9fxwE>UT?5PMIt+JgvjIQZ52c_d`$D zDMkHi$Im6doe78wJFfpZVYWo9_eb}qiw6MIyPD-Q)KM*gQ_sUqTUnP3eKb;CbL3Ys z6#45XPPil?miC(wNJdg`6IFXRW4G|vB4#^a$ywg4I)`L_&e7(&_+?a*3oe-tjYuU4 zN~d3DoU~r2O;C`7Q39gYVB!Zoax_M{af{$?FCGIFxX8M5Q`N6_;`}3GV?j8|EsIQxWc-p&T0e?s*Pw%IQ=x^@Q?0s-{;^2Op z`SIhqvrDhHx5M87=&h&WvZD@K?Q&Ky>)o_(yNRJAo|Acoo}=hZ`Yb$ z=b+F&OW>aC;xn9WTY_Q*|0d0DCqqpXDSmD6RbNDF6brd70?lqrOS`6FN*!MMbnl(# zsmZ|YnpWaZZRP<7aB2a0Q^TL-uoymiUOv2!ot1bes$2%38-uIvYufzJ+7Wz!L(5jO z*7;4QO!s6BxRxc!se!m{XR3(o(;h_k=s!!f}1xepULXb=FlcDZ z8g^hhgm@pSf0R4y5`r>J*Hb&Z1)1+tbWkh?&-WlnXa zs6kQ}(f?WYcaAjDWtzm%SK6>G=Axl+OHs7*4s}QOv)vhL*P+L=ReBjoA{kj}gxHZ? 
z4GOFHa(O?BWuF)-eFlynqn?h2fTT@XGaW-?`PH|k(?8c|YJZl${JU9n9WMDC>kVwi zD|5WR zL#^iZR-X3f;69U2>jFC#A#8zltqGQ-b2Jb~p;xjni%{}}xH(JbEf?%Gr42EB<;02$ z^)Povud~8e;q32<@7y7UC`iB7h|3HF}sAe98<`xoFLTQN0aJ18<^e19U$U2-r_2^)~P$%>fHmXmyEz%+O(8K%)udlj*m+eyRPIlSX7qMo%$!# zT~mm(Y|D~!JuKANJvS?MyFZGmf8p9Rbt{QHvHOZ_V*BhU+CaUm24mG zJz2l$6rG}H5UZp7gevd7eEx(r*P&=DJTGiQ>t{fXW}BBRF!}Sf1$h@re?56hZMpV- zg??DE%WLK=;Rlwj;^gbJGrj4wq7Jd4$Q~szgq3nHNZ`K5tI-U7`j6>Jcw{;RakplV zBm0J+D)72P<;A%9(LRh)|M`fUs{gw^i#Mj?=%S1_OW<6a$C*S%Oq1mFqm(AM*~4?U z8bN{QK(uYE<7e;5|00(~R+Pq{x~2^Vh8HC9CyT_JVp*LAysYQ?R$VLewmu_tw=83^ z!WFYF)tdTw?F-?Y1s|D{e5L)bbW1M_WEWSgRH!*P4&`%{fOARUdtDx9P9yr`;4;Dm zR?hXo#QxQ&{UKo>>*cvM)dz+zC=V*=B70qJ&E@v#Z1nX=y=B!Ygw!Wm3(f6S7x~QJ zsnzh;$g>WLes&)Jb(s#zcmq~b#XqC^ukJ5tXZxptVdsR*BNo8a8WA~=R6!WQf`uVxv zZF%nS*(Z55(oZqWe0uihE?+O4&^>dvohWs}U8y8C;3r3XfJ^U#32o8u+sqyh1GSS> zX;anEqGVdp+S!{@K2O3PkC|lwYa@z(ceP~+xfwJueNX08zeK5 z%(2wN+?cisx#qK zmzg;yJ)IaZE&A6Y~gF~^PyxjE(;b5U!s+o@DHX3 zzjH+KaDn8e*52gX*9>Ii2mV>>HLJ*rEYdaGOWOI_)R6vzKh6~>T+>9Bbma|+xYA?m zvL`A%Cy63d4z}eXb5A8tRsRqYsypG=GWlaudGQBdZ@|Yca|Is9+{dv1Y8PcrZN|gA zJIJNM7^)CueFa)-vBO2pjErk8OLZs8ZqVRmobaTJkOoWR^kQ}gsK%fak3bSI;Lb~L zHbY=zmq5|aw#=tamw%_&@ksI91coKkKg!em1zcDhd0!L}=y~6Re=-D{5As0t*(5OB zPxhBF2N&}~h^`CKkMh;DokiEGE?k zHu6~yuG>Rj*O2|Bwuc46K`B8_Xd#;1MYK#dxP~Z!OK#c^A?5k(F(s=@} zT;#=GO!W)*1WqsSQtasl{UR`j-1&BfFL+;0kp*LC^jg5#aI<)P3a2{Dvg`ER?+acm z2A*(`?+89`xfmI~`O4~fb)mY$U}##Lx11%+R*oMm$K7=s7GCW+**mZS8MFGU-cIty zu2~py=d60B_alYnQR*=#<Rd@}8%z2XnVDk(lXqm*@rejc#35k<(gx_9K5)r+7YWr6eY3`R zC>JO{!AtI&EV-r5w)s3cQf>r;?9jbB5xsVLWr|f!BlNl5KJ4hr@^0 zQ+ydZn%jd&4s6RVb^D32RHzL<2~2X)mj@fies%jbto=pXwvIC*(t(l&%j7XWM4-R? z#7o|e^SDi3I0X4gI-ADQq?t_=a>ibJmV4q|z5Y{}gnG-UQ%bt-$;Xv7%1m83ZjObq zW64p9jPZxs5VvI1y+FHfXCfJCBUm-lAN!B15YY(mv{&+{K#`1jzD!*CazN>{Xx&+w(lFtl`B5J~sXG_zF{HR|8qnoz&JV8|TaT zi4yKe%BOrU&%)EAiQ zBD=+3Vlv5Uha=&Kt;SXrD>Y`YJTFW<*Sv!1mi?1KmP4`x?QR3CsvARK$N2z0bDOCG zhlL-1=$!ZdY~~yMugu>phSbUA#`0Hc#FDoizDP;_wtrRrERUUe`CZ|w!A@xdc|k!h zSh+v9t@^Q%K>)L#rKhNRJlUgHA}m)|?dDa>U+yikH%ihzjx zq!$4d0RaIi(iH>&rFWzY0YdLZnutgdDI!g!_Z~_Jp@?*lmOui8-a_c11;}0OefIx6 z=Q;O&xZn4u6_PA6=A2`WImY{rF;*tX>^V_%mkssWy@p|n=5xfEmY}Z@L9oG6WHJ{t-9o7H)(@`4cp;G zdP7n8pOXC=g|p3|hM7%Ai`mLAZbow^DE~9% zzPH=Hc+kWl$QYkkJsJ?fO~*ybiC7+sj_#BH)DK`4{!gsab3&0M8>W-v{KVeH%A}aH zw8Dylrp=zIQrwxKC(ug2(U0R2kKJrs`tiUCbi`qxIG66~~bX#iX|L;Q!XcM-h%LHpg3vfW{Ia(Rpmjqc9DHU~quwAp@ElKX%b zU?z;CL7vK&XghU+#pkQQ5U_=5E{%d#+^)yBb(vOph1#-C;yk94NUh_w!(eTW>n+IS{K+3CDBVR?H1LyNA8Hwae}uGDr5BzuqH6~d(UB6iR^tG*zfPmo&I4cw(Q->B?j^5oZE zLI>Lu|D2bz^7W298#Bds>z!slB5<-Qribhj-*=W+@J95Mifw>LXf0^)qJ1yH56 zRap(0IA#rfYa;DVC!V6_`+=My1@3~OXAam_Kiz!HT6&_};h!$Wad08R4B|zEES;&U zm!12qy=l3e@9hgDLhN$BRd4(jI9_9yy}KCm&+xm)yCv2-9rEfrTw?ltxyG!n8b~9g zHiu(zI(_0(gbVfY3!Z8}(n}hq^Cm#UsM2>c`>TG@deBo?jKOIkLyKYHv+^nTL|4DP zs{v^Z0JO5f{Whi^Ca-p}dp)Pz&@BT#GwbbW*nFN;5%ldJhsIR1i-^$K7f0}x3Aq(l zD{6};zaAf)>vCJPPu!lqCiqtM-#!rJ@=S3-f;umOBf(ouGuCnVfstn-n1xWzv71mx ztZuvRw;ux*uiFdeSUgRo1U2c4_(*?MySj9`D89>j+1ew_(O3Mg!$rSV{Vq6gX@lKs z=7cTs^}PhRX5h_{A29+V2TvZ5PAIiSqM8336{LkeW%~$BUxU)%_ocC#mk~vY!8gBc zo3t-;NSi(H;Q_pgKUtNUl9%2a z$IHh<{ojlV@ANv?<)!u$IV}YIxQ~+rr9e1|)r6@*x?`){b=Lzi&}^EJV^$GFE^Z>WFT+No)* z&{NDr)}2#6F2K5K7oN8(x!z#<4poW<8HdZ=kt7THZL-vNUUTnt&$e{GaX00H5rq|D z!{vN!A7&z*7T(QK;@x}T>)4L^O~JDW*dfHrz3pIPa!uEGqrVe1dnEK(;cpc8Pt)h` zF(tB`I|6wx=|5hsKeiD2+$LBYROXMkmX$5iy$CNmVQ>*&IeS&5;b*N|<_>)0Yu)%~ z%C~XeRxE(UUP>R^zXeiyWG;XImWs_C=I%_Hb(+Kw|$jy#$P zk{O~)d1;JSBHnA1DKBxrcisRS?YL9g zm)*qeFi}2)xqlr05=3A3r}F4jn8hm09qa@E^beCF?#K1;^tMHk1kh_bzUsm+vnBh7Yf 
zaa!HvE6RHvF6rwnFw2z5xO4_5!ZMekRI?7uhWeI925sw2*m_TJN{y1&HAAlo4uhheJEhS+;P+saq1b@Ykgq!UA z{|8rawV?_?IBu}P0F46lw!FI@_J9(ILoQwR9nGEn^yJ51?x<=jsWeg#v1*DVq}Fc= z=A}{^>AJ}ZamUgsJ?aH8fB#9zXqp!`!Hd_})>jRER-JxYpUM9*9X$^8Ke1rWT3VyR zjpb32agkWVf=?fRxqrgR%t`dC5CS$9NB=$lzGa6Ui;d$>@1Rqq(`6tgB2-FJRSU6? zNIkz;=DR_wRZ=bSH1b^En(afpMJx-Tql<_|)a-3Q;6jC@uM7a_ITQ)1BM14~9Q@MN0SVsT{RJqvYDy)d{R?L+R*v}60GUzUNus9r~Cd#w_ zPv9o8Be-;JV;|Me47y7eZx=)09a!&ax-Q@cQ2I}OK=tUd-_5Ak>v1BNr`Qc?!xu<_DRLRBU=xbQiQFk~A1$Qe-7giYqSp$*5L7#Qwyx&DX`2Pyk=&*Nlr*2Q*;S&p5 zRrn@xxDQ02U8)TDGDE7f|%Cm1*L~w6o>Bcp0`DD_P z!c8I36~P`^o{Ey66rPe+{6YD$Tu&CeU-oWU$tLb$+wHS*s)!ipFSe!<3lA z=#*~V;(R~;T3>}u<+;M|PwM(A-w&AQl-t6?CoF;i?+Pp_DPl}z;2k6*vIJDY^f5%A_v@X6Ez znU8uWXLzmKWuNraVn01B9uM|?23Kn(MjPjA+!s;)!A`1Mi8d$q0_)O_+w zHp312+8yEZ1@n_tv_Q#u$~#;J1wJK1ds=ZUZ~b6wBWZybT`ni>-V}r%I#N$o`Xwky z%7%g@CV*%9HGHlR8cKg)q$>MPHGQ7n#+ZAaaY_+(E}F2z3k{%}jtWf}C$=_JhFLar zE7+i`le2L(nnV!V5(R)coTz^Tziji^pRx=W-{poIR`&}y?#(U`z!AtxP%W&4E zB|7UltEYjs3!;pO?bW^C)MOhxbaxO@o0y?(-yzT-Uztn;=mA3|vj9w{Vf-5|TyOQ( zs(Au_3Vw1uTmA@Er8cBu6nI8`#$9&(y_GK?6=#{4@uscxD-tWVf{`F>Mc1i-vZqqB zPky(?%P5FZY1Kt%^kCX!>u`k|q+@km0fepci6DUk;i5Va$xr?3LJvAlO%c}T%Ul67 zCsa2qNEe*`zj%l4>Rfa-d7FTR)1Z8d?nKR+l&43oSIOMlapQ) zZ+#TfW5r3rq_=W3Bw&IxUw!Ko!);QQQEBw?0biISGG5=RTo8f!ZRZ2RdzO|NuKV8b z0apj7G|SGfQemlG!%?H(fI0M9&@m~qK`ExfGO8|)Auw!xXkfzFbVqX(>R5~^K3OC} z#((?Eqo{%sTyt6XMdxr%;2QPp4@aId7L~~88}tfB4nI+Yo zY6&iDZ&vLujnWA|SB_9bXUPuK{kbG{&NXUR;mABCI7dB`^3ad-+t73U7{_V|xC|`i zuIARX47*@25b)C(6#TMYQ!%#>o&st$B&}wS2!lvQSrU8x>!%Cp-{ z8IZ@^aK|{?A!PrSg3)SM$Ixfs>QFb=>mv?KnRx7XX=OLJb3l_0h{{I8h-xy|rfPMBX1`;j!!KbMOPOrC5Olp+Uf^L4$9C5LZ7_i<_Mp-;%^kzGuAhWBq+AYS`(ma%aE-hFgAr+o}6-;dG?CdKOiVo|3X!(1O4&)CZq?=l2A? zW94&lb~vuf2Q~C0`(u|@LWAxN!=Qbnor;{d!sMKW3*)v0D7L1JeKye+0p8B}<^`oV z^yXxYj`^J$d52iuI-*MqaYPc&g0&R$Grnijn^@QyV4m3m21~gPXP=%Zv#ihl%(q~Y zCb7sSfNgN#&XtG8H345+g(&Ew!Trz5te|;6lgX{XUUVMa%f6Y+eyykG51r zzG-K9n^5ISLKiLeoWv@tc!96>gp#vt|4nfyMk^TIOB7{`7mx4+~=|7j_++MvXK zxI51y0~|S14?rF!?@2nENxlX648!a)y{kxsZUvrzKK`!e5 zn&KE@nBkp%-|g?liS!gTs%>~3RBp6yDPpr5M*LGPzkOFHmr#a zuOsp3RG)(Lcd_rK_yd%kCVkf7`}W-v{+M-Ran(|bA$Y@>_dWu~*AZa=)>g(&RXj&$ zu{{V0+&Pk0Eex>AbBp*;8vWke_xw;XxMldp_nr$`9z3^T6rd%<=I)hV8to80THc#> zaqkD`-2Tj;w*-0({7YG)m5winFsOVYg_qCA0yiFflO4|qMJ_?|;7rj-@)aApGo+5fh2-@D_{qKirrR*D!rlQ6fWc$v;8j zUp-nin-gwOPH2*=6}pYbMq%qcGWg+|X0$4C#K9@-QX*em2BCUg;Hd3*uR z-QM=a+ooT*kKrYncQqoGWug!E)e+D(<$SaK6C0kdIl-8+zwQPg1_dJ|Ux!aB;1$%G zoi5D)7Pnh6GCRwpw=@mM2a~Qh$e){dwTa9`b`8bd&A?u`%#diR8zQYbO!?C5VJI;E zrd%Ziv3}=NcUc7@Gz0lCf8CoY{5o*HHn^52^Ms0;TVyr;$xq;*99L}{*csVQNW2>D z>v)TBT`t1yu!J5xN}#Fuuz(7kKIRB=DGu8A6*GHy5~lF2Z4niB+GdqC5p+)QkGxzK zASZzsqm|~@#eLgaMaxy9SCNM%4!$`goVZt=YlyBisjdJCoD?6z2dL!gu*`&C9C4UG%580aM^0$g6 zt%P8*Y_fyXu6LHmwQ`OyGU?6n8?E@5u$rWd*~d%uOZFx=foAjsHK$PYD!rmGGSph; zCT8MJ`QqVP@xD<*iV)759n|lt@^70Iiu0;G|G>;1HRB?i@d1IB+46uYby{IWF|-;m?l=-D*cmYHe)x^vEC23e z#d$MKcxysq=)^YkE~>P=4vYq>WDI)j8ISvs;FR=OiJqBZ;Z%aavOiTxB)vKb$$2&|NAN}|0`c% zA1bpN6)<6ac2_0hce|N|Fs0}#OId>iM(g4|Db{a$6z$jG7 zq9p$#9w!|zK{wmqithtrrAuBR;Vi5EAK5wSa)8_9#D>&_0oeljQ!}3f-f%o6HTZuz zgK??=KM>yTv;v4F1|}s@FtWM&zdw`%ZVR%{{Br3k9i$3M#R3YzMt^(ufBJ_hd<1@g zEMj5+m?ei0fuc`m9ssCTc$$!uI|DN&xb?*M_-2K<9SK$1wSO5Pj zoWrivhouTWv_<03{fQR1Jk@T4QCP$Uw#K15byOcI=~|JLf^F#eZ7q4>EnddK{dE?^&d=^r|4G?uaZ$dp$Pj#?T%D}SGeS918B20k+2r9ttDvJ zWI=J=&^7g1=|Si+Uqh?ypnY*iW=8J2k5_>%$d~Zj6$RFz+dv3Z@F`#PN@lzt@5&jL)DC z6*uV>PcK0CUZEC=!TR(NqTtX zpo2uHm^23S<>1;kQzC@UVa;wJ{f$Z`y}L3K^FC5|5&tdblkQ~3y>7$Q9*S6rriEp7o=FffI*oq=SlW7JR`@UXoImAm{I%iqW5OzM4-E z7tdYL73mLQLK4oi54AN-A0%j+2!-afm<0y@-kup@z40-(g=%vY4!eOQ;+pH}gK3wb 
zJ8Uw20l0mSE9+MRg}z-s)Q|CgQZl~UX#h(xt;|(7cO8urXMyxJYo=_4=A#G;6Q*nF zOYNXT%Si899{vt8AWr6QbW=0g2jD$PHV2T4jBQ%Q9t^;n>2sT#-kW~=Eb+!!(8J`x zUdbznHOSx#bt+_?tsI?+3-3FDl+V2~4Rt{{H4&G2_x@u0m%t9RY0t>LJ=(wL1nqA7 z0ZM60v(Py?|4HMmLV|<`-aTUJ9piYp$)pCirzVK#BR(ncfPgOhwP*>hOzgjqP_=(l z7Ba6lZFVc*JC&CsM#oj&t|-XBWfHE8Y$Sfb4bP;XXL$Z3fU7q#*}{kEzh4E9NZag^ zr;GY#9lDxui75$NjG1ZW_px*|>n+7d#sDGv%SVi(<7L1L&4@rFjL%-bu5YrtjF%VV z8Rz&9k9-^`yJsSwpLA1g?(F+F_0N z4Xti9X<9cL=##a^9?>H-JHNahRRmy#c#nHbXaxZGqS-mn1NmLda-A>L4AUeZy1crP zQG^%R(>g#w_=CK=%Jus#W)R918l^93S241V6aCFirxSHQCZ@*z20#c3XxhPWh)QEV zuJ$iBfw%x4NJZyZyv=r}d*?x#gAl?nv_3KNS(_TF)pW?Tcp@c+w-JP#506z-3F!nj zfUDi=@;+K~X(6K6q}{q8p-0Y?=Aqj`Efy8Q&eE!TELV0{W2F0FTSvMH7U!WRWgmC}AJ6NjD1 zY^lxsc!+x9(LQd#?308Q+X{}rvE6KlIK~G2P=_r&biCv9`xtzJ?DjaptgIvfu=4lt zwl;Q`Fu!gj+wRtUtP)~3 zRT7d14BMW7n9lU?OY|x%UjNN`j}%l*GRqewg;f2ED7y}!#+eD!3;WKv%iJk>J@vD# z6z99T*4nn*QB)D0Za?&#@wnE`)uHZ~BgC^Q^Avm3$yIeT>;=PQu$$@$+L*^n&L^<< z_CMZlvCRUChwy;TJ5uw9M@vyhhLY0o%x{9Ewcyz?C{l9d9Qlkw-H%wwMl44~toKE) zHmbgcs6!k*Vyj%4<0F%90-|~)^E6G8126n`Ua81h3r;*NDcAjh5qW*J`g@X8W&hi% zl(B)3{DFd({)?UUr*IR#L1TUm@7qX5ETs8CxU+PMMV6^Xj?igk|7`QL`SY`R9_UL$ zwsb_EI_EGLa;-o&^+!7a7MbSn&$PXAV7Ys=Q=`GWwjr4%4OhEjh_u3j zU&gji+IVAf)&N}{F`gGY2d?YFHXf6g9Oc#bXPpw#o4QP&y##wI*lqDF4b~WW><5F7 zpY!nn-YHzfW93TW6b<(8JRE|U+k(HygS?&5IzqBefl}tG+zkks*s9iJ^J{~n4i*!Y zaZWN0Q{;Dpd-KSx7ao&QGaPSz+wwp*M4#mHGC_zzhesE`@E)t7Og%ggi1sI z;}6y*)#FE+qTo`km7ix@gJ-eAmS+;rvdptnN*3V0N3Tu*?t}SbOeftAWgTcn>D0$X z&^{32I>z#Shh|z^3#I6@F*fvSAXIL$X5}0S3EgRnmL5J0@cmBgal_J_Pd@Rl-;^%vheLuy0Cjl$~wxNA85+migGi{LkFjzL=Tgjw2|` zLRQ7!f1|DBSuDYu+RinB$t;YNIPY9xd2kZNk$~932zstQuZ9bvlw2^Lv&X+%_;4Oh z!Z7C{=fHVeYyQo>DY!c1+0 zzvS?SqjRSKdSS@G|0`M+P!0bd5ITEM@YcN?Ol@H--$zR{hn|w@su3vv&~(&$1~Qo5vgLm&fcJrV zvjfbWASs_UF=UH!%AtFxqCrKDFzgV@$34{I3pBL(^}ar}BPT~x?+{(A3p)f0dR9Dt zvu|&>2j!^zx{K6Kp~=AYwJHACCChGpo4hHWryOzUTNUzYY`jA{b7KqjE2P~4k_L7J z-y|g8|2wi$2sx)1zIN2|IwwKPOsH_WcP`Z`8(+>-hRyA)F1t(*DgWIIW&>=4wA#kz zF*dz(hQzvp^q2T;Lyn}_JMc^qvzxQ| zpsm}1f1%qX%IhA*G`A`qaCppS)R+}=bSMH;*?xUZ$nMD1mNbcgUq`0}s0+a|)ZtNd z+MWKXqroIWe6Ru=KRy1qK26imaX85{ep0JkHnZYy6xOoIM{{zO{Y!9N+DRZlRoqwHE6{AYIy5jr=qdKy@3Bg@Qo%7)m!4^% zrkc_D?+Hy=yR6c}GK?N?pd~(az_?lDw$iR44j@X2ZiaWoUJGc4eWM(G*ROs)!q?2% z1Q9*lYp_Ku&~$0DisXHGuRlZ#o>g;PZ^a>bKyF z{I?srqz5LlX+zIuhdh3SNZ9szNK^_~5qz0GLE~YuCsb^&cJeG+nQnf-lLCr-t*uJt zD{xJJ?e%`Q#+@ z&Nxlp-NVmg+o#TM;-ycT5=ydKMS9<^-;~lUrFm=52#u-DIAnppepFWQSs@xCIPE*3 zjIaHubhur^S_3aEtW0F6^Kiw8NH&O#>|iUm?{{xc2vnZ0YJ9tG@b!ZG982qAl<4JOF+XT$$+6OD@isgXd)|OJTR5_5|=6 z<`<+)yM9kt{!^q0H_@c_suRvD->@2^Ha6W!nlQEP-pAZ$eJ#d*(q0JpDiF_BR9sW? zWn$w@Jqh%I@|F%)4spuM--+40{VMmPeTKC>Pkp%O%+~13)gJNH0X5Bs&cxYtNTJe- z5c>XN#kb6&;?m7PY~cYK`OIVgIsqAFdYt3k`05?IR}TJQ@_e*mNtrClu15S9aqE` z=~=EE;y{1Yj)uyoImLhG$%riR=BvN3kbNmt4sS)se5&UQ*iPF_`D)&j?MGQ**MQ`BIv-CIJvT_|t27(6 zmVgwZOj6`HoD}QwXEM5e<0fmbx2^oXt=EZigz>MCrpSl{& zPFKG@`w<~wtPu%xN^;nXjN~m?k-{BuBtMB$$qphFtS|OmNU^bX8-`?)3%(`J zDtxQ)zz@_oMP4NXj0W9A_NsC03?5y zf1^>YQ3XsNOq9R79vC0!3Y80L`Y&N?+_)6=@KwF&lBE5KoQFRD$tr*z<)bk)S;oLa zz&FvRuCi7iC!f6JV;q>8>n4t;kzkrYA0C-PyU@MDWYly|FL9$`$4=(DQ2yr=kXB;b zqmy&u9Y)IX11K%3DOUFM+#GKL0U?)ak2-qId$$VhnT13BDyC0|PsDv|u7w3ql9{&; z#+oxtc#8QQVbo_phlVBKGC}h~wrNAxQ3qw^L$EN(mVXG4yo`AzLu2@_Y`cGa9Fl;9 ziLYH+1)YGUuwoWR5Gkm=%zm#$=qEd6o1d2Bnj604 z`e22W0~=EO1tfY+YmvSC+xsxHb&Ud9ot>p7Xy1j*dS(mFBpm-?5(v3&$b|kXGfJCg zl1PnvfQ(NGo2kDx%QV`qaiuU^3E9YI@dG(f!KCelF-ud8`O>Gu;I{$fcn%R$F$@)_ zQ;Av}Z}MUCXGc;(G5NO<{K3D{&h|=Wn!AmH55Zqa&agPfy(r6j*nIcm{zjiON-Roc za!3?Vx>Svv78$_T==iX6{rnD2;8gH?Y0&Yk^RsDcD`+|1w{RbmT+W->iPC1jPWgl? 
z?9j!eXFBKeSA?LZM45v1FPbuf`b_tHKkq7Pfehc^XelL#=vCi=^3dy|gAc_>v zSGqZV@_`>)r~H&sg=@~Fi~`$#zxI*k%VHDkhS9zw0JPc{QhM`$! zSZVdC+x@g1%|^CV4XnU!`B$dWS0L?%>5pTI8;c#|ku?QuFK2(wcr(hM+8YYr~H%;(3Nlz zx%(yPX!Cil?dFf0r%A8!6Y3wLuDLR~zjG8G+zg!T_R_-YejAfMiTa*+TP{C0JOTaF zW-W~VAN{wMvpx?@%eIMILmuPbT)%7*3;0)gowQCPZ+phs0-JK~EzcsL&37e9YAW%) zZnXjmPIQx@Moj*D)8=(adG(vGyU+&a8j%xUHkF^abV2ApB%Jm>@pu2T!E9A^CCuaA z&YBdo=h)mU=!$Lz%_MPOdKmWxWs_?*O(q#gWmJRsZO8H&>)uDkFNA2~|pZQ}G6LOxTCcf7?a+U0bXE;@7W#dkM zmMT^od>|Xx!V#QU-prbDN|x5@j9q|3oqn1fAg3mx8Crct`2+fey*C80So5FzG*JI_ z!L;w?4})Xo0`eLKW<7*Nw>~v2oky|U$`(jUBO`Tf7Idku5Jh`ncCZ?Trqa4P{WaT1 z-cUXB_5^mc^49s#P->*wAvoeFxGqTd)?Ype8cOW%J2X@RR{)u*elXb8T>62+37Ak% z{*$w(SpwWH>6(Io|em|{xjvKNMo+7=luON z>(bD`y&CDO^*G`I)S>TOSh(Uaai{(b;K4Bo-NorR`q#>mr$1@`Y4a2-*ML0F#zz}_ zG-%00MkpwgWlLMCUc&H}L`xJjHC?hEw?_jx3 zi2bHikVYswfrtlCjD(AWpKM(d zmvYmx-n0Xf5~V$#Ykl~4Zmfb|Qgn^F(3d$?+QOP8;1O}Ai4+ff^-JMM+>pPf1Xx^q zc`+MiMDduVh_uI-FJE)->FTOv4>6FVG7VPooz>5dDlD8Vd>xWWB{jj!&SWOo_q`my zAF+!R2gl+SrTd5gDkp?9ZSdNK-iQ>MLLGvK8{7?Bc{$d^ZWzh~x^y1&^t&KnvYk^T zn+AcAWl^Y?n|E(kVDhsh2GFI9@joWo*Vo<5rzZ+J_yfxs1c2<%P)&0#?I~hiN{!pZ zN)MaqRuiq=U?uhzMLy3V`%{Q9ssj(ph`Nq4@aP&S_(h~Cq zRSyxlfF0|jSDOn6FTK%Q=I_}9jS7ckQbu;zzJCazL(tGK_@!RDKzgVp@yp~$i$~r* za{gPC)o(gr%nkZY-lFWtU@heN9S~d)U77`@-3UieYiIhoSWEXzCDH61s$$KLrX2T? zP@I&8Wbdu=wnfg@PNYp+Wloz-Q+_iIzrKr>rV|9+?SY!N!2F>3)$L^I-s(jn5cw~i z4LgI=cbnf6K6OBKBIa7o3=Is_hs7+t)BN2cdCB(Z-8xXM)k1DW-yFTQ=T9z9i=Pw! zG+QDO9=#%0FS#;;O}L+7t#Q44R+JDH1t0wGTNQ62=kK~7ay2`_L26UkC{6jnmp>~2 zc;hNG`unR9uxRg|iV3om$Yc^)mjQ3(FRN~JzHreI$nDr@Mxt| zrHNlF{Izp2#vWXjB-JkwK-BPR^2&{WVgWO2wvWAxdp^Bg*QgU=T^Cek$HqkxtAKQe5wYZ8xr zUaR__QSjt>%4CITfHW)EUF9M3Pz7s4aA1mn3+C{lar1#Rr9qCapl(ZT%VXz~+_yp2 zm3wd_)4IX|WB#wJf6k2^0IGS)vY-jgzCUu|z&yxzcNSanME#8jlCMllYZ-bPLF-Q$xG-q$j zR{_EhGn|?Qv9m71!K%(kMIV+)MLDqo=;l8G4-D>y1XAlmod}`c03%zcb+>6kuIA5k zV71~SskHBM0g#)T;@82B%kXe&Nu~y)m9_k{)^|kl?W<#_<<5zhbvoIl8AjL4P4fn& zMqJPnDao0VtBMJ0HwCU5;yo?~19zTTyzWBgv+qA?`LJp-{^?Kp6%lN0nu21fsT_eY1!wBvr5$uMv*ne=Ha-jNGsmDF z?BRWPA|3ZuHpO{giM&a>dSzSe(@hJVnQLE(7et_HpvDP?SEyNB>CpZ#H&NO!487%j z4~$1uYig|jt%!oCG>jAPj8Z1 zOj$m+dv%iqDByD?d7a&9Q@4CN1aOG+=!Rka*fNWyej7&a?6*D?pg6acg2yFtrS`#n zUHObWNQY^LEsKdhhNl*c1D43E`Wszn2l`KsETP8i0F^R*xSe2kXaC*m11TSIl^@WM zjh%VTz#^;XwQ#JrNfS%He42`*2zl!v|+BA{8PTDr6~+-_=6E^$?L&&BTzL_K`U zgTfde4J8L!m}T0n}p@(~;cF>bF^G+lt;o|rwG&LzpK*!LD6af1ylqVu|oj$31 z5s$K2@!iB|rnH`u(2Lgy=eKWL|DbMmG*>S@6^;Y8jN>Eyz1y*X7Tu``&sKHCC=NX> z%VCMDcM$*y@0h=KsO0%H7)|Bk9rkh8_nPYT=#U}InFh5YlelV;p|ZnR#wLPU*;K=) zy0q-hGiQDEA{V?BPb-h7f82hukPlsU?(xnm>b?jm3g4GJ-7A>Rq)|EEe2%a#7wW+K zB7()1XGMd;=IjzNE8K580;Ut=o1oR^YC|mthna!I^;-wz?A5Oy<0W_67e1mt{TM*d^cG?t>4eTtFrw6}Jo z>R0^P?L5n`J8 zjNV;7mC;evfPBfk+}mwqK$Y$@a^F4sw+Gu@?dM2)pZi_s7>mZ%gLbEreF2T-u(Y(E z7c7xik+y((X?{q0uLC3mWQ8l;yN(8+R+!7#Lgup~2h+Rx=6_Ua);M!riudEU{?=yc z)s4S?)p#tFXO;O5()W#OW!eE1*8Nwss58o^N&kBP*VUC=oz$|ahtab;5)t1g$h&;v z+s;gnlH#RP5~Knhb#u;*CEc($EnK(qv=5RjH+n9%I{Gv%SC_dl_@kIo{{Dj#Y`sT@t&cK}e`@RImkaN`}{y9^VD zJ$NQGfr^fq$j2=7da79oUfpsyTeV7vXq~U=-OW_o%;Ebjmppxqa=#|^Z>916DWc$L zhHH!^5EnBW&#fz!j%YUhK?Qk#1%iQOVixuB))I$kPtYIPpQS|{)T9C<5{LdHSYp^(784S)SVy8Nnq?L9>0Ug=}Vi+pzYWqQZ< zWKce{|DM2eo}K9~AVgMRn=*>)fqTM44=*5jxCoaALC7YPppmiimuAl1IU^R*t_ za*_=M8MGdIzdxs-RoznEF~4oOQ$_135jGs8FCkLXn%kjmMJnMp#U3H^#E-}pdajrL z#KC3${Ta`UbX^h-e+^5?rniHoIQ(+eXG3T4bbl%E7fAFR;mD&gqybi_b+oJ(rb~LbXjtPs z@gy9bFW zFxb!yVnLL)a4a4u3BTvSHiB8^`DeEcYH=3(XZa0M-?`Ept`8x!W9_~O-!e8KZK{}S z7qHVeDJ@A9?Tzxs*}h zgAv{qi4hmf3nQ?MvXUAtU>*bdk}{tF@L2PgxmzBn>cE4<3~ko38S?WAs6l$ouuWF0 zMDJX^<5Ch!kXWgV>p76G(lq2AcR8YMxcDnMxC3oNfBM%8y^ZT^DdX96`gZ0&eQaH) 
zP~|n9{>%K*9I~KB*4ENbU!}D?i-m9NY0Ee?Wj1)_ejZJG{gm>F%G~N0DZ0v4=^L5; zA)t|VmXWq^O#z>wA?5A*aKlJ9|0R3ipJVa;8?(4T+S5XMFJJwN4m)V6?cfLXyB@PF zHR;>ito$Leq-pG83T35r6BFjS-hz)0n+u{4TexpcZ{)M}d_42up`8F_5Zn5>2HOF< zw_~YboGoyE2GCa*F#WwL(}KYCWhPQQ-Hu6_OliW#*3=2Wp*1KPQH)qt9-WQR;+ULI z`%kxtN3w7uXDuJIvCH&5HVvhRp@2&52`hV4U7oXO%EyX@rda$;;reYQlfp6Wvgi0P zP~{=2yu4$%#80TpRU=f+v;T?zd~gB1`fHI?*$2@eNnOy0H|ZFvIIHczv^ZF-#y~7f zeI|YUB^-D-6y{-$RJKdSmM`C?70lE86bA7_@0eVef$D+BeMp*dF?iW7W7#td_dK|+ z^zy$QbHPce6{4NyF3`6-P@4xrIP*Oz*fzE#9b8f0+DP5FPDVn5ayUa>3uV^B@3xu`@Bo zQiB;{W`3Xf-uM0e{vZ6G{(g`0#5|bSyk6IJo$H+UIp=-eS5lO@I{NsimguJUTyI9A zjz(xz=2jQuOP)*^Q+wrm1m)}AOdq_-XX@~(g*P$2Xk>E+au&R?bw#YwdqNtT)u#V8 zVo&3=`@L(M6R0+4z1-W;spJIY0zhu4O1NgSqpA?_So8^dMlGd1U(-U}hr}^%w1h+s zXRKMr-57L&Q>XmXF_o@B?mr3%il79$PF*z5Ab?mZLGH6t8>ilTvPc?DI1I+7xb{R! zS5CY{pM7#4pH+KnPx8RRvTgKUd5P%*j*Qqw|JFOq%!hREmNIc-7W5Rgq8~;*`mSEj z;q&clj+Z`rw@)z8clf&g8_K%#{eu*c8_bO#Q-G{xiXV|rF zVfvY7R;+`i4r40%EU040?`c+&FCg8Zh6;^i7_>WuTs}g8R5@48Aw#_fNLYpoX5cSHy@T$#ui2>rN>Gq*UT$N8tdXN7{J{D~&#|>qv`E%)_ za=EP*RP{Df@PKDm_G*9lVvjkYaX8m2;a_8CD#DtEm%7VRLic2gksz$Md%g)~vkVe* zW5*I5o(Fu?ulw3~e*TtFw(q9{X4TYwd~A2+19Qr^YYU6?l_>}p=oYwl*;@a(;fri0 zBGQr8pPA#4upt%mP;humNV`N5+yMvT3S{p3KBd<~KW;F1TYMSpedjCMCvDAAgs5-) zKOY%9w{vv(_PH6E^S~9im-VFCbGT88>DVy~9di0aE(UKwG8}ZEJbi^hU>FLTPw06* zS<=OQCfMhd>>8~ukG4SmL^I4weJI#R+EvZ#RR_1ZUs6@z%kWCKyO1Nx9rBT!L2z!h z#KB!64#jVRU0TgqM6)617MPT#=5@^n*^@L(F>es%@bVNnM^kGkv>(cfAx$Nt?%Tov zGU1T})lnJF2&1{AwFMCG6e?p$6e?1}E`wC4(mLI1;y25cZJl?6$CufEbAZdu9M1x$ zBg*q^&;qbNbf>G7$y%?5TG6$rnbqPj>(Qa zLoseT{Zs#Tp;}k3NNw6yJ_(=~if8~~S8@BJb~kI{kV=KXe4CaxahFdo%rK=m*GS1* zq)JB;DIPCcaJ0Iejvc)09p{6r@Wni^_uHQNq+c8h@MR~-ZwWtAIctnE*1>% zf28vy?c%c5J6QfhpQEWGgsy0~?AGYl2=Or3$p>>*q3+T;8eiPI!Fsq<^l^il80z7pv9lF<`hM(+breK8+y7fHQ~-Z;*!bMOf#=77 zxS8{si*oKb;4UB==4d?&`q;ZyHYSB(tgC8~I*~)%(MmFm0zE^XfDd=f@*DTL} za?(hg%{rh*uHRGow9@8SF>kKPbvV8;FIatzccs%5PHaBr6eZw4^dP#KD?bf1y=SMm z-?LI~C}Iz?&Zzh?vluN8P=a-Tq`%3nlMI=AGLeI-7*79~PG3`ASUJeo0sZCKJR?^K zP>T(xWr5n&IG>my|6%yxX4HWRRKln$>;;n9pzBXH)>?F6ZNX4}Y&9i%>86UNPkVxj z4Z@SG#)+bCRw3OScewvxn*1;jr68$&fWnZq?&l9vXr)g;gmC5Jub2L5zr+5GB) z$PjN%GXgZ8m&!L#emBv|PRNu{S?2UmBeKqz+m(ux9)u13(Q@rsL+}KteICq(IKQ8o|cXnE51HkjtSG^lPy)0zn%0f@y+j|?|ocnGp zdx#s~G0|KSIN(qi4V79x8xBCuca-b<_}kp;jKy_4Vov5L+3gj1rpJi79D=1Z@(UNI zGO?jx)x~2^EC0WYNXV6cSFSIX)`*uB+yI$Z2Cd1+6p_;?=gA}4(#2X^NlD_T7f|;Q zL%f^JVhY2D1vFLS^lxMfX++WGmdW{qc}E?F1vc<96s{Qnmd|0-iZVFHRyW ze@s1e0R3GP}{g|-q}aBefr`K5+)8c`xHbRx71RA>WSlnC?W@n+So65etf;D^pk)+s?M+ahf*1t zo+mO->pI9GjrYa1Qar9|*iN$${hKYY&wR6bE7KAFgFMlPCfQIDD%WZJ@si9DL62hI zD3{T{D68t3vcO%tDWsOx1JvLksZSCQMlAgciEC+eC@UO;VGBQPqJS29uqOE{n*g4pW)viNR*CQHV*(EdL9~%&t@*n3FN-M-xb?n=s~&BbhaFv zFV_dOM_P0EEuCEv8ozo14v`=8huhJX(HrrTz{JC2;c^r)pvl5>dq7*B=-G75`DJG6 z9x3h{dt8tDDGVEKh(a0R=fc?vbhTmq_dWI5+HhNren@>KHk4fqV+RMBqY3)0&T3LW zEf@BM%5MtXfh-A}W=R6zPbUA;4O3%I0COc=BeZFo656cX3)l`S`Bm-l7Pjz#^!WHE zDr*_CjQYw-9~Y^S^|rMYd4T&l5oA=DbyEHl)w2M%g9)hq6WJk|Ru*`d$HMqE`6@6l zf~U<&eHcG5bB{r{koIQ3>uQE5-MeF}t8MGEP27FyKKSwEuR%(aqllkrjA?P*xkIC< z=jwvTq%}bG05S@fWfiVrZ5yG$G_ZKph>(1&k4g zJb0VHtcmfO1s;&jx*0NiwxYwFLq6VIpSh0*mR4pxIj_n|^C0KwFj~v6#8T^+-K7wm zOi`efJ<*F|ernV|_Ae;8|Qat-g*-FXr1Eq~;Ea-Zvp${%kW-h8h$W zT)+mc=i7KZyL{l7^pDuOuuk7i1T~!QtLmDtyy`!U*- zYbc89l&G_Vk_gnN0+KfeBl=7e$b|@x{Rn`hs~R~z?Y`Rm1%Qhg`R7BPN2>5sWr)Z- zeu+CM*iHqEp*;v~+>a}_p?a>H?r0wx$ex$_qS=vaIF`;)E2)YXbe`oLpl}DApCY3c z?pY#UVk3`IwXmi7)$EQv2DQ2y4AHe>6_njqH8qx7E8ORVE&uFU^M3v@PjSRrlT_P; z9X?i`GID-TS3;^NxS%}cym;2xirun6g(p4caer1MH=s2Kc6oL-hFzA%2zdoV-wT6R zN!d7W)DGiRyR$R2zOmbzkI{xI!Qio^w{G`#wt<_-qjgVc&fZkDI{(Ws<&g48t`nvW 
zJkXuiY7TFYn%9}5(ZdicOXP7agDeBS3&A(S)bnOL=`=WTg&-8zzP(N#`pP^lJVh>I zPXmXif9Yg9=l}jgXn7(~al&>q;X#uXsrX;RnFXD6#E>5tQ{@A8B3+Hu)fq7D{dy2T5zX~tn4jT2D zj|wano;^(Xbe5NYd33l!s>=k&)S6pYdfmWMr5vLWJ&6O^OL0x$CyvJK-O4QgbsobF z&HH35(SV)}O&@f$OVGH-b(U6y!TLdH*I=)zCadK` zZel&0mqbXlm{@Syzc^5>vQ1Up-)7G4`SYYn?ps4Uk*iIShdJ_jCH)=#W`HM3*Y>6^ zPSkWE^LUB4&P42FK%uZfn)uGC-L~g&gX%=ggzl zVkkRy+4o6Am|t};4xc4vw(<)OJFAwheii6FWjMf_=)Si4)7a2J?Dz&e%!fA~L@+^1u7)Vhs_&MUB)u{@rN5#hx26hR-xrI}XQacbGqkGcc z!wE~}sS&()AitDP@9O}1Rv?i5^OpjEgCg$JiwF_LT*|_UR*XeT!=jlmiMlac(C&CoMlK4z_P=O{Fs55T8$QHNkOQU&WiglA9j0jgB zA$8JO%9)8%qqejvB%Aq9RJU8wqDw%=`Y#iufo<^-c*^(|K1X#2e!HPB z1xr~C_=7Jh%t_4~SCZ?$G3Zqjz1c+N8Ed=c>8Rw4`9@+Yg*_lm_0r6-1#7qybp6L9 zkh!J^LBZHL1{9>BXA@bbVU-sn7c{9KA#o(>A@2`(<83+M;m6Hs+rGMDwY~eOL*p3c zV(eWdj4IHHL777+*aR6FrBwAP)e2x}g?nwlx{$>1XbWawBj5Z4M`a=5x#8P%hTDX6 zJ%)ZTZpwg*K1eC(Rj(!BufbnCp-FM|eer?pVkA4rQhh91!mlytJxRLXrm*<66h^g! zkXQ72C+I?*>v$e~0$Zn@w?0telh0prYuD+e{9Ss_a|SOSUip2!Pj|%*e=ng{2URq_ z{>3%a0C7)fM=1G~Q~*d-XQQGHntL6NRS>EKV)`Va%W35OHgvE{pzEQ_cN_R(|9usQXAR z3NdQR-J%iU$gklSLVO;=Q=sc~db7n>S7~eAyJ{|NA8&K}odZ|Tx(p7CZUo+5o+`6C zM1<&6k$;(i$%Hj*%BZPodlWF$aTj^qoo#CC`=F?wS&e&9Kc;oJ23zF5vkG&oeX^7# zwkhf*@>axad@DP%_9ke^rqXO|4={&__C$=RE z(GsWX)|_;QQtMk!?!2I@SIsX*h85I(AMmis+WMf&j`;b_plYkQO!U=Lt4zSUj4r;6 zm*%wh&phX{tmjx)7uGj*m^K+Rfn)jZIlW=w485lBTXn*N9PA)9;qb3-AGC{WxL{|4rDc>uq)R+r&^(qb|WFs z6(0}gPv?zP4ga0NxTOo4!xiWTBH!ZKimdYJ^x9Fxr5(75_75*&Y|T9b&kl&88H&l= z#JSVZHM;GA=fQE8FuX_J`1jZy&xzhl>-p}3n+fpO0)_@2t+8*fpqjp$jBZD%xw{L7 zs~!dUpGVs7OeI|FJcqxuIeTDP8r+R(Rhu0o#|Gs&7Lff=2i>4!i~;)@VY$6P;eyr+J{e0AsKkFneBk3 z;;GcfQ;x&)p7M|XI66$eWs>u`3b5>qgiAjqQ-H!4>Z?4nMORK>uDq(xv4 zuRw`xffFHIjc5TW*kLnP(`13C5GotXRa=`xHeyG7301Gygi690&LDx!Ja0>A;{?!z>B9Kzk&u=QIT8L`qRx(gbPe+JU7#z=kjCEE}WbTRp@?R zrPJrg$`W2%kK5ZT)x2RZ$Le7!3({W({s_=H2+M=E@X0Qp)?Jv=8o#wWsOO|KRkN@c z7{V-yjf=kWm1pDI_J`trN;qh=IJHJRGd{JQRYx_PC#PY^wR%Xq*PiuH_+B3{F?oxm z3e!H*ol3z~j5&q$brx#AXK=pdI3}TWCPfhb-1p>&M@E*O40A~*lveaT&8hVM=@Z8< zaQX=93dz8t^5t!mg*fF(J%tP(>S=Uv*VVH4J2GL>5!3H1ZG1=MVC9}QXg)z|$9%s& z1c%Jh#uucCJRBrN-oif&7=(ucFG_i1-fh*X>~>}mJ{u9HSOiozk)7nWo)Vsau-BC* za`wodd3#qTcF7XT_inY_t6-nL&glf3o=QKdBGtLFs{CFrV?RXN&R4Oy zXDSh4B}W~6s7%zv_o7m&(JQxY2eBtLH?T#zHR75G5e*kR5fQ`jg}!UZ)~9uWFT05k z3`!S)BA$E@%@NRPt3Gm^?g1$6X`*hk=bI&HJ<-3yxFKdu7|l>Z#E_xU&$TS))3$ob zz}Q?xML3K-S^6qitQ<6{x{|VvF%!i~felMx0iYc45j>XN$U1SS<^#)I=cny`SYJI& z)x*+LM+cegh;^NXi3OeHj0&|l*&-6HY-~Ek*bqsg zj;i@`3&5Qr8Mv&yL(g{vTwmI-YCP1vw1e7WiH>Xq!vi~))6c$)^{CuWq`=qoP|1lrz47CM=?Jzbdzi!ApC zw8MkGUZS7c;7Zem7ij~E;2Bx>zt8d-8(eYd%xqB9WUS}59sYLwgl0QkXY2_mcXg_z z=NVMhJ)__2-TD<0b!ZyoG}m~0nnkc#uyih{lTTSZ`4vcUzwT_iUd-?Tc@Qa_oPm>K zqXVr}7-On^>mDuD3be^K(fM8v5dhnR$1;s?IywR*P$3>3ISC+^^2`VZDRVX?H9CGG zRZ+BP{W|=|l9_Z6E@&jCkOztpEgF`+MX#Jyu+JHE8w!S%Bhc~7=1Hs4+2TWxM;ksF z7|`gG*gFp@H>cw~m!{{u;WyzU?d~>u7$No<^a2By2A#+p0rV3-uKyhCP(L7<@sQV> zejF{b^#%aH?`neA_1^hCTEoVEqWh-a%;oE5j1^u6-P9`}He}}3TA2h>UTGf4Ndk-$ z>2C7u1W-kq2no(i695qOmwi=z{kY^}d|a6)DP? z1mujXY#!bwRiwTk1wDT;M!WbH0nW>Se!Oz`t1$oI;P=AwR5_T~n>z1TwXB&Fp@y-| z)Kl2x(5@p##U&!TlCNBc9VMacJVxsgq7>o~3tZ!Vs&4WZHLMR*)Z)-8ykwdai>X-5 zT&wFjR@!ulKH`D+GQr(q0{vX-jzXgh~ zya0k^!r?d5691K3{&zS#JptfwilVbW_kX2~|N4p%Fs!Yp>=O0(KN7%yi4{i%0>Lh? zCcH!JpJ4Zoi19HXKdEki$RqwAz2?6q;0DZqUhpbS3jWVG{}CR(^>+aMy^#NI+3yqi vZCd{zO^&|w?>q9_YyCgI`TxlMMzXm-ObykX@*iLQ1NbvG0$s%#xJ3OAF)1cl literal 0 HcmV?d00001 diff --git a/index.rst b/index.rst index 88bc2c431a8..0f1f5131951 100644 --- a/index.rst +++ b/index.rst @@ -121,6 +121,17 @@ Audio
+Named Tensor
+----------------------
+
+.. customgalleryitem::
+   :figure: /_static/img/named_tensor.png
+   :tooltip: Named Tensor
+   :description: :doc:`intermediate/named_tensor_tutorial`
+
+.. raw:: html
+
+    <div style='clear:both'></div>
Text
----------------------
@@ -297,6 +308,14 @@ PyTorch in Other Languages

   beginner/audio_preprocessing_tutorial

+.. toctree::
+   :maxdepth: 2
+   :includehidden:
+   :hidden:
+   :caption: Named Tensor
+
+   intermediate/named_tensor_tutorial
+
 .. toctree::
    :maxdepth: 2
    :includehidden:
diff --git a/intermediate_source/named_tensor_tutorial.py b/intermediate_source/named_tensor_tutorial.py
new file mode 100644
index 00000000000..b3cba821bce
--- /dev/null
+++ b/intermediate_source/named_tensor_tutorial.py
@@ -0,0 +1,469 @@
# -*- coding: utf-8 -*-
"""
Introduction to Named Tensors in PyTorch
****************************************
**Author**: `Richard Zou `_

`Sasha Rush `_ proposed the idea of
`named tensors `_ in a January 2019 blog post as a
way to enable more readable code when writing the multidimensional-array
manipulations needed for Transformer and convolutional architectures. With PyTorch 1.3,
we begin supporting the concept of named tensors by allowing ``Tensor``s to have **named
dimensions**; this tutorial is intended as a guide to the functionality that will
be included with the 1.3 launch. By the end of it, you will be able to:

- Create ``Tensor``s with named dimensions, as well as remove or rename those dimensions
- Understand the basics of how dimension names are propagated through operations
- See how naming dimensions enables clearer code in two key areas:
    - Broadcasting operations
    - Flattening and unflattening dimensions

Finally, we'll put this into practice by coding the operations of multi-headed attention
using named tensors, and see that the code is significantly more readable than it would
be with regular, "unnamed" tensors!
"""

######################################################################
# Basics: named dimensions
# ------------------------
#
# Tensors now take a new ``names`` argument that represents a name for each dimension.
# Here we construct a tensor with names:
#

import torch
imgs = torch.randn(1, 2, 2, 3, names=('N', 'C', 'H', 'W'))

######################################################################
# This works with most factory functions, such as:
#
# - ``tensor``
# - ``empty``
# - ``ones``
# - ``zeros``
#
# There are two ways to rename a ``Tensor``'s dimensions:
#

print(imgs.names)

# Method #1: set the .names attribute (renames in place)
imgs.names = ['batch', 'channel', 'width', 'height']
print(imgs.names)

# Method #2: specify new names with rename (returns a new tensor, so reassign it)
imgs = imgs.rename(channel='C', width='W', height='H')
print(imgs.names)

######################################################################
# The preferred way to remove names is to call ``tensor.rename(None)``:

imgs = imgs.rename(None)
print(imgs.names)

######################################################################
# Unnamed tensors (tensors with no named dimensions) still work as normal and do
# not have names in their repr.

unnamed = torch.randn(2, 1, 3)
print(unnamed)
print(unnamed.names)

######################################################################
# Named tensors do not require that all dimensions be named.

imgs = torch.randn(3, 1, 1, 2, names=('N', None, None, None))
print(imgs.names)

######################################################################
# Because named tensors coexist with unnamed tensors, we need a nice way to write named-tensor-aware
# code that works with both named and unnamed tensors. Use ``tensor.refine_names(*names)`` to refine
# dimensions and lift unnamed dims to named dims. Refining a dimension is defined as a "rename" with
# the following constraints:
#
# - A ``None`` dim can be refined to have any name
# - A named dim can only be refined to have the same name.

print(imgs.names)
print(imgs.refine_names('N', 'C', 'H', 'W').names)

# Coerces the last two dims to 'H' and 'W'. In Python 2, use the string '...' instead of ...
print(imgs.refine_names(..., 'H', 'W').names)

def catch_error(fn):
    try:
        fn()
    except RuntimeError as err:
        print(err)

# Trying to refine an existing name to a different name errors:
catch_error(lambda: imgs.refine_names('batch', 'channel', 'height', 'width'))

######################################################################
# Most simple operations propagate names. The ultimate goal for named tensors is
# for all operations to propagate names in a reasonable, intuitive manner. Many
# common operations have been implemented at the time of the 1.3 release:

named_imgs = imgs.refine_names('N', 'C', 'H', 'W')
print(named_imgs.abs().names)

######################################################################
# Accessors and Reduction
# -----------------------
#
# One can use dimension names to refer to dimensions instead of positional
# dimensions. These operations also propagate names. Indexing (basic and
# advanced) has not been implemented yet but is on the roadmap.

output = named_imgs.sum(['C'])  # Perform a sum over the channel dimension
print(output.names)

img0 = named_imgs.select('N', 0)  # get one image
print(img0.names)

######################################################################
# Name inference
# --------------
#
# Names are propagated on operations in a process called **name inference**. Name
# inference works in a two-step process:
#
# - **Check names**: an operator may check that certain dimensions must match.
# - **Propagate names**: name inference computes and propagates output names to
#   output tensors.
#
# Let's go through the very small example of adding two one-dim tensors with no
# broadcasting.

x = torch.randn(3, names=('X',))
y = torch.randn(3)
z = torch.randn(3, names=('Z',))

# **Check names**: first, we check whether the names of the two tensors being added
# match. Two names match if and only if they are equal (string equality) or at
# least one is ``None`` (``None``s are essentially a special wildcard name).
# Of the three possible pairwise sums (``x + y``, ``x + z``, and ``y + z``), the only
# one that will error, therefore, is ``x + z``:

catch_error(lambda: x + z)

# **Propagate names**: unify the two names by returning the most refined name of
# the two. With ``x + y``, ``X`` is more specific than ``None``.

print((x + y).names)

######################################################################
# Most name inference rules are straightforward but some of them (the dot
# product ones) can have unexpected semantics. Let's go through a few more of
# them.
#
# Broadcasting
# ------------
#
# Named tensors do not change broadcasting behavior; they still broadcast by
# position. However, when checking whether two dimensions can be broadcast,
# the names of those dimensions must match. Two names match if and only if they
# are equal (string equality), or if one is ``None``.
#
# We do not support **automatic broadcasting** by names because the output
# ordering is ambiguous and does not work well with unnamed dimensions. However,
# we support **explicit broadcasting** by names. The two examples below help
# clarify this.

# Automatic broadcasting: expected to fail
imgs = torch.randn(6, 6, 6, 6, names=('N', 'C', 'H', 'W'))
per_batch_scale = torch.rand(6, names=('N',))
catch_error(lambda: imgs * per_batch_scale)

# Explicit broadcasting: the names check out and the more refined names are
# propagated.
imgs = torch.randn(6, 6, 6, 6, names=('N', 'C', 'H', 'W'))
per_batch_scale_4d = torch.rand(6, 1, 1, 1, names=('N', None, None, None))
print((imgs * per_batch_scale_4d).names)

######################################################################
# Matrix multiply
# ---------------
#
# Of course, many of you may be wondering about the very special operation of
# matrix multiplication. ``torch.mm(A, B)`` contracts away the second dimension
# of ``A`` with the first dimension of ``B``, returning a tensor with the first
# dim of ``A`` and the second dim of ``B``. (The other matmul functions,
# ``torch.matmul``, ``torch.mv``, and ``torch.dot``, behave similarly.)

markov_states = torch.randn(128, 5, names=('batch', 'D'))
transition_matrix = torch.randn(5, 5, names=('in', 'out'))

# Apply one transition: 'D' and 'in' are contracted away, leaving ('batch', 'out')
new_state = markov_states @ transition_matrix
print(new_state.names)

######################################################################
# New behavior: Explicit broadcasting by names
# --------------------------------------------
#
# One of the main complaints about working with multiple dimensions is the need
# to unsqueeze "dummy" dimensions so that operations can occur. For example, in
# our per-batch-scale example before, with unnamed tensors we'd do the
# following:

imgs = torch.randn(2, 2, 2, 2)  # N, C, H, W
per_batch_scale = torch.rand(2)  # N

correct_result = imgs * per_batch_scale.view(2, 1, 1, 1)  # N, C, H, W
incorrect_result = imgs * per_batch_scale.expand_as(imgs)
assert not torch.allclose(correct_result, incorrect_result)

######################################################################
# We can make these operations safer (and easily agnostic to the number of
# dimensions) by using names. We provide a new ``tensor.align_as(other)`` operation
# that permutes the dimensions of tensor to match the order specified in
# ``other.names``, adding one-sized dimensions where appropriate
# (``tensor.align_to(*names)`` works as well):

imgs = imgs.refine_names('N', 'C', 'H', 'W')
per_batch_scale = per_batch_scale.refine_names('N')

named_result = imgs * per_batch_scale.align_as(imgs)
assert torch.allclose(named_result.rename(None), correct_result)
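######################################################################
# As a small aside (an addition to the original example, reusing the tensors
# defined just above): ``align_to`` can express the same broadcast without a
# reference tensor, by listing the desired dimension order directly.

also_named_result = imgs * per_batch_scale.align_to('N', 'C', 'H', 'W')
assert torch.allclose(also_named_result.rename(None), correct_result)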
It +# is possible to save the sizes during a ``flatten`` for ``unflatten`` but we +# do not yet do that. + +imgs = imgs.flatten(['C', 'H', 'W'], 'features') +print(imgs.names) + +imgs = imgs.unflatten('features', (('C', 2), ('H', 2), ('W', 2))) +print(imgs.names) + +###################################################################### +# Autograd support +# ---------------- +# +# Autograd currently supports named tensors in a limited manner: autograd +# ignores names on all tensors. Gradient computation is still correct but we +# lose the safety that names give us. It is on the roadmap to introduce handling +# of names to autograd. + +x = torch.randn(3, names=('D',)) +weight = torch.randn(3, names=('D',), requires_grad=True) +loss = (x - weight).abs() +grad_loss = torch.randn(3) +loss.backward(grad_loss) + +print(weight.grad) # Unnamed for now. Will be named in the future + +###################################################################### +# Other supported features +# ------------------------ +# +# See here (link to be included) for a detailed breakdown of what is +# supported with the 1.3 release, what is on the roadmap to be supported soon, +# and what will be supported in the future but not soon. +# +# In particular, three important features that we do not have plans to support +# soon are: +# +# - Retaining names when serializing or loading serialized ``Tensor``s via +# ``torch.save`` +# - Multi-processing via ``torch.multiprocessing`` +# - JIT support; for example, the following will error + +@torch.jit.script +def fn(x): + return x + +catch_error(lambda: fn(named_tensor)) + +###################################################################### +# Longer example: Multi-headed attention +# -------------------------------------- +# +# Now we'll go through a complete example of implementing a common advanced +# PyTorch ``nn.Module``: multi-headed attention. We assume the reader is already +# familiar with multi-headed attention; for a refresher, check out +# `this explanation `_. +# +# We adapt the implementation of multi-headed attention from +# `ParlAI `_; specifically +# `here `_. +# Read through the code at that example; then, compare with the code below, +# noting that there are four places labeled (I), (II), (III), and (IV) where +# using named tensors enables significantly more readable code. 
+ +import torch.nn as nn +import torch.nn.functional as F +import math + +class MultiHeadAttention(nn.Module): + def __init__(self, n_heads, dim, dropout=0): + super(MultiHeadAttention, self).__init__() + self.n_heads = n_heads + self.dim = dim + + self.attn_dropout = nn.Dropout(p=dropout) + self.q_lin = nn.Linear(dim, dim) + self.k_lin = nn.Linear(dim, dim) + self.v_lin = nn.Linear(dim, dim) + nn.init.xavier_normal_(self.q_lin.weight) + nn.init.xavier_normal_(self.k_lin.weight) + nn.init.xavier_normal_(self.v_lin.weight) + self.out_lin = nn.Linear(dim, dim) + nn.init.xavier_normal_(self.out_lin.weight) + + def forward(self, query, key=None, value=None, mask=None): + # (I) + query = query.refine_names('N', 'T', 'D') + if mask.dim() is 2: + mask = mask.refine_names('N', 'T') # selfattn + else: + mask = mask.refine_names('N', 'T', 'T_key') # enc attn + + batch_size, query_len, dim = query.size() + assert dim == self.dim, \ + f'Dimensions do not match: {dim} query vs {self.dim} configured' + assert mask is not None, 'Mask is None, please specify a mask' + n_heads = self.n_heads + dim_per_head = dim // n_heads + scale = math.sqrt(dim_per_head) + + def prepare_head(tensor): + # (II) + tensor = tensor.refine_names('N', 'T', 'D') + return (tensor.unflatten('D', [('H', n_heads), ('D_head', dim_per_head)]) + .align_to('N', 'H', 'T', 'D_head').contiguous()) + + if key is None and value is None: + # self attention + key = value = query + elif value is None: + # key and value are the same, but query differs + key = key.refine_names('N', 'T', 'D') + value = key + key_len = key.size('T') + dim = key.size('D') + + # Distinguish between query_len (T) and key_len (T_key) dims. + k = prepare_head(self.k_lin(key)).renamed(T='T_key') + v = prepare_head(self.v_lin(value)).renamed(T='T_key') + q = prepare_head(self.q_lin(query)) + + dot_prod = q.matmul(k.transpose('D_head', 'T_key')) + dot_prod.refine_names('N', 'H', 'T', 'T_key') # just a check. + + # (III) + # Named tensors doesn't support == yet; the following is a workaround. + attn_mask = (mask.renamed(None) == 0).refine_names(*mask.names) + attn_mask = attn_mask.align_as(dot_prod) + dot_prod.masked_fill_(attn_mask, -float(1e20)) + + attn_weights = self.attn_dropout(F.softmax(dot_prod / scale, dim='T_key')) + + # (IV) + attentioned = ( + attn_weights.matmul(v).refine_names('N', 'H', 'T', 'D_head') + .align_to('N', 'T', 'H', 'D_head') + .flatten(['H', 'D_head'], 'D') + ) + + return self.out_lin(attentioned).refine_names('N', 'T', 'D') + +###################################################################### +# Let's dive into each of these areas in turn: +# +# **(I) Refining the input tensor dims** + +def forward(self, query, key=None, value=None, mask=None): + # (I) + query = query.refine_names('N', 'T', 'D') + +###################################################################### +# The ``query = query.refine_names('N', 'T', 'D')`` serves as error checking and +# asserts that the the dimensions can be refined to ['N', 'T', 'D']. This prevents +# potentially silent or confusing size mismatch errors later down the line. 
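######################################################################
# As a quick, hypothetical illustration of the failure mode that (I) guards
# against (a minimal sketch, not part of the ParlAI example; ``bad_query`` is
# an assumed input), a tensor whose last dimension already carries a different
# name is rejected immediately instead of producing a confusing size-mismatch
# error deeper inside the module:

bad_query = torch.randn(2, 4, 6, names=('N', 'T', 'D_model'))
# 'D_model' cannot be refined to 'D', so refine_names raises a RuntimeError
catch_error(lambda: bad_query.refine_names('N', 'T', 'D'))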
+# +# **(II) Manipulating dimensions in ``prepare_head``** + +def prepare_head(tensor): + # (II) + tensor = tensor.refine_names('N', 'T', 'D') + return (tensor.unflatten('D', [('H', n_heads), ('D_head', dim_per_head)]) + .align_to('N', 'H', 'T', 'D_head').contiguous()) + +###################################################################### +# Next, multihead attention takes the key, query, and value and splits their +# feature dimensions into multiple heads and rearranges the dim order to be +# ``['N', 'H', 'T', 'D_head']``. We can achieve something similar using view +# and transpose operations like the following: + +def prepare_head(tensor): + batch_size, seq_len, _ = tensor.size() # N, T, D + tensor = tensor.view(batch_size, seq_len, n_heads, dim_per_head) # N, T, H, D + return tensor.transpose(1, 2).contiguous() # N, H, T, D + +###################################################################### +# but our named tensor variant provides ops that, although are more verbose, have +# more semantic meaning than ``view`` and "enforcable" documentation in the form +# of names. +# +# **(III) Explicit broadcasting by names** + +def ignore(): + # (III) + # Named tensors doesn't support == yet; the following is a workaround. + attn_mask = (mask.renamed(None) == 0).refine_names(*mask.names) + attn_mask = attn_mask.align_as(dot_prod) + dot_prod.masked_fill_(attn_mask, -float(1e20)) + +###################################################################### +# ``mask`` usually has dims ``[N, T]`` or ``[N, T, T_key]``, while ``dot_prod`` +# has dims ``[N, H, T, T_key]``. To make ``mask`` broadcast correctly with +# ``dot_prod``, we would usually ``unsqueeze`` dim 1 (and also the last dim +# in the former). Using named tensors, we can simply align the two tensors a +# nd stop worrying about where to ``unsqueeze`` dims. +# +# **(IV) More dimension manipulation using ``align_to`` and ``flatten``** + +def ignore(): + # (IV) + attentioned = ( + attn_weights.matmul(v).refine_names('N', 'H', 'T', 'D_head') + .align_to('N', 'T', 'H', 'D_head') + .flatten(['H', 'D_head'], 'D') + ) + +###################################################################### +# (IV): Like (II), using ``align_to`` and ``flatten`` are more semantically +# meaningful than view. +# +# Running the example +# ------------------- + +n, t, d, h = 7, 5, 2 * 3, 3 +query = torch.randn(n, t, d, names=('N', 'T', 'D')) +mask = torch.ones(n, t, names=('N', 'T')) +attn = MultiHeadAttention(h, d) +output = attn(query, mask=mask) +print(output.names) + +###################################################################### +# Conclusion +# ---------- +# +# Thank you for reading! Named tensors are still very much in development; +# if you have feedback and/or suggestions for improvement, please let us +# know by creating an issue `here `_. 
From f23f0375b0c8e37c4f2594d8d752035f7e15ee71 Mon Sep 17 00:00:00 2001 From: Seth Weidman Date: Mon, 30 Sep 2019 11:20:07 -0700 Subject: [PATCH 02/12] Formatting updates --- intermediate_source/named_tensor_tutorial.py | 30 ++++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/intermediate_source/named_tensor_tutorial.py b/intermediate_source/named_tensor_tutorial.py index b3cba821bce..6b5a688c0cb 100644 --- a/intermediate_source/named_tensor_tutorial.py +++ b/intermediate_source/named_tensor_tutorial.py @@ -8,17 +8,17 @@ `named tensors `_ in a January 2019 blog post as a way to enable more readable code when writing with the manipulations of multidimensional arrays necessary for coding up Transformer and Convolutional architectures. With PyTorch 1.3, -we begin supporting the concept of named tensors by allowing ``Tensor``s to have **named +we begin supporting the concept of named tensors by allowing a ``Tensor`` to have **named dimensions**; this tutorial is intended as a guide to the functionality that will be included with the 1.3 launch. By the end of it, you will be able to: -- Initiate ``Tensor``s with named dimensions, as well as removing or renmaing those dimensions +- Initiate a ``Tensor`` with named dimensions, as well as removing or renmaing those dimensions - Understand the basics of how dimension names are propagated through operations - See how naming dimensions enables clearer code in two key areas: - Broadcasting operations - Flattening and unflattening dimensions -Finally, we'll put this into practice by coding the operations of Multiheaded Attention +Finally, we'll put this into practice by coding the operations of multi-headed attention using named tensors, and see that the code is significantly more readable than it would be with regular, "unnamed" tensors! """ @@ -27,7 +27,7 @@ # Basics: named dimensions # ------------------------ # -# Tensors now take a new `names` argument that represents a name for each dimension. +# Tensors now take a new ``names`` argument that represents a name for each dimension. # Here we construct a tensor with names: # @@ -56,7 +56,7 @@ print(imgs.names) ###################################################################### -# The preferred way to remove names is to call `tensor.rename(None)``: +# The preferred way to remove names is to call ``tensor.rename(None)``: imgs.rename(None) @@ -101,7 +101,8 @@ def catch_error(fn): ###################################################################### # Most simple operations propagate names. The ultimate goal for named tensors is # for all operations to propagate names in a reasonable, intuitive manner. Many -# common operations have been implemented at the time of the 1.3 release: +# common operations have been implemented at the time of the 1.3 release; here, +# for example, is `.abs()`: named_imgs = imgs.refine_names('N', 'C', 'H', 'W') print(named_imgs.abs().names) @@ -173,8 +174,7 @@ def catch_error(fn): per_batch_scale = torch.rand(6, names=('N',)) catch_error(lambda: imgs * per_batch_scale) -# Explicit broadcasting: the names check out and the more refined names are -# propagated. +# Explicit broadcasting: the names check out and the more refined names are propagated. imgs = torch.randn(6, 6, 6, 6, names=('N', 'C', 'H', 'W')) per_batch_scale_4d = torch.rand(6, 1, 1, 1, names=('N', None, None, None)) print((imgs * per_batch_scale_4d).names) @@ -187,7 +187,7 @@ def catch_error(fn): # matrix multiplication. 
``torch.mm(A, B)`` contracts away the second dimension # of ``A`` with the first dimension of ``B``, returning a tensor with the first # dim of ``A`` and the second dim of ``B``. (the other matmul functions, -# ``torch.matmul``, ``torch.mv``, ``torch.dot``, behave similarly). +# ``torch.matmul``, ``torch.mv``, ``torch.dot``, behave similarly): markov_states = torch.randn(128, 5, names=('batch', 'D')) transition_matrix = torch.randn(5, 5, names=('in', 'out')) @@ -201,7 +201,7 @@ def catch_error(fn): # -------------------------------------------- # # One of the main complaints about working with multiple dimensions is the need -# to unsqueeze "dummy" dimensions so that operations can occur. For example, in +# to ``unsqueeze`` "dummy" dimensions so that operations can occur. For example, in # our per-batch-scale example before, with unnamed tensors we'd do the # following: @@ -239,7 +239,7 @@ def catch_error(fn): # ``flatten`` to work with names: ``tensor.flatten(dims, new_dim)``. # # ``flatten`` can only flatten adjacent dimensions but also works on -# non-contiguous dims. One must pass into ``unflatten`` a **namedshape**, which +# non-contiguous dims. One must pass into ``unflatten`` a **named shape**, which # is a list of ``(dim, size)`` tuples, to specify how to unflatten the dim. It # is possible to save the sizes during a ``flatten`` for ``unflatten`` but we # do not yet do that. @@ -278,7 +278,7 @@ def catch_error(fn): # In particular, three important features that we do not have plans to support # soon are: # -# - Retaining names when serializing or loading serialized ``Tensor``s via +# - Retaining names when serializing or loading a serialized ``Tensor`` via # ``torch.save`` # - Multi-processing via ``torch.multiprocessing`` # - JIT support; for example, the following will error @@ -302,8 +302,8 @@ def fn(x): # `ParlAI `_; specifically # `here `_. # Read through the code at that example; then, compare with the code below, -# noting that there are four places labeled (I), (II), (III), and (IV) where -# using named tensors enables significantly more readable code. +# noting that there are four places labeled (I), (II), (III), and (IV), where +# using named tensors enables more readable code. import torch.nn as nn import torch.nn.functional as F @@ -466,4 +466,4 @@ def ignore(): # # Thank you for reading! Named tensors are still very much in development; # if you have feedback and/or suggestions for improvement, please let us -# know by creating an issue `here `_. +# know by creating `an issue `_. From bdf3c13cc0527b8c527e59c0429770ab9bfccc3a Mon Sep 17 00:00:00 2001 From: Seth Weidman Date: Mon, 7 Oct 2019 17:24:45 -0700 Subject: [PATCH 03/12] Made most of Richard's changes --- intermediate_source/named_tensor_tutorial.py | 135 ++++++++++++------- 1 file changed, 89 insertions(+), 46 deletions(-) diff --git a/intermediate_source/named_tensor_tutorial.py b/intermediate_source/named_tensor_tutorial.py index 6b5a688c0cb..d2bb0c4c337 100644 --- a/intermediate_source/named_tensor_tutorial.py +++ b/intermediate_source/named_tensor_tutorial.py @@ -42,6 +42,10 @@ # - ``ones`` # - ``zeros`` # +# Unlike in +# `the original named tensors blogpost `_, +# named dimensions are ordered. `tensor.names[i]` is the name of the `i`th dimension of `tensor`. +# # There are two ways rename a ``Tensor``'s names: # @@ -113,7 +117,8 @@ def catch_error(fn): # # One can use dimension names to refer to dimensions instead of the positional # dimension. These operations also propagate names. 
Indexing (basic and -# advanced) has not been implemented yet but is on the roadmap. +# advanced) has not been implemented yet but is on the roadmap. Using the `named_imgs` +# tensor from above, we can do: output = named_imgs.sum(['C']) # Perform a sum over the channel dimension print(output.names) @@ -125,8 +130,8 @@ def catch_error(fn): # Name inference # -------------- # -# Names are propagated on operations in a process called **name inference**. Name -# inference works in a two step process: +# Names are propagated on operations in a two step process called **name inference**. It +# works as follows: # # - **Check names**: an operator may check that certain dimensions must match. # - **Propagate names**: name inference computes and propagates output names to @@ -166,8 +171,8 @@ def catch_error(fn): # # We do not support **automatic broadcasting** by names because the output # ordering is ambiguous and does not work well with unnamed dimensions. However, -# we support **explicit broadcasting** by names. The two examples below help -# clarify this. +# we support **explicit broadcasting** by names, which is introduced in a later +# section. The two examples below help clarify this. # Automatic broadcasting: expected to fail imgs = torch.randn(6, 6, 6, 6, names=('N', 'C', 'H', 'W')) @@ -268,8 +273,8 @@ def catch_error(fn): print(weight.grad) # Unnamed for now. Will be named in the future ###################################################################### -# Other supported features -# ------------------------ +# Other supported (and unsupported) features +# ------------------------------------------ # # See here (link to be included) for a detailed breakdown of what is # supported with the 1.3 release, what is on the roadmap to be supported soon, @@ -283,13 +288,18 @@ def catch_error(fn): # - Multi-processing via ``torch.multiprocessing`` # - JIT support; for example, the following will error +imgs_named = torch.randn(1, 2, 2, 3 , names=('N', 'C', 'H', 'W')) + @torch.jit.script def fn(x): return x -catch_error(lambda: fn(named_tensor)) +catch_error(lambda: fn(imgs_named)) ###################################################################### +# As a workaround, please drop names via `tensor = tensor.rename(None)` +# before using anything that does not yet support named tensors. 
+# # Longer example: Multi-headed attention # -------------------------------------- # @@ -327,47 +337,47 @@ def __init__(self, n_heads, dim, dropout=0): def forward(self, query, key=None, value=None, mask=None): # (I) - query = query.refine_names('N', 'T', 'D') - if mask.dim() is 2: - mask = mask.refine_names('N', 'T') # selfattn + query = query.refine_names(..., 'T', 'D') + self_attn = key is None and value is None + if self_attn: + mask = mask.refine_names(..., 'T') else: - mask = mask.refine_names('N', 'T', 'T_key') # enc attn + mask = mask.refine_names(..., 'T', 'T_key') # enc attn - batch_size, query_len, dim = query.size() + dim = query.size('D') assert dim == self.dim, \ - f'Dimensions do not match: {dim} query vs {self.dim} configured' + f'Dimensions do not match: {embedding_dim} query vs {self.dim} configured' assert mask is not None, 'Mask is None, please specify a mask' n_heads = self.n_heads dim_per_head = dim // n_heads scale = math.sqrt(dim_per_head) + # (II) def prepare_head(tensor): - # (II) - tensor = tensor.refine_names('N', 'T', 'D') + tensor = tensor.refine_names(..., 'T', 'D') return (tensor.unflatten('D', [('H', n_heads), ('D_head', dim_per_head)]) - .align_to('N', 'H', 'T', 'D_head').contiguous()) + .align_to(..., 'H', 'T', 'D_head')) - if key is None and value is None: - # self attention + if self_attn: key = value = query elif value is None: # key and value are the same, but query differs - key = key.refine_names('N', 'T', 'D') + key = key.refine_names(..., 'T', 'D') value = key key_len = key.size('T') dim = key.size('D') # Distinguish between query_len (T) and key_len (T_key) dims. - k = prepare_head(self.k_lin(key)).renamed(T='T_key') - v = prepare_head(self.v_lin(value)).renamed(T='T_key') + k = prepare_head(self.k_lin(key)).rename(T='T_key') + v = prepare_head(self.v_lin(value)).rename(T='T_key') q = prepare_head(self.q_lin(query)) - dot_prod = q.matmul(k.transpose('D_head', 'T_key')) - dot_prod.refine_names('N', 'H', 'T', 'T_key') # just a check. + dot_prod = q.div_(scale).matmul(k.align_to(..., 'D_head', 'T_key')) + dot_prod.refine_names(..., 'H', 'T', 'T_key') # just a check # (III) - # Named tensors doesn't support == yet; the following is a workaround. - attn_mask = (mask.renamed(None) == 0).refine_names(*mask.names) + # Named tensors doesn't support `==` yet; the following is a workaround. + attn_mask = (mask.rename(None) == 0).refine_names(*mask.names) attn_mask = attn_mask.align_as(dot_prod) dot_prod.masked_fill_(attn_mask, -float(1e20)) @@ -375,12 +385,12 @@ def prepare_head(tensor): # (IV) attentioned = ( - attn_weights.matmul(v).refine_names('N', 'H', 'T', 'D_head') - .align_to('N', 'T', 'H', 'D_head') + attn_weights.matmul(v).refine_names(..., 'H', 'T', 'D_head') + .align_to(..., 'T', 'H', 'D_head') .flatten(['H', 'D_head'], 'D') ) - return self.out_lin(attentioned).refine_names('N', 'T', 'D') + return self.out_lin(attentioned).refine_names(..., 'T', 'D') ###################################################################### # Let's dive into each of these areas in turn: @@ -389,12 +399,13 @@ def prepare_head(tensor): def forward(self, query, key=None, value=None, mask=None): # (I) - query = query.refine_names('N', 'T', 'D') + query = query.refine_names(..., 'T', 'D') ###################################################################### -# The ``query = query.refine_names('N', 'T', 'D')`` serves as error checking and -# asserts that the the dimensions can be refined to ['N', 'T', 'D']. 
This prevents -# potentially silent or confusing size mismatch errors later down the line. +# The ``query = query.refine_names(..., 'T', 'D')`` serves as enforcable documentation +# and lifts input dimensions to being named. It checks that the last two dimensions +# can be refined to `['T', 'D']`, preventing potentially silent or confusing size +# mismatch errors later down the line. # # **(II) Manipulating dimensions in ``prepare_head``** @@ -410,15 +421,41 @@ def prepare_head(tensor): # ``['N', 'H', 'T', 'D_head']``. We can achieve something similar using view # and transpose operations like the following: +# (II) def prepare_head(tensor): - batch_size, seq_len, _ = tensor.size() # N, T, D - tensor = tensor.view(batch_size, seq_len, n_heads, dim_per_head) # N, T, H, D - return tensor.transpose(1, 2).contiguous() # N, H, T, D + tensor = tensor.refine_names(..., 'T', 'D') + return (tensor.unflatten('D', [('H', n_heads), ('D_head', dim_per_head)]) + .align_to(..., 'H', 'T', 'D_head')) ###################################################################### -# but our named tensor variant provides ops that, although are more verbose, have -# more semantic meaning than ``view`` and "enforcable" documentation in the form -# of names. +# The first thing to note is how the code clearly states the input and +# output dimensions: the input tensor must end with the `T` and `D` dims +# and the output tensor ends in `H`, `T`, and `D_head` dims. +# +# The second thing to note is how clearly the code describes what is going on. +# prepare_head takes the key, query, and value and splits the embedding dim into +# multiple heads, finally rearranging the dim order to be `[..., 'H', 'T', 'D_head']`. +# ParlAI implements prepare_head as the following, using `view` and `transpose` +# operations: +# +# **(III) Explicit broadcasting by names** + +def prepare_head(tensor): + # input is [batch_size, seq_len, n_heads * dim_per_head] + # output is [batch_size * n_heads, seq_len, dim_per_head] + batch_size, seq_len, _ = tensor.size() + tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) + tensor = ( + tensor.transpose(1, 2) + .contiguous() + .view(batch_size * n_heads, seq_len, dim_per_head) + ) + return tensor + +###################################################################### +# Our named tensor variant uses ops that, though more verbose, also have +# more semantic meaning than `view` and `transpose` and include enforcable +# documentation in the form of names. # # **(III) Explicit broadcasting by names** @@ -426,29 +463,35 @@ def ignore(): # (III) # Named tensors doesn't support == yet; the following is a workaround. attn_mask = (mask.renamed(None) == 0).refine_names(*mask.names) + + # recall that we had dot_prod.refine_names(..., 'H', 'T', 'T_key') attn_mask = attn_mask.align_as(dot_prod) + dot_prod.masked_fill_(attn_mask, -float(1e20)) ###################################################################### -# ``mask`` usually has dims ``[N, T]`` or ``[N, T, T_key]``, while ``dot_prod`` +# ``mask`` usually has dims ``[N, T]`` (in the case of self-attention) or +# ``[N, T, T_key]`` (in the case of encoder attention) while ``dot_prod`` # has dims ``[N, H, T, T_key]``. To make ``mask`` broadcast correctly with -# ``dot_prod``, we would usually ``unsqueeze`` dim 1 (and also the last dim -# in the former). Using named tensors, we can simply align the two tensors a -# nd stop worrying about where to ``unsqueeze`` dims. 
+# ``dot_prod``, we would usually `unsqueeze` dims `1` and `-1` in the case of self +# attention or `unsqueeze` dim `1` in the case of encoder attention. Using +# named tensors, we can simply align the two tensors and stop worrying about +# where to ``unsqueeze`` dims. Using named tensors, we simply align `attn_mask` +# to `dot_prod` using `align_as` and stop worrying about where to `unsqueeze` dims. # # **(IV) More dimension manipulation using ``align_to`` and ``flatten``** def ignore(): # (IV) attentioned = ( - attn_weights.matmul(v).refine_names('N', 'H', 'T', 'D_head') - .align_to('N', 'T', 'H', 'D_head') + attn_weights.matmul(v).refine_names(..., 'H', 'T', 'D_head') + .align_to(..., 'T', 'H', 'D_head') .flatten(['H', 'D_head'], 'D') ) ###################################################################### # (IV): Like (II), using ``align_to`` and ``flatten`` are more semantically -# meaningful than view. +# meaningful than `view`. # # Running the example # ------------------- From 1761555758e76f8e403549a75651d4b31a8a9fc2 Mon Sep 17 00:00:00 2001 From: Seth Weidman Date: Mon, 7 Oct 2019 18:04:48 -0700 Subject: [PATCH 04/12] Final draft of named tensor tutorial --- intermediate_source/named_tensor_tutorial.py | 116 ++++++++++--------- 1 file changed, 61 insertions(+), 55 deletions(-) diff --git a/intermediate_source/named_tensor_tutorial.py b/intermediate_source/named_tensor_tutorial.py index d2bb0c4c337..86c324d026e 100644 --- a/intermediate_source/named_tensor_tutorial.py +++ b/intermediate_source/named_tensor_tutorial.py @@ -4,12 +4,14 @@ **************************************** **Author**: `Richard Zou `_ -`Sasha Rush `_ proposed the idea of -`named tensors `_ in a January 2019 blog post as a -way to enable more readable code when writing with the manipulations of multidimensional -arrays necessary for coding up Transformer and Convolutional architectures. With PyTorch 1.3, -we begin supporting the concept of named tensors by allowing a ``Tensor`` to have **named -dimensions**; this tutorial is intended as a guide to the functionality that will +Named Tensors aim to make tensors easier to use by allowing users to associate explicit names +with tensor dimensions. In most cases, operations that take dimension parameters will accept +dimension names, avoiding the need to track dimensions by position. In addition, named tensors +use names to automatically check that APIs are being used correctly at runtime, providing extra +safety. Names can also be used to rearrange dimensions, for example, to support +"broadcasting by name" rather than "broadcasting by position". + +this tutorial is intended as a guide to the functionality that will be included with the 1.3 launch. By the end of it, you will be able to: - Initiate a ``Tensor`` with named dimensions, as well as removing or renmaing those dimensions @@ -18,35 +20,40 @@ - Broadcasting operations - Flattening and unflattening dimensions -Finally, we'll put this into practice by coding the operations of multi-headed attention -using named tensors, and see that the code is significantly more readable than it would -be with regular, "unnamed" tensors! +Finally, we'll put this into practice by writing a multi-headed attention module +using named tensors. + +Named tensors in PyTorch are inspired by and done in collaboration with +`Sasha Rush `_. +The original idea and proof of concept were proposed in his +`January 2019 blog post `_. 
""" ###################################################################### # Basics: named dimensions # ------------------------ # -# Tensors now take a new ``names`` argument that represents a name for each dimension. -# Here we construct a tensor with names: +# PyTorch now allows Tensors to have named dimensions; factory functions +# now take a new `names` argument that associates a name with each dimension. +# This works with most factory functions, such as +# +# - `tensor` +# - `empty` +# - `ones` +# - `zeros` +# - `randn` +# - `rand` # +# Here we construct a tensor with names: import torch imgs = torch.randn(1, 2, 2, 3 , names=('N', 'C', 'H', 'W')) -###################################################################### -# This works with most factory functions, such as: -# -# - ``tensor`` -# - ``empty`` -# - ``ones`` -# - ``zeros`` -# # Unlike in # `the original named tensors blogpost `_, -# named dimensions are ordered. `tensor.names[i]` is the name of the `i`th dimension of `tensor`. +# named dimensions are ordered: `tensor.names[i]` is the name of the `i`th dimension of `tensor`. # -# There are two ways rename a ``Tensor``'s names: +# There are two ways rename a ``Tensor``'s dimensions: # print(imgs.names) @@ -63,10 +70,11 @@ # The preferred way to remove names is to call ``tensor.rename(None)``: imgs.rename(None) +print(imgs.names) ###################################################################### # Unnamed tensors (tensors with no named dimensions) still work as normal and do -# not have names in their repr. +# not have names in their `repr`. unnamed = torch.randn(2, 1, 3) print(unnamed) @@ -87,8 +95,9 @@ # - A ``None`` dim can be refined to have any name # - A named dim can only be refined to have the same name. -print(imgs.names) -print(imgs.refine_names('N', 'C', 'H', 'W').names) +imgs = torch.randn(3, 1, 1, 2) +named_imgs= imgs.refine_names('N', 'C', 'H', 'W') +print(named_imgs.names) # Coerces the last two dims to 'H' and 'W'. In Python 2, use the string '...' instead of ... print(imgs.refine_names(..., 'H', 'W').names) @@ -105,7 +114,7 @@ def catch_error(fn): ###################################################################### # Most simple operations propagate names. The ultimate goal for named tensors is # for all operations to propagate names in a reasonable, intuitive manner. Many -# common operations have been implemented at the time of the 1.3 release; here, +# common operations have been added at the time of the 1.3 release; here, # for example, is `.abs()`: named_imgs = imgs.refine_names('N', 'C', 'H', 'W') @@ -133,9 +142,9 @@ def catch_error(fn): # Names are propagated on operations in a two step process called **name inference**. It # works as follows: # -# - **Check names**: an operator may check that certain dimensions must match. -# - **Propagate names**: name inference computes and propagates output names to -# output tensors. +# 1. **Check names**: an operator may check that certain dimensions must match. +# 2. **Propagate names**: name inference computes and propagates output names to +# output tensors. # # Let's go through the very small example of adding 2 one-dim tensors with no # broadcasting. @@ -151,7 +160,7 @@ def catch_error(fn): catch_error(lambda: x + z) -# **Propagate names**: unify the two names by returning the most refined name of +# **Propagate names**: _unify_ the two names by returning the most refined name of # the two. With ``x + y``, ``X`` is more specific than ``None``. 
print((x + y).names) @@ -188,11 +197,10 @@ def catch_error(fn): # Matrix multiply # --------------- # -# Of course, many of you may be wondering about the very special operation of -# matrix multiplication. ``torch.mm(A, B)`` contracts away the second dimension -# of ``A`` with the first dimension of ``B``, returning a tensor with the first -# dim of ``A`` and the second dim of ``B``. (the other matmul functions, -# ``torch.matmul``, ``torch.mv``, ``torch.dot``, behave similarly): +# `torch.mm(A, B)`` performs a dot product between the second dim of `A` +# and the first dim of `B`, returning a tensor with the first dim of `A` +# and the second dim of `B`. (the other matmul functions, such as `torch.matmul`, +# `torch.mv`, `torch.dot`, behave similarly). markov_states = torch.randn(128, 5, names=('batch', 'D')) transition_matrix = torch.randn(5, 5, names=('in', 'out')) @@ -202,6 +210,9 @@ def catch_error(fn): print(new_state.names) ###################################################################### +# Inherently, matrix multiply does not check if the contracted dimensions +# have the same name. +# # New behavior: Explicit broadcasting by names # -------------------------------------------- # @@ -272,6 +283,14 @@ def catch_error(fn): print(weight.grad) # Unnamed for now. Will be named in the future +weight.grad.zero_() +grad_loss = grad_loss.refine_names('C') +loss = (x - weight).abs() +# Ideally we'd check that the names of loss and grad_loss match but we don't yet. +loss.backward(grad_loss) + +print(weight.grad) # still unnamed + ###################################################################### # Other supported (and unsupported) features # ------------------------------------------ @@ -280,8 +299,8 @@ def catch_error(fn): # supported with the 1.3 release, what is on the roadmap to be supported soon, # and what will be supported in the future but not soon. # -# In particular, three important features that we do not have plans to support -# soon are: +# In particular, we want to call out three important features that are not +# currently supported: # # - Retaining names when serializing or loading a serialized ``Tensor`` via # ``torch.save`` @@ -313,7 +332,8 @@ def fn(x): # `here `_. # Read through the code at that example; then, compare with the code below, # noting that there are four places labeled (I), (II), (III), and (IV), where -# using named tensors enables more readable code. +# using named tensors enables more readable code; we will dive into each of these +# after the code block. import torch.nn as nn import torch.nn.functional as F @@ -393,8 +413,6 @@ def prepare_head(tensor): return self.out_lin(attentioned).refine_names(..., 'T', 'D') ###################################################################### -# Let's dive into each of these areas in turn: -# # **(I) Refining the input tensor dims** def forward(self, query, key=None, value=None, mask=None): @@ -409,18 +427,6 @@ def forward(self, query, key=None, value=None, mask=None): # # **(II) Manipulating dimensions in ``prepare_head``** -def prepare_head(tensor): - # (II) - tensor = tensor.refine_names('N', 'T', 'D') - return (tensor.unflatten('D', [('H', n_heads), ('D_head', dim_per_head)]) - .align_to('N', 'H', 'T', 'D_head').contiguous()) - -###################################################################### -# Next, multihead attention takes the key, query, and value and splits their -# feature dimensions into multiple heads and rearranges the dim order to be -# ``['N', 'H', 'T', 'D_head']``. 
We can achieve something similar using view -# and transpose operations like the following: - # (II) def prepare_head(tensor): tensor = tensor.refine_names(..., 'T', 'D') @@ -470,13 +476,13 @@ def ignore(): dot_prod.masked_fill_(attn_mask, -float(1e20)) ###################################################################### -# ``mask`` usually has dims ``[N, T]`` (in the case of self-attention) or +# ``mask`` usually has dims ``[N, T]`` (in the case of self attention) or # ``[N, T, T_key]`` (in the case of encoder attention) while ``dot_prod`` # has dims ``[N, H, T, T_key]``. To make ``mask`` broadcast correctly with # ``dot_prod``, we would usually `unsqueeze` dims `1` and `-1` in the case of self # attention or `unsqueeze` dim `1` in the case of encoder attention. Using # named tensors, we can simply align the two tensors and stop worrying about -# where to ``unsqueeze`` dims. Using named tensors, we simply align `attn_mask` +# where to unsqueeze` dims. Using named tensors, we simply align `attn_mask` # to `dot_prod` using `align_as` and stop worrying about where to `unsqueeze` dims. # # **(IV) More dimension manipulation using ``align_to`` and ``flatten``** @@ -490,8 +496,8 @@ def ignore(): ) ###################################################################### -# (IV): Like (II), using ``align_to`` and ``flatten`` are more semantically -# meaningful than `view`. +# (IV): Like (II), `align_to` and `flatten` are more semantically +# meaningful than `view` (despite being more verbose). # # Running the example # ------------------- From 5c15b669f234718d88f5387fe2e3090baa1c8b36 Mon Sep 17 00:00:00 2001 From: Seth Weidman Date: Tue, 8 Oct 2019 07:55:18 -0700 Subject: [PATCH 05/12] Formatting fixes, reordering for flow and clarity --- intermediate_source/named_tensor_tutorial.py | 193 ++++++++++--------- 1 file changed, 97 insertions(+), 96 deletions(-) diff --git a/intermediate_source/named_tensor_tutorial.py b/intermediate_source/named_tensor_tutorial.py index 86c324d026e..a50e4f3d2fd 100644 --- a/intermediate_source/named_tensor_tutorial.py +++ b/intermediate_source/named_tensor_tutorial.py @@ -1,9 +1,11 @@ # -*- coding: utf-8 -*- """ -Introduction to Named Tensors in PyTorch -**************************************** +(experimental) Introduction to Named Tensors in PyTorch +******************************************************* **Author**: `Richard Zou `_ +**Editor**: `Seth Weidman `_ + Named Tensors aim to make tensors easier to use by allowing users to associate explicit names with tensor dimensions. In most cases, operations that take dimension parameters will accept dimension names, avoiding the need to track dimensions by position. In addition, named tensors @@ -11,7 +13,7 @@ safety. Names can also be used to rearrange dimensions, for example, to support "broadcasting by name" rather than "broadcasting by position". -this tutorial is intended as a guide to the functionality that will +This tutorial is intended as a guide to the functionality that will be included with the 1.3 launch. By the end of it, you will be able to: - Initiate a ``Tensor`` with named dimensions, as well as removing or renmaing those dimensions @@ -20,61 +22,59 @@ - Broadcasting operations - Flattening and unflattening dimensions -Finally, we'll put this into practice by writing a multi-headed attention module +Finally, we'll put this into practice by writing a multi-head attention module using named tensors. Named tensors in PyTorch are inspired by and done in collaboration with `Sasha Rush `_. 
The original idea and proof of concept were proposed in his `January 2019 blog post `_. -""" -###################################################################### -# Basics: named dimensions -# ------------------------ -# -# PyTorch now allows Tensors to have named dimensions; factory functions -# now take a new `names` argument that associates a name with each dimension. -# This works with most factory functions, such as -# -# - `tensor` -# - `empty` -# - `ones` -# - `zeros` -# - `randn` -# - `rand` -# -# Here we construct a tensor with names: +Basics: named dimensions +======================== + +PyTorch now allows Tensors to have named dimensions; factory functions +now take a new `names` argument that associates a name with each dimension. +This works with most factory functions, such as + +- `tensor` +- `empty` +- `ones` +- `zeros` +- `randn` +- `rand` + +Here we construct a tensor with names: +""" import torch imgs = torch.randn(1, 2, 2, 3 , names=('N', 'C', 'H', 'W')) +print(imgs.names) +###################################################################### # Unlike in # `the original named tensors blogpost `_, -# named dimensions are ordered: `tensor.names[i]` is the name of the `i`th dimension of `tensor`. +# named dimensions are ordered: ``tensor.names[i]`` is the name of the ``i`` th dimension of ``tensor``. # # There are two ways rename a ``Tensor``'s dimensions: -# - -print(imgs.names) # Method #1: set .names attribute imgs.names = ['batch', 'channel', 'width', 'height'] print(imgs.names) # Method #2: specify new names: -imgs.rename(channel='C', width='W', height='H') +imgs = imgs.rename(channel='C', width='W', height='H') print(imgs.names) ###################################################################### # The preferred way to remove names is to call ``tensor.rename(None)``: -imgs.rename(None) +imgs = imgs.rename(None) print(imgs.names) ###################################################################### # Unnamed tensors (tensors with no named dimensions) still work as normal and do -# not have names in their `repr`. +# not have names in their ``repr``. unnamed = torch.randn(2, 1, 3) print(unnamed) @@ -87,17 +87,18 @@ print(imgs.names) ###################################################################### -# Because named tensors coexist with unnamed tensors, we need a nice way to write named-tensor-aware -# code that works with both named and unnamed tensors. Use ``tensor.refine_names(*names)`` to refine -# dimensions and lift unnamed dims to named dims. Refining a dimension is defined as a "rename" with -# the following constraints: +# Because named tensors coexist with unnamed tensors, we need a nice way to +# write named-tensor-aware code that works with both named and unnamed tensors. +# Use ``tensor.refine_names(*names)`` to refine dimensions and lift unnamed dims +# to named dims. Refining a dimension is defined as a "rename" with the following +# constraints: # # - A ``None`` dim can be refined to have any name # - A named dim can only be refined to have the same name. imgs = torch.randn(3, 1, 1, 2) -named_imgs= imgs.refine_names('N', 'C', 'H', 'W') -print(named_imgs.names) +imgs = imgs.refine_names('N', 'C', 'H', 'W') +print(imgs.names) # Coerces the last two dims to 'H' and 'W'. In Python 2, use the string '...' instead of ... 
print(imgs.refine_names(..., 'H', 'W').names) @@ -109,7 +110,7 @@ def catch_error(fn): print(err) # Tried to refine an existing name to a different name -print(catch_error(lambda: imgs.refine_names('batch', 'channel', 'height', 'width'))) +catch_error(lambda: imgs.refine_names('batch', 'channel', 'height', 'width')) ###################################################################### # Most simple operations propagate names. The ultimate goal for named tensors is @@ -121,12 +122,34 @@ def catch_error(fn): print(named_imgs.abs().names) ###################################################################### +# Speaking of operations propogating names, let's quickly cover one +# of the most important operations in PyTorch. +# +# Matrix multiply +# --------------- +# +# ``torch.mm(A, B)`` performs a dot product between the second dim of `A` +# and the first dim of `B`, returning a tensor with the first dim of `A` +# and the second dim of `B`. (the other matmul functions, such as ``torch.matmul``, +# ``torch.mv``, ``torch.dot``, behave similarly). + +markov_states = torch.randn(128, 5, names=('batch', 'D')) +transition_matrix = torch.randn(5, 5, names=('in', 'out')) + +# Apply one transition +new_state = markov_states @ transition_matrix +print(new_state.names) + +###################################################################### +# As you can see, matrix multiply does not check if the contracted dimensions +# have the same name. +# # Accessors and Reduction # ----------------------- # # One can use dimension names to refer to dimensions instead of the positional # dimension. These operations also propagate names. Indexing (basic and -# advanced) has not been implemented yet but is on the roadmap. Using the `named_imgs` +# advanced) has not been implemented yet but is on the roadmap. Using the ``named_imgs`` # tensor from above, we can do: output = named_imgs.sum(['C']) # Perform a sum over the channel dimension @@ -139,12 +162,12 @@ def catch_error(fn): # Name inference # -------------- # -# Names are propagated on operations in a two step process called **name inference**. It -# works as follows: +# Names are propagated on operations in a two step process called **name inference**. +# The two steps are: # -# 1. **Check names**: an operator may check that certain dimensions must match. -# 2. **Propagate names**: name inference computes and propagates output names to -# output tensors. +# 1. **Check names**: an operator may perform automatic checks at runtime that +# check that certain dimension names must match. +# 2. **Propagate names**: name inference propagates output names to output tensors. # # Let's go through the very small example of adding 2 one-dim tensors with no # broadcasting. @@ -153,6 +176,7 @@ def catch_error(fn): y = torch.randn(3) z = torch.randn(3, names=('Z',)) +###################################################################### # **Check names**: first, we will check whether the names of these two tensors # match. Two names match if and only if they are equal (string equality) or at # least one is ``None`` (``None``s are essentially a special wildcard name). @@ -160,58 +184,45 @@ def catch_error(fn): catch_error(lambda: x + z) +###################################################################### # **Propagate names**: _unify_ the two names by returning the most refined name of -# the two. With ``x + y``, ``X`` is more specific than ``None``. +# the two. With `x + y, `X` is more refined than `None`. 
print((x + y).names) ###################################################################### -# Most name inference rules are straightforward but some of them (the dot -# product ones) can have unexpected semantics. Let's go through a few more of -# them. +# Most name inference rules are straightforward but some of them can have +# unexpected semantics. Let's go through a few more of them. # # Broadcasting # ------------ # # Named tensors do not change broadcasting behavior; they still broadcast by # position. However, when checking two dimensions for if they can be -# broadcasted, the names of those dimensions must match. Two names match if and -# only if they are equal (string equality), or if one is None. +# broadcasted, the names of those dimensions must match. # -# We do not support **automatic broadcasting** by names because the output -# ordering is ambiguous and does not work well with unnamed dimensions. However, -# we support **explicit broadcasting** by names, which is introduced in a later -# section. The two examples below help clarify this. +# Furthermore, broadcasting with named tensors can prevent incorrect behavior. +# The following code will error, whereas without `names` it would add +# `per_batch_scale` to the last dimension of `imgs`. # Automatic broadcasting: expected to fail imgs = torch.randn(6, 6, 6, 6, names=('N', 'C', 'H', 'W')) per_batch_scale = torch.rand(6, names=('N',)) catch_error(lambda: imgs * per_batch_scale) -# Explicit broadcasting: the names check out and the more refined names are propagated. +###################################################################### +# How `should` we perform this broadcasting operation along the first +# dimension? One way, involving names, would be to explicitly initialize +# the ``per_batch_scale`` tensor as four dimensional, and give it names +# (such as ``('N', None, None, None)`` below) so that name inference will +# work. + imgs = torch.randn(6, 6, 6, 6, names=('N', 'C', 'H', 'W')) per_batch_scale_4d = torch.rand(6, 1, 1, 1, names=('N', None, None, None)) print((imgs * per_batch_scale_4d).names) ###################################################################### -# Matrix multiply -# --------------- -# -# `torch.mm(A, B)`` performs a dot product between the second dim of `A` -# and the first dim of `B`, returning a tensor with the first dim of `A` -# and the second dim of `B`. (the other matmul functions, such as `torch.matmul`, -# `torch.mv`, `torch.dot`, behave similarly). - -markov_states = torch.randn(128, 5, names=('batch', 'D')) -transition_matrix = torch.randn(5, 5, names=('in', 'out')) - -# Apply one transition -new_state = markov_states @ transition_matrix -print(new_state.names) - -###################################################################### -# Inherently, matrix multiply does not check if the contracted dimensions -# have the same name. +# However, named tensors enable an even better way, which we'll cover next. # # New behavior: Explicit broadcasting by names # -------------------------------------------- @@ -316,13 +327,13 @@ def fn(x): catch_error(lambda: fn(imgs_named)) ###################################################################### -# As a workaround, please drop names via `tensor = tensor.rename(None)` +# As a workaround, please drop names via ``tensor = tensor.rename(None)`` # before using anything that does not yet support named tensors. 
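######################################################################
# The same trick covers the serialization gap for now (a sketch only; the
# 'named_imgs.pt' path is an assumed example): save the tensor without names
# and refine them back after loading.

torch.save(imgs_named.rename(None), 'named_imgs.pt')
reloaded = torch.load('named_imgs.pt').refine_names('N', 'C', 'H', 'W')
print(reloaded.names)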
# # Longer example: Multi-headed attention # -------------------------------------- # -# Now we'll go through a complete example of implementing a common advanced +# Now we'll go through a complete example of implementing a common # PyTorch ``nn.Module``: multi-headed attention. We assume the reader is already # familiar with multi-headed attention; for a refresher, check out # `this explanation `_. @@ -378,12 +389,9 @@ def prepare_head(tensor): return (tensor.unflatten('D', [('H', n_heads), ('D_head', dim_per_head)]) .align_to(..., 'H', 'T', 'D_head')) + assert value is None if self_attn: key = value = query - elif value is None: - # key and value are the same, but query differs - key = key.refine_names(..., 'T', 'D') - value = key key_len = key.size('T') dim = key.size('D') @@ -396,9 +404,7 @@ def prepare_head(tensor): dot_prod.refine_names(..., 'H', 'T', 'T_key') # just a check # (III) - # Named tensors doesn't support `==` yet; the following is a workaround. - attn_mask = (mask.rename(None) == 0).refine_names(*mask.names) - attn_mask = attn_mask.align_as(dot_prod) + attn_mask = (attn_mask == 0).align_as(dot_prod) dot_prod.masked_fill_(attn_mask, -float(1e20)) attn_weights = self.attn_dropout(F.softmax(dot_prod / scale, dim='T_key')) @@ -422,7 +428,7 @@ def forward(self, query, key=None, value=None, mask=None): ###################################################################### # The ``query = query.refine_names(..., 'T', 'D')`` serves as enforcable documentation # and lifts input dimensions to being named. It checks that the last two dimensions -# can be refined to `['T', 'D']`, preventing potentially silent or confusing size +# can be refined to ``['T', 'D']``, preventing potentially silent or confusing size # mismatch errors later down the line. # # **(II) Manipulating dimensions in ``prepare_head``** @@ -435,13 +441,13 @@ def prepare_head(tensor): ###################################################################### # The first thing to note is how the code clearly states the input and -# output dimensions: the input tensor must end with the `T` and `D` dims -# and the output tensor ends in `H`, `T`, and `D_head` dims. +# output dimensions: the input tensor must end with the ``T`` and ``D`` dims +# and the output tensor ends in ``H``, ``T``, and ``D_head`` dims. # # The second thing to note is how clearly the code describes what is going on. # prepare_head takes the key, query, and value and splits the embedding dim into -# multiple heads, finally rearranging the dim order to be `[..., 'H', 'T', 'D_head']`. -# ParlAI implements prepare_head as the following, using `view` and `transpose` +# multiple heads, finally rearranging the dim order to be ``[..., 'H', 'T', 'D_head']``. +# ParlAI implements ``prepare_head`` as the following, using ``view`` and ``transpose`` # operations: # # **(III) Explicit broadcasting by names** @@ -460,18 +466,14 @@ def prepare_head(tensor): ###################################################################### # Our named tensor variant uses ops that, though more verbose, also have -# more semantic meaning than `view` and `transpose` and include enforcable +# more semantic meaning than ``view`` and ``transpose`` and include enforcable # documentation in the form of names. # # **(III) Explicit broadcasting by names** def ignore(): # (III) - # Named tensors doesn't support == yet; the following is a workaround. 
- attn_mask = (mask.renamed(None) == 0).refine_names(*mask.names) - - # recall that we had dot_prod.refine_names(..., 'H', 'T', 'T_key') - attn_mask = attn_mask.align_as(dot_prod) + attn_mask = (attn_mask == 0).align_as(dot_prod) dot_prod.masked_fill_(attn_mask, -float(1e20)) @@ -479,11 +481,10 @@ def ignore(): # ``mask`` usually has dims ``[N, T]`` (in the case of self attention) or # ``[N, T, T_key]`` (in the case of encoder attention) while ``dot_prod`` # has dims ``[N, H, T, T_key]``. To make ``mask`` broadcast correctly with -# ``dot_prod``, we would usually `unsqueeze` dims `1` and `-1` in the case of self -# attention or `unsqueeze` dim `1` in the case of encoder attention. Using -# named tensors, we can simply align the two tensors and stop worrying about -# where to unsqueeze` dims. Using named tensors, we simply align `attn_mask` -# to `dot_prod` using `align_as` and stop worrying about where to `unsqueeze` dims. +# ``dot_prod``, we would usually `unsqueeze` dims ``1`` and ``-1`` in the case of self +# attention or ``unsqueeze`` dim ``1`` in the case of encoder attention. Using +# named tensors, we simply align ``attn_mask`` to ``dot_prod`` using ``align_as`` +# and stop worrying about where to ``unsqueeze`` dims. # # **(IV) More dimension manipulation using ``align_to`` and ``flatten``** @@ -496,7 +497,7 @@ def ignore(): ) ###################################################################### -# (IV): Like (II), `align_to` and `flatten` are more semantically +# (IV): Like (II), ``align_to`` and ``flatten`` are more semantically # meaningful than `view` (despite being more verbose). # # Running the example From 91ccc857aa0f5ca2c1c324d6f9d3556adc58b234 Mon Sep 17 00:00:00 2001 From: Seth Weidman Date: Tue, 8 Oct 2019 13:57:48 -0700 Subject: [PATCH 06/12] Final formatting fixes, addressing comments on flow and wording --- intermediate_source/named_tensor_tutorial.py | 116 ++++++++++--------- 1 file changed, 61 insertions(+), 55 deletions(-) diff --git a/intermediate_source/named_tensor_tutorial.py b/intermediate_source/named_tensor_tutorial.py index a50e4f3d2fd..eed189293fc 100644 --- a/intermediate_source/named_tensor_tutorial.py +++ b/intermediate_source/named_tensor_tutorial.py @@ -62,7 +62,7 @@ imgs.names = ['batch', 'channel', 'width', 'height'] print(imgs.names) -# Method #2: specify new names: +# Method #2: specify new names (note: this changes names out-of-place) imgs = imgs.rename(channel='C', width='W', height='H') print(imgs.names) @@ -88,7 +88,7 @@ ###################################################################### # Because named tensors coexist with unnamed tensors, we need a nice way to -# write named-tensor-aware code that works with both named and unnamed tensors. +# write named tensor-aware code that works with both named and unnamed tensors. # Use ``tensor.refine_names(*names)`` to refine dimensions and lift unnamed dims # to named dims. Refining a dimension is defined as a "rename" with the following # constraints: @@ -97,8 +97,7 @@ # - A named dim can only be refined to have the same name. imgs = torch.randn(3, 1, 1, 2) -imgs = imgs.refine_names('N', 'C', 'H', 'W') -print(imgs.names) +print(imgs.refine_names('N', 'C', 'H', 'W')) # Coerces the last two dims to 'H' and 'W'. In Python 2, use the string '...' instead of ... print(imgs.refine_names(..., 'H', 'W').names) @@ -116,34 +115,12 @@ def catch_error(fn): # Most simple operations propagate names. 
The ultimate goal for named tensors is # for all operations to propagate names in a reasonable, intuitive manner. Many # common operations have been added at the time of the 1.3 release; here, -# for example, is `.abs()`: +# for example, is ``.abs()``: named_imgs = imgs.refine_names('N', 'C', 'H', 'W') print(named_imgs.abs().names) ###################################################################### -# Speaking of operations propogating names, let's quickly cover one -# of the most important operations in PyTorch. -# -# Matrix multiply -# --------------- -# -# ``torch.mm(A, B)`` performs a dot product between the second dim of `A` -# and the first dim of `B`, returning a tensor with the first dim of `A` -# and the second dim of `B`. (the other matmul functions, such as ``torch.matmul``, -# ``torch.mv``, ``torch.dot``, behave similarly). - -markov_states = torch.randn(128, 5, names=('batch', 'D')) -transition_matrix = torch.randn(5, 5, names=('in', 'out')) - -# Apply one transition -new_state = markov_states @ transition_matrix -print(new_state.names) - -###################################################################### -# As you can see, matrix multiply does not check if the contracted dimensions -# have the same name. -# # Accessors and Reduction # ----------------------- # @@ -179,23 +156,23 @@ def catch_error(fn): ###################################################################### # **Check names**: first, we will check whether the names of these two tensors # match. Two names match if and only if they are equal (string equality) or at -# least one is ``None`` (``None``s are essentially a special wildcard name). -# The only one of these three that will error, therefore, is ``x+z``: +# least one is ``None`` (``None`` is essentially a special wildcard name). +# The only one of these three that will error, therefore, is ``x + z``: catch_error(lambda: x + z) ###################################################################### -# **Propagate names**: _unify_ the two names by returning the most refined name of -# the two. With `x + y, `X` is more refined than `None`. +# **Propagate names**: `unify` the two names by returning the most refined name of +# the two. With ``x + y``, ``X`` is more refined than ``None``. print((x + y).names) ###################################################################### # Most name inference rules are straightforward but some of them can have -# unexpected semantics. Let's go through a few more of them. +# unexpected semantics. Let's go through a couple you're likely to encounter: +# broadcasting and matrix multiply. # -# Broadcasting -# ------------ +# **Broadcasting** # # Named tensors do not change broadcasting behavior; they still broadcast by # position. However, when checking two dimensions for if they can be @@ -203,7 +180,7 @@ def catch_error(fn): # # Furthermore, broadcasting with named tensors can prevent incorrect behavior. # The following code will error, whereas without `names` it would add -# `per_batch_scale` to the last dimension of `imgs`. +# ``per_batch_scale`` to the last dimension of ``imgs``. # Automatic broadcasting: expected to fail imgs = torch.randn(6, 6, 6, 6, names=('N', 'C', 'H', 'W')) @@ -212,17 +189,37 @@ def catch_error(fn): ###################################################################### # How `should` we perform this broadcasting operation along the first -# dimension? 
One way, involving names, would be to explicitly initialize -# the ``per_batch_scale`` tensor as four dimensional, and give it names -# (such as ``('N', None, None, None)`` below) so that name inference will -# work. +# dimension? One way, involving names, would be to name the ``per_batch_scale`` +# tensor such that it matches ``imgs.names``, as shown below. imgs = torch.randn(6, 6, 6, 6, names=('N', 'C', 'H', 'W')) -per_batch_scale_4d = torch.rand(6, 1, 1, 1, names=('N', None, None, None)) +per_batch_scale_4d = torch.rand(6, 1, 1, 1, names=('N', 'C', 'H', 'W')) print((imgs * per_batch_scale_4d).names) ###################################################################### -# However, named tensors enable an even better way, which we'll cover next. +# Another way would be to use the new explicit broadcasting by names +# functionality, covered below. +# +# **Matrix multiply** +# +# ``torch.mm(A, B)`` performs a dot product between the second dim of ``A`` +# and the first dim of ``B``, returning a tensor with the first dim of ``A`` +# and the second dim of ``B``. (the other matmul functions, such as ``torch.matmul``, +# ``torch.mv``, ``torch.dot``, behave similarly). + +markov_states = torch.randn(128, 5, names=('batch', 'D')) +transition_matrix = torch.randn(5, 5, names=('in', 'out')) + +# Apply one transition +new_state = markov_states @ transition_matrix +print(new_state.names) + +###################################################################### +# As you can see, matrix multiply does not check if the contracted dimensions +# have the same name. +# +# Next, we'll cover two new behaviors that named tensors enable: explicit +# broadcasting by names. # # New behavior: Explicit broadcasting by names # -------------------------------------------- @@ -250,6 +247,7 @@ def catch_error(fn): per_batch_scale = per_batch_scale.refine_names('N') named_result = imgs * per_batch_scale.align_as(imgs) +# note: named tensors do not yet work with allclose assert torch.allclose(named_result.rename(None), correct_result) ###################################################################### @@ -307,14 +305,12 @@ def catch_error(fn): # ------------------------------------------ # # See here (link to be included) for a detailed breakdown of what is -# supported with the 1.3 release, what is on the roadmap to be supported soon, -# and what will be supported in the future but not soon. +# supported with the 1.3 release. # # In particular, we want to call out three important features that are not # currently supported: # -# - Retaining names when serializing or loading a serialized ``Tensor`` via -# ``torch.save`` +# - Saving or loading named tensors via ``torch.save`` or ``torch.load`` # - Multi-processing via ``torch.multiprocessing`` # - JIT support; for example, the following will error @@ -330,15 +326,15 @@ def fn(x): # As a workaround, please drop names via ``tensor = tensor.rename(None)`` # before using anything that does not yet support named tensors. # -# Longer example: Multi-headed attention +# Longer example: Multi-head attention # -------------------------------------- # # Now we'll go through a complete example of implementing a common -# PyTorch ``nn.Module``: multi-headed attention. We assume the reader is already -# familiar with multi-headed attention; for a refresher, check out +# PyTorch ``nn.Module``: multi-head attention. We assume the reader is already +# familiar with multi-head attention; for a refresher, check out # `this explanation `_. 
# -# We adapt the implementation of multi-headed attention from +# We adapt the implementation of multi-head attention from # `ParlAI `_; specifically # `here `_. # Read through the code at that example; then, compare with the code below, @@ -431,7 +427,7 @@ def forward(self, query, key=None, value=None, mask=None): # can be refined to ``['T', 'D']``, preventing potentially silent or confusing size # mismatch errors later down the line. # -# **(II) Manipulating dimensions in ``prepare_head``** +# **(II) Manipulating dimensions in prepare_head** # (II) def prepare_head(tensor): @@ -449,8 +445,6 @@ def prepare_head(tensor): # multiple heads, finally rearranging the dim order to be ``[..., 'H', 'T', 'D_head']``. # ParlAI implements ``prepare_head`` as the following, using ``view`` and ``transpose`` # operations: -# -# **(III) Explicit broadcasting by names** def prepare_head(tensor): # input is [batch_size, seq_len, n_heads * dim_per_head] @@ -486,7 +480,7 @@ def ignore(): # named tensors, we simply align ``attn_mask`` to ``dot_prod`` using ``align_as`` # and stop worrying about where to ``unsqueeze`` dims. # -# **(IV) More dimension manipulation using ``align_to`` and ``flatten``** +# **(IV) More dimension manipulation using align_to and flatten** def ignore(): # (IV) @@ -497,8 +491,8 @@ def ignore(): ) ###################################################################### -# (IV): Like (II), ``align_to`` and ``flatten`` are more semantically -# meaningful than `view` (despite being more verbose). +# Here, as in (II), ``align_to`` and ``flatten`` are more semantically +# meaningful than ``view`` (despite being more verbose). # # Running the example # ------------------- @@ -508,6 +502,18 @@ def ignore(): mask = torch.ones(n, t, names=('N', 'T')) attn = MultiHeadAttention(h, d) output = attn(query, mask=mask) +# works as expected! +print(output.names) + +###################################################################### +# The above works as expected. Furthermore, note that in the code we +# did not mention the name of the batch dimension at all. In fact, +# the code is agnostic to the existence of the batch dimensions, so +# that we can run the following example-level code: + +query = torch.randn(t, d, names=('T', 'D')) +mask = torch.ones(t, names=('T',)) +output = attn(query, mask=mask) print(output.names) ###################################################################### From 7bfccfac66eab116e7b001a801f9bb99e60ebb4c Mon Sep 17 00:00:00 2001 From: Seth Weidman Date: Wed, 9 Oct 2019 10:41:03 -0700 Subject: [PATCH 07/12] Formatting, wording fixes --- index.rst | 4 +-- intermediate_source/named_tensor_tutorial.py | 33 +++++++++++--------- 2 files changed, 21 insertions(+), 16 deletions(-) diff --git a/index.rst b/index.rst index c74f0c517a3..025500b0a78 100644 --- a/index.rst +++ b/index.rst @@ -98,7 +98,7 @@ Audio
-Named Tensor +(experimental) Named Tensor ---------------------- .. customgalleryitem:: @@ -302,7 +302,7 @@ PyTorch Fundamentals In-Depth :maxdepth: 2 :includehidden: :hidden: - :caption: Named Tensor + :caption: (experimental) Named Tensor intermediate/named_tensor_tutorial diff --git a/intermediate_source/named_tensor_tutorial.py b/intermediate_source/named_tensor_tutorial.py index eed189293fc..cfc71ca1403 100644 --- a/intermediate_source/named_tensor_tutorial.py +++ b/intermediate_source/named_tensor_tutorial.py @@ -4,8 +4,6 @@ ******************************************************* **Author**: `Richard Zou `_ -**Editor**: `Seth Weidman `_ - Named Tensors aim to make tensors easier to use by allowing users to associate explicit names with tensor dimensions. In most cases, operations that take dimension parameters will accept dimension names, avoiding the need to track dimensions by position. In addition, named tensors @@ -103,10 +101,11 @@ print(imgs.refine_names(..., 'H', 'W').names) def catch_error(fn): - try: - fn() - except RuntimeError as err: - print(err) + fn() + assert False + +# Actually name 'imgs' using 'refine_names' +imgs = imgs.refine_names('N', 'C', 'H', 'W') # Tried to refine an existing name to a different name catch_error(lambda: imgs.refine_names('batch', 'channel', 'height', 'width')) @@ -129,7 +128,7 @@ def catch_error(fn): # advanced) has not been implemented yet but is on the roadmap. Using the ``named_imgs`` # tensor from above, we can do: -output = named_imgs.sum(['C']) # Perform a sum over the channel dimension +output = named_imgs.sum('C') # Perform a sum over the channel dimension print(output.names) img0 = named_imgs.select('N', 0) # get one image @@ -172,7 +171,8 @@ def catch_error(fn): # unexpected semantics. Let's go through a couple you're likely to encounter: # broadcasting and matrix multiply. # -# **Broadcasting** +# Broadcasting +# ^^^^^^^^^^^^ # # Named tensors do not change broadcasting behavior; they still broadcast by # position. However, when checking two dimensions for if they can be @@ -200,7 +200,8 @@ def catch_error(fn): # Another way would be to use the new explicit broadcasting by names # functionality, covered below. # -# **Matrix multiply** +# Matrix multiply +# ^^^^^^^^^^^^^^^ # # ``torch.mm(A, B)`` performs a dot product between the second dim of ``A`` # and the first dim of ``B``, returning a tensor with the first dim of ``A`` @@ -219,7 +220,7 @@ def catch_error(fn): # have the same name. # # Next, we'll cover two new behaviors that named tensors enable: explicit -# broadcasting by names. 
+# broadcasting by names and flattening and unflattening dimensions by names # # New behavior: Explicit broadcasting by names # -------------------------------------------- @@ -388,6 +389,10 @@ def prepare_head(tensor): assert value is None if self_attn: key = value = query + elif value is None: + # key and value are the same, but query differs + key = key.refine_names(..., 'T', 'D') + value = key key_len = key.size('T') dim = key.size('D') @@ -400,7 +405,7 @@ def prepare_head(tensor): dot_prod.refine_names(..., 'H', 'T', 'T_key') # just a check # (III) - attn_mask = (attn_mask == 0).align_as(dot_prod) + attn_mask = (mask == 0).align_as(dot_prod) dot_prod.masked_fill_(attn_mask, -float(1e20)) attn_weights = self.attn_dropout(F.softmax(dot_prod / scale, dim='T_key')) @@ -467,7 +472,7 @@ def prepare_head(tensor): def ignore(): # (III) - attn_mask = (attn_mask == 0).align_as(dot_prod) + attn_mask = (mask == 0).align_as(dot_prod) dot_prod.masked_fill_(attn_mask, -float(1e20)) @@ -508,8 +513,8 @@ def ignore(): ###################################################################### # The above works as expected. Furthermore, note that in the code we # did not mention the name of the batch dimension at all. In fact, -# the code is agnostic to the existence of the batch dimensions, so -# that we can run the following example-level code: +# our ``MultiHeadAttention`` module is agnostic to the existence of batch +# dimensions. query = torch.randn(t, d, names=('T', 'D')) mask = torch.ones(t, names=('T',)) From 9d5c022d00ae5b0e4e864e951b67f729cdfa14ce Mon Sep 17 00:00:00 2001 From: Richard Zou Date: Wed, 9 Oct 2019 14:27:47 -0700 Subject: [PATCH 08/12] Fix typos --- intermediate_source/named_tensor_tutorial.py | 1073 +++++++++--------- 1 file changed, 543 insertions(+), 530 deletions(-) diff --git a/intermediate_source/named_tensor_tutorial.py b/intermediate_source/named_tensor_tutorial.py index cfc71ca1403..35a8f3e20f9 100644 --- a/intermediate_source/named_tensor_tutorial.py +++ b/intermediate_source/named_tensor_tutorial.py @@ -1,530 +1,543 @@ -# -*- coding: utf-8 -*- -""" -(experimental) Introduction to Named Tensors in PyTorch -******************************************************* -**Author**: `Richard Zou `_ - -Named Tensors aim to make tensors easier to use by allowing users to associate explicit names -with tensor dimensions. In most cases, operations that take dimension parameters will accept -dimension names, avoiding the need to track dimensions by position. In addition, named tensors -use names to automatically check that APIs are being used correctly at runtime, providing extra -safety. Names can also be used to rearrange dimensions, for example, to support -"broadcasting by name" rather than "broadcasting by position". - -This tutorial is intended as a guide to the functionality that will -be included with the 1.3 launch. By the end of it, you will be able to: - -- Initiate a ``Tensor`` with named dimensions, as well as removing or renmaing those dimensions -- Understand the basics of how dimension names are propagated through operations -- See how naming dimensions enables clearer code in two key areas: - - Broadcasting operations - - Flattening and unflattening dimensions - -Finally, we'll put this into practice by writing a multi-head attention module -using named tensors. - -Named tensors in PyTorch are inspired by and done in collaboration with -`Sasha Rush `_. -The original idea and proof of concept were proposed in his -`January 2019 blog post `_. 
- -Basics: named dimensions -======================== - -PyTorch now allows Tensors to have named dimensions; factory functions -now take a new `names` argument that associates a name with each dimension. -This works with most factory functions, such as - -- `tensor` -- `empty` -- `ones` -- `zeros` -- `randn` -- `rand` - -Here we construct a tensor with names: -""" - -import torch -imgs = torch.randn(1, 2, 2, 3 , names=('N', 'C', 'H', 'W')) -print(imgs.names) - -###################################################################### -# Unlike in -# `the original named tensors blogpost `_, -# named dimensions are ordered: ``tensor.names[i]`` is the name of the ``i`` th dimension of ``tensor``. -# -# There are two ways rename a ``Tensor``'s dimensions: - -# Method #1: set .names attribute -imgs.names = ['batch', 'channel', 'width', 'height'] -print(imgs.names) - -# Method #2: specify new names (note: this changes names out-of-place) -imgs = imgs.rename(channel='C', width='W', height='H') -print(imgs.names) - -###################################################################### -# The preferred way to remove names is to call ``tensor.rename(None)``: - -imgs = imgs.rename(None) -print(imgs.names) - -###################################################################### -# Unnamed tensors (tensors with no named dimensions) still work as normal and do -# not have names in their ``repr``. - -unnamed = torch.randn(2, 1, 3) -print(unnamed) -print(unnamed.names) - -###################################################################### -# Named tensors do not require that all dimensions be named. - -imgs = torch.randn(3, 1, 1, 2, names=('N', None, None, None)) -print(imgs.names) - -###################################################################### -# Because named tensors coexist with unnamed tensors, we need a nice way to -# write named tensor-aware code that works with both named and unnamed tensors. -# Use ``tensor.refine_names(*names)`` to refine dimensions and lift unnamed dims -# to named dims. Refining a dimension is defined as a "rename" with the following -# constraints: -# -# - A ``None`` dim can be refined to have any name -# - A named dim can only be refined to have the same name. - -imgs = torch.randn(3, 1, 1, 2) -print(imgs.refine_names('N', 'C', 'H', 'W')) - -# Coerces the last two dims to 'H' and 'W'. In Python 2, use the string '...' instead of ... -print(imgs.refine_names(..., 'H', 'W').names) - -def catch_error(fn): - fn() - assert False - -# Actually name 'imgs' using 'refine_names' -imgs = imgs.refine_names('N', 'C', 'H', 'W') - -# Tried to refine an existing name to a different name -catch_error(lambda: imgs.refine_names('batch', 'channel', 'height', 'width')) - -###################################################################### -# Most simple operations propagate names. The ultimate goal for named tensors is -# for all operations to propagate names in a reasonable, intuitive manner. Many -# common operations have been added at the time of the 1.3 release; here, -# for example, is ``.abs()``: - -named_imgs = imgs.refine_names('N', 'C', 'H', 'W') -print(named_imgs.abs().names) - -###################################################################### -# Accessors and Reduction -# ----------------------- -# -# One can use dimension names to refer to dimensions instead of the positional -# dimension. These operations also propagate names. Indexing (basic and -# advanced) has not been implemented yet but is on the roadmap. 
Using the ``named_imgs`` -# tensor from above, we can do: - -output = named_imgs.sum('C') # Perform a sum over the channel dimension -print(output.names) - -img0 = named_imgs.select('N', 0) # get one image -print(img0.names) - -###################################################################### -# Name inference -# -------------- -# -# Names are propagated on operations in a two step process called **name inference**. -# The two steps are: -# -# 1. **Check names**: an operator may perform automatic checks at runtime that -# check that certain dimension names must match. -# 2. **Propagate names**: name inference propagates output names to output tensors. -# -# Let's go through the very small example of adding 2 one-dim tensors with no -# broadcasting. - -x = torch.randn(3, names=('X',)) -y = torch.randn(3) -z = torch.randn(3, names=('Z',)) - -###################################################################### -# **Check names**: first, we will check whether the names of these two tensors -# match. Two names match if and only if they are equal (string equality) or at -# least one is ``None`` (``None`` is essentially a special wildcard name). -# The only one of these three that will error, therefore, is ``x + z``: - -catch_error(lambda: x + z) - -###################################################################### -# **Propagate names**: `unify` the two names by returning the most refined name of -# the two. With ``x + y``, ``X`` is more refined than ``None``. - -print((x + y).names) - -###################################################################### -# Most name inference rules are straightforward but some of them can have -# unexpected semantics. Let's go through a couple you're likely to encounter: -# broadcasting and matrix multiply. -# -# Broadcasting -# ^^^^^^^^^^^^ -# -# Named tensors do not change broadcasting behavior; they still broadcast by -# position. However, when checking two dimensions for if they can be -# broadcasted, the names of those dimensions must match. -# -# Furthermore, broadcasting with named tensors can prevent incorrect behavior. -# The following code will error, whereas without `names` it would add -# ``per_batch_scale`` to the last dimension of ``imgs``. - -# Automatic broadcasting: expected to fail -imgs = torch.randn(6, 6, 6, 6, names=('N', 'C', 'H', 'W')) -per_batch_scale = torch.rand(6, names=('N',)) -catch_error(lambda: imgs * per_batch_scale) - -###################################################################### -# How `should` we perform this broadcasting operation along the first -# dimension? One way, involving names, would be to name the ``per_batch_scale`` -# tensor such that it matches ``imgs.names``, as shown below. - -imgs = torch.randn(6, 6, 6, 6, names=('N', 'C', 'H', 'W')) -per_batch_scale_4d = torch.rand(6, 1, 1, 1, names=('N', 'C', 'H', 'W')) -print((imgs * per_batch_scale_4d).names) - -###################################################################### -# Another way would be to use the new explicit broadcasting by names -# functionality, covered below. -# -# Matrix multiply -# ^^^^^^^^^^^^^^^ -# -# ``torch.mm(A, B)`` performs a dot product between the second dim of ``A`` -# and the first dim of ``B``, returning a tensor with the first dim of ``A`` -# and the second dim of ``B``. (the other matmul functions, such as ``torch.matmul``, -# ``torch.mv``, ``torch.dot``, behave similarly). 
- -markov_states = torch.randn(128, 5, names=('batch', 'D')) -transition_matrix = torch.randn(5, 5, names=('in', 'out')) - -# Apply one transition -new_state = markov_states @ transition_matrix -print(new_state.names) - -###################################################################### -# As you can see, matrix multiply does not check if the contracted dimensions -# have the same name. -# -# Next, we'll cover two new behaviors that named tensors enable: explicit -# broadcasting by names and flattening and unflattening dimensions by names -# -# New behavior: Explicit broadcasting by names -# -------------------------------------------- -# -# One of the main complaints about working with multiple dimensions is the need -# to ``unsqueeze`` "dummy" dimensions so that operations can occur. For example, in -# our per-batch-scale example before, with unnamed tensors we'd do the -# following: - -imgs = torch.randn(2, 2, 2, 2) # N, C, H, W -per_batch_scale = torch.rand(2) # N - -correct_result = imgs * per_batch_scale.view(2, 1, 1, 1) # N, C, H, W -incorrect_result = imgs * per_batch_scale.expand_as(imgs) -assert not torch.allclose(correct_result, incorrect_result) - -###################################################################### -# We can make these operations safer (and easily agnostic to the number of -# dimensions) by using names. We provide a new ``tensor.align_as(other)`` operation -# that permutes the dimensions of tensor to match the order specified in -# ``other.names``, adding one-sized dimensions where appropriate -# (``tensor.align_to(*names)`` works as well): - -imgs = imgs.refine_names('N', 'C', 'H', 'W') -per_batch_scale = per_batch_scale.refine_names('N') - -named_result = imgs * per_batch_scale.align_as(imgs) -# note: named tensors do not yet work with allclose -assert torch.allclose(named_result.rename(None), correct_result) - -###################################################################### -# New behavior: Flattening and unflattening dimensions by names -# ------------------------------------------------------------- -# -# One common operation is flattening and unflattening dimensions. Right now, -# users perform this using either ``view``, ``reshape``, or ``flatten``; use -# cases include flattening batch dimensions to send tensors into operators that -# must take inputs with a certain number of dimensions (i.e., conv2d takes 4D input). -# -# To make these operation more semantically meaningful than view or reshape, we -# introduce a new ``tensor.unflatten(dim, namedshape)`` method and update -# ``flatten`` to work with names: ``tensor.flatten(dims, new_dim)``. -# -# ``flatten`` can only flatten adjacent dimensions but also works on -# non-contiguous dims. One must pass into ``unflatten`` a **named shape**, which -# is a list of ``(dim, size)`` tuples, to specify how to unflatten the dim. It -# is possible to save the sizes during a ``flatten`` for ``unflatten`` but we -# do not yet do that. - -imgs = imgs.flatten(['C', 'H', 'W'], 'features') -print(imgs.names) - -imgs = imgs.unflatten('features', (('C', 2), ('H', 2), ('W', 2))) -print(imgs.names) - -###################################################################### -# Autograd support -# ---------------- -# -# Autograd currently supports named tensors in a limited manner: autograd -# ignores names on all tensors. Gradient computation is still correct but we -# lose the safety that names give us. It is on the roadmap to introduce handling -# of names to autograd. 
- -x = torch.randn(3, names=('D',)) -weight = torch.randn(3, names=('D',), requires_grad=True) -loss = (x - weight).abs() -grad_loss = torch.randn(3) -loss.backward(grad_loss) - -print(weight.grad) # Unnamed for now. Will be named in the future - -weight.grad.zero_() -grad_loss = grad_loss.refine_names('C') -loss = (x - weight).abs() -# Ideally we'd check that the names of loss and grad_loss match but we don't yet. -loss.backward(grad_loss) - -print(weight.grad) # still unnamed - -###################################################################### -# Other supported (and unsupported) features -# ------------------------------------------ -# -# See here (link to be included) for a detailed breakdown of what is -# supported with the 1.3 release. -# -# In particular, we want to call out three important features that are not -# currently supported: -# -# - Saving or loading named tensors via ``torch.save`` or ``torch.load`` -# - Multi-processing via ``torch.multiprocessing`` -# - JIT support; for example, the following will error - -imgs_named = torch.randn(1, 2, 2, 3 , names=('N', 'C', 'H', 'W')) - -@torch.jit.script -def fn(x): - return x - -catch_error(lambda: fn(imgs_named)) - -###################################################################### -# As a workaround, please drop names via ``tensor = tensor.rename(None)`` -# before using anything that does not yet support named tensors. -# -# Longer example: Multi-head attention -# -------------------------------------- -# -# Now we'll go through a complete example of implementing a common -# PyTorch ``nn.Module``: multi-head attention. We assume the reader is already -# familiar with multi-head attention; for a refresher, check out -# `this explanation `_. -# -# We adapt the implementation of multi-head attention from -# `ParlAI `_; specifically -# `here `_. -# Read through the code at that example; then, compare with the code below, -# noting that there are four places labeled (I), (II), (III), and (IV), where -# using named tensors enables more readable code; we will dive into each of these -# after the code block. 
- -import torch.nn as nn -import torch.nn.functional as F -import math - -class MultiHeadAttention(nn.Module): - def __init__(self, n_heads, dim, dropout=0): - super(MultiHeadAttention, self).__init__() - self.n_heads = n_heads - self.dim = dim - - self.attn_dropout = nn.Dropout(p=dropout) - self.q_lin = nn.Linear(dim, dim) - self.k_lin = nn.Linear(dim, dim) - self.v_lin = nn.Linear(dim, dim) - nn.init.xavier_normal_(self.q_lin.weight) - nn.init.xavier_normal_(self.k_lin.weight) - nn.init.xavier_normal_(self.v_lin.weight) - self.out_lin = nn.Linear(dim, dim) - nn.init.xavier_normal_(self.out_lin.weight) - - def forward(self, query, key=None, value=None, mask=None): - # (I) - query = query.refine_names(..., 'T', 'D') - self_attn = key is None and value is None - if self_attn: - mask = mask.refine_names(..., 'T') - else: - mask = mask.refine_names(..., 'T', 'T_key') # enc attn - - dim = query.size('D') - assert dim == self.dim, \ - f'Dimensions do not match: {embedding_dim} query vs {self.dim} configured' - assert mask is not None, 'Mask is None, please specify a mask' - n_heads = self.n_heads - dim_per_head = dim // n_heads - scale = math.sqrt(dim_per_head) - - # (II) - def prepare_head(tensor): - tensor = tensor.refine_names(..., 'T', 'D') - return (tensor.unflatten('D', [('H', n_heads), ('D_head', dim_per_head)]) - .align_to(..., 'H', 'T', 'D_head')) - - assert value is None - if self_attn: - key = value = query - elif value is None: - # key and value are the same, but query differs - key = key.refine_names(..., 'T', 'D') - value = key - key_len = key.size('T') - dim = key.size('D') - - # Distinguish between query_len (T) and key_len (T_key) dims. - k = prepare_head(self.k_lin(key)).rename(T='T_key') - v = prepare_head(self.v_lin(value)).rename(T='T_key') - q = prepare_head(self.q_lin(query)) - - dot_prod = q.div_(scale).matmul(k.align_to(..., 'D_head', 'T_key')) - dot_prod.refine_names(..., 'H', 'T', 'T_key') # just a check - - # (III) - attn_mask = (mask == 0).align_as(dot_prod) - dot_prod.masked_fill_(attn_mask, -float(1e20)) - - attn_weights = self.attn_dropout(F.softmax(dot_prod / scale, dim='T_key')) - - # (IV) - attentioned = ( - attn_weights.matmul(v).refine_names(..., 'H', 'T', 'D_head') - .align_to(..., 'T', 'H', 'D_head') - .flatten(['H', 'D_head'], 'D') - ) - - return self.out_lin(attentioned).refine_names(..., 'T', 'D') - -###################################################################### -# **(I) Refining the input tensor dims** - -def forward(self, query, key=None, value=None, mask=None): - # (I) - query = query.refine_names(..., 'T', 'D') - -###################################################################### -# The ``query = query.refine_names(..., 'T', 'D')`` serves as enforcable documentation -# and lifts input dimensions to being named. It checks that the last two dimensions -# can be refined to ``['T', 'D']``, preventing potentially silent or confusing size -# mismatch errors later down the line. -# -# **(II) Manipulating dimensions in prepare_head** - -# (II) -def prepare_head(tensor): - tensor = tensor.refine_names(..., 'T', 'D') - return (tensor.unflatten('D', [('H', n_heads), ('D_head', dim_per_head)]) - .align_to(..., 'H', 'T', 'D_head')) - -###################################################################### -# The first thing to note is how the code clearly states the input and -# output dimensions: the input tensor must end with the ``T`` and ``D`` dims -# and the output tensor ends in ``H``, ``T``, and ``D_head`` dims. 
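# [Editorial aside, not part of the diff above] To make the shape bookkeeping
# in (II) concrete, here is a minimal sketch of the named ``prepare_head``
# run on an invented ('N', 'T', 'D') = (2, 5, 12) tensor with three heads;
# all sizes are made up for illustration.

import torch

n_heads, dim_per_head = 3, 4

def prepare_head(tensor):
    tensor = tensor.refine_names(..., 'T', 'D')
    return (tensor.unflatten('D', [('H', n_heads), ('D_head', dim_per_head)])
            .align_to(..., 'H', 'T', 'D_head'))

query = torch.randn(2, 5, n_heads * dim_per_head, names=('N', 'T', 'D'))
out = prepare_head(query)
print(out.names)   # ('N', 'H', 'T', 'D_head')
print(out.shape)   # torch.Size([2, 3, 5, 4])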
-# -# The second thing to note is how clearly the code describes what is going on. -# prepare_head takes the key, query, and value and splits the embedding dim into -# multiple heads, finally rearranging the dim order to be ``[..., 'H', 'T', 'D_head']``. -# ParlAI implements ``prepare_head`` as the following, using ``view`` and ``transpose`` -# operations: - -def prepare_head(tensor): - # input is [batch_size, seq_len, n_heads * dim_per_head] - # output is [batch_size * n_heads, seq_len, dim_per_head] - batch_size, seq_len, _ = tensor.size() - tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) - tensor = ( - tensor.transpose(1, 2) - .contiguous() - .view(batch_size * n_heads, seq_len, dim_per_head) - ) - return tensor - -###################################################################### -# Our named tensor variant uses ops that, though more verbose, also have -# more semantic meaning than ``view`` and ``transpose`` and include enforcable -# documentation in the form of names. -# -# **(III) Explicit broadcasting by names** - -def ignore(): - # (III) - attn_mask = (mask == 0).align_as(dot_prod) - - dot_prod.masked_fill_(attn_mask, -float(1e20)) - -###################################################################### -# ``mask`` usually has dims ``[N, T]`` (in the case of self attention) or -# ``[N, T, T_key]`` (in the case of encoder attention) while ``dot_prod`` -# has dims ``[N, H, T, T_key]``. To make ``mask`` broadcast correctly with -# ``dot_prod``, we would usually `unsqueeze` dims ``1`` and ``-1`` in the case of self -# attention or ``unsqueeze`` dim ``1`` in the case of encoder attention. Using -# named tensors, we simply align ``attn_mask`` to ``dot_prod`` using ``align_as`` -# and stop worrying about where to ``unsqueeze`` dims. -# -# **(IV) More dimension manipulation using align_to and flatten** - -def ignore(): - # (IV) - attentioned = ( - attn_weights.matmul(v).refine_names(..., 'H', 'T', 'D_head') - .align_to(..., 'T', 'H', 'D_head') - .flatten(['H', 'D_head'], 'D') - ) - -###################################################################### -# Here, as in (II), ``align_to`` and ``flatten`` are more semantically -# meaningful than ``view`` (despite being more verbose). -# -# Running the example -# ------------------- - -n, t, d, h = 7, 5, 2 * 3, 3 -query = torch.randn(n, t, d, names=('N', 'T', 'D')) -mask = torch.ones(n, t, names=('N', 'T')) -attn = MultiHeadAttention(h, d) -output = attn(query, mask=mask) -# works as expected! -print(output.names) - -###################################################################### -# The above works as expected. Furthermore, note that in the code we -# did not mention the name of the batch dimension at all. In fact, -# our ``MultiHeadAttention`` module is agnostic to the existence of batch -# dimensions. - -query = torch.randn(t, d, names=('T', 'D')) -mask = torch.ones(t, names=('T',)) -output = attn(query, mask=mask) -print(output.names) - -###################################################################### -# Conclusion -# ---------- -# -# Thank you for reading! Named tensors are still very much in development; -# if you have feedback and/or suggestions for improvement, please let us -# know by creating `an issue `_. 
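# [Editorial aside, not part of the diff] Before the revised file below, here
# is a minimal sketch of the (III) mask alignment described in the listing
# above, assuming the 1.3 ``align_as`` / ``masked_fill_`` behavior it relies
# on; the shapes and mask values are invented for illustration.

import torch

n, h, t = 2, 3, 4
dot_prod = torch.randn(n, h, t, t, names=('N', 'H', 'T', 'T_key'))
mask = torch.tensor([[1, 1, 0, 0],
                     [1, 1, 1, 0]]).refine_names('N', 'T')

# align_as inserts the size-one 'H' and 'T_key' dims that unsqueeze would have
attn_mask = (mask == 0).align_as(dot_prod)
print(attn_mask.shape)   # torch.Size([2, 1, 4, 1])

dot_prod.masked_fill_(attn_mask, -float(1e20))
print(dot_prod.names)    # ('N', 'H', 'T', 'T_key')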
+# -*- coding: utf-8 -*- +""" +(experimental) Introduction to Named Tensors in PyTorch +******************************************************* +**Author**: `Richard Zou `_ + +Named Tensors aim to make tensors easier to use by allowing users to associate +explicit names with tensor dimensions. In most cases, operations that take +dimension parameters will accept dimension names, avoiding the need to track +dimensions by position. In addition, named tensors use names to automatically +check that APIs are being used correctly at runtime, providing extra safety. +Names can also be used to rearrange dimensions, for example, to support +"broadcasting by name" rather than "broadcasting by position". + +This tutorial is intended as a guide to the functionality that will +be included with the 1.3 launch. By the end of it, you will be able to: + +- Create ``Tensor``s with named dimensions, as well as remove or rename those + dimensions +- Understand the basics of how operations propagate dimension names +- See how naming dimensions enables clearer code in two key areas: + - Broadcasting operations + - Flattening and unflattening dimensions + +Finally, we'll put this into practice by writing a multi-head attention module +using named tensors. + +Named tensors in PyTorch are inspired by and done in collaboration with +`Sasha Rush `_. +Sasha proposed the original idea and proof of concept in his +`January 2019 blog post `_. + +Basics: named dimensions +======================== + +PyTorch now allows Tensors to have named dimensions; factory functions +take a new `names` argument that associates a name with each dimension. +This works with most factory functions, such as + +- `tensor` +- `empty` +- `ones` +- `zeros` +- `randn` +- `rand` + +Here we construct a tensor with names: +""" + +import torch +imgs = torch.randn(1, 2, 2, 3, names=('N', 'C', 'H', 'W')) +print(imgs.names) + +###################################################################### +# Unlike in +# `the original named tensors blogpost `_, +# named dimensions are ordered: ``tensor.names[i]`` is the name of the ``i`` th +# dimension of ``tensor``. +# +# There are two ways to rename a ``Tensor``'s dimensions: + +# Method #1: set the .names attribute (this changes name in-place) +imgs.names = ['batch', 'channel', 'width', 'height'] +print(imgs.names) + +# Method #2: specify new names (this changes names out-of-place) +imgs = imgs.rename(channel='C', width='W', height='H') +print(imgs.names) + +###################################################################### +# The preferred way to remove names is to call ``tensor.rename(None)``: + +imgs = imgs.rename(None) +print(imgs.names) + +###################################################################### +# Unnamed tensors (tensors with no named dimensions) still work as +# normal and do not have names in their ``repr``. + +unnamed = torch.randn(2, 1, 3) +print(unnamed) +print(unnamed.names) + +###################################################################### +# Named tensors do not require that all dimensions be named. + +imgs = torch.randn(3, 1, 1, 2, names=('N', None, None, None)) +print(imgs.names) + +###################################################################### +# Because named tensors can coexist with unnamed tensors, we need a nice way to +# write named tensor-aware code that works with both named and unnamed tensors. +# Use ``tensor.refine_names(*names)`` to refine dimensions and lift unnamed +# dims to named dims. 
Refining a dimension is defined as a "rename" with the +# following constraints: +# +# - A ``None`` dim can be refined to have any name +# - A named dim can only be refined to have the same name. + +imgs = torch.randn(3, 1, 1, 2) +named_imgs = imgs.refine_names('N', 'C', 'H', 'W') +print(named_imgs.names) + +# Refine the last two dims to 'H' and 'W'. In Python 2, use the string '...' +# instead of ... +named_imgs = imgs.refine_names(..., 'H', 'W') +print(named_imgs.names) + + +def catch_error(fn): + try: + fn() + assert False + except RuntimeError as err: + err = str(err) + if len(err) > 180: + err = err[:180] + "..." + print(err) + + +named_imgs = imgs.refine_names('N', 'C', 'H', 'W') + +# Tried to refine an existing name to a different name +catch_error(lambda: named_imgs.refine_names('N', 'C', 'H', 'width')) + +###################################################################### +# Most simple operations propagate names. The ultimate goal for named tensors +# is for all operations to propagate names in a reasonable, intuitive manner. +# Support for many common operations has been added at the time of the 1.3 +# release; here, for example, is ``.abs()``: + +print(named_imgs.abs().names) + +###################################################################### +# Accessors and Reduction +# ----------------------- +# +# One can use dimension names to refer to dimensions instead of the positional +# dimension. These operations also propagate names. Indexing (basic and +# advanced) has not been implemented yet but is on the roadmap. Using the +# ``named_imgs`` tensor from above, we can do: + +output = named_imgs.sum('C') # Perform a sum over the channel dimension +print(output.names) + +img0 = named_imgs.select('N', 0) # get one image +print(img0.names) + +###################################################################### +# Name inference +# -------------- +# +# Names are propagated on operations in a two step process called +# **name inference**: +# +# 1. **Check names**: an operator may perform automatic checks at runtime that +# check that certain dimension names must match. +# 2. **Propagate names**: name inference propagates output names to output +# tensors. +# +# Let's go through the very small example of adding 2 one-dim tensors with no +# broadcasting. + +x = torch.randn(3, names=('X',)) +y = torch.randn(3) +z = torch.randn(3, names=('Z',)) + +###################################################################### +# **Check names**: first, we will check whether the names of these two tensors +# *match*. Two names match if and only if they are equal (string equality) or +# at least one is ``None`` (``None`` is essentially a special wildcard name). +# The only one of these three that will error, therefore, is ``x + z``: + +catch_error(lambda: x + z) + +###################################################################### +# **Propagate names**: *unify* the two names by returning the most refined name +# of the two. With ``x + y``, ``X`` is more refined than ``None``. + +print((x + y).names) + +###################################################################### +# Most name inference rules are straightforward but some of them can have +# unexpected semantics. Let's go through a couple you're likely to encounter: +# broadcasting and matrix multiply. +# +# Broadcasting +# ^^^^^^^^^^^^ +# +# Named tensors do not change broadcasting behavior; they still broadcast by +# position. 
However, when checking two dimensions for if they can be +# broadcasted, PyTorch also checks that the names of those dimensions match. +# +# This results in named tensors preventing unintended alignment during +# operations that broadcast. Without ``names``, ``per_batch_scale`` would be +# aligned with the last dimension of ``imgs``, which is what we intended. + +imgs = torch.randn(2, 2, 2, 2, names=('N', 'C', 'H', 'W')) +per_batch_scale = torch.rand(2, names=('N',)) +catch_error(lambda: imgs * per_batch_scale) + +###################################################################### +# However, we really wanted to perform the operation by aligning +# ``per_batch_scale`` with the batch dimension of ``imgs``. +# See the new explicit broadcasting by names functionality for how to +# align tensors by name, covered below. +# +# Matrix multiply +# ^^^^^^^^^^^^^^^ +# +# ``torch.mm(A, B)`` performs a dot product between the second dim of ``A`` +# and the first dim of ``B``, returning a tensor with the first dim of ``A`` +# and the second dim of ``B``. (other matmul functions, such as +# ``torch.matmul``, ``torch.mv``, and ``torch.dot``, behave similarly). + +markov_states = torch.randn(128, 5, names=('batch', 'D')) +transition_matrix = torch.randn(5, 5, names=('in', 'out')) + +# Apply one transition +new_state = markov_states @ transition_matrix +print(new_state.names) + +###################################################################### +# As you can see, matrix multiply does not check if the contracted dimensions +# have the same name. +# +# Next, we'll cover two new behaviors that named tensors enable: explicit +# broadcasting by names and flattening and unflattening dimensions by names +# +# New behavior: Explicit broadcasting by names +# -------------------------------------------- +# +# One of the main complaints about working with multiple dimensions is the need +# to ``unsqueeze`` "dummy" dimensions so that operations can occur. +# For example, in our per-batch-scale example before, with unnamed tensors +# we'd do the following: + +imgs = torch.randn(2, 2, 2, 2) # N, C, H, W +per_batch_scale = torch.rand(2) # N + +correct_result = imgs * per_batch_scale.view(2, 1, 1, 1) # N, C, H, W +incorrect_result = imgs * per_batch_scale.expand_as(imgs) +assert not torch.allclose(correct_result, incorrect_result) + +###################################################################### +# We can make these operations safer (and easily agnostic to the number of +# dimensions) by using names. We provide a new ``tensor.align_as(other)`` +# operation that permutes the dimensions of tensor to match the order specified +# in ``other.names``, adding one-sized dimensions where appropriate +# (``tensor.align_to(*names)`` works as well): + +imgs = imgs.refine_names('N', 'C', 'H', 'W') +per_batch_scale = per_batch_scale.refine_names('N') + +named_result = imgs * per_batch_scale.align_as(imgs) +# note: named tensors do not yet work with allclose +assert torch.allclose(named_result.rename(None), correct_result) + +###################################################################### +# New behavior: Flattening and unflattening dimensions by names +# ------------------------------------------------------------- +# +# One common operation is flattening and unflattening dimensions. 
Right now, +# users perform this using either ``view``, ``reshape``, or ``flatten``; use +# cases include flattening batch dimensions to send tensors into operators that +# must take inputs with a certain number of dimensions (i.e., conv2d takes 4D +# input). +# +# To make these operation more semantically meaningful than view or reshape, we +# introduce a new ``tensor.unflatten(dim, namedshape)`` method and update +# ``flatten`` to work with names: ``tensor.flatten(dims, new_dim)``. +# +# ``flatten`` can only flatten adjacent dimensions but also works on +# non-contiguous dims. One must pass into ``unflatten`` a **named shape**, +# which is a list of ``(dim, size)`` tuples, to specify how to unflatten the +# dim. It is possible to save the sizes during a ``flatten`` for ``unflatten`` +# but we do not yet do that. + +imgs = imgs.flatten(['C', 'H', 'W'], 'features') +print(imgs.names) + +imgs = imgs.unflatten('features', (('C', 2), ('H', 2), ('W', 2))) +print(imgs.names) + +###################################################################### +# Autograd support +# ---------------- +# +# Autograd currently supports named tensors in a limited manner: autograd +# ignores names on all tensors. Gradient computation is still correct but we +# lose the safety that names give us. It is on the roadmap to introduce +# handling of names to autograd. + +x = torch.randn(3, names=('D',)) +weight = torch.randn(3, names=('D',), requires_grad=True) +loss = (x - weight).abs() +grad_loss = torch.randn(3) +loss.backward(grad_loss) + +correct_grad = weight.grad.clone() +print(correct_grad) # Unnamed for now. Will be named in the future + +weight.grad.zero_() +grad_loss = grad_loss.refine_names('C') +loss = (x - weight).abs() +# Ideally we'd check that the names of loss and grad_loss match, but we don't +# yet +loss.backward(grad_loss) + +print(weight.grad) # still unnamed +assert torch.allclose(weight.grad, correct_grad) + +###################################################################### +# Other supported (and unsupported) features +# ------------------------------------------ +# +# See here (link to be included) for a detailed breakdown of what is +# supported with the 1.3 release. +# +# In particular, we want to call out three important features that are not +# currently supported: +# +# - Saving or loading named tensors via ``torch.save`` or ``torch.load`` +# - Multi-processing via ``torch.multiprocessing`` +# - JIT support; for example, the following will error + +imgs_named = torch.randn(1, 2, 2, 3, names=('N', 'C', 'H', 'W')) + + +@torch.jit.script +def fn(x): + return x + + +catch_error(lambda: fn(imgs_named)) + +###################################################################### +# As a workaround, please drop names via ``tensor = tensor.rename(None)`` +# before using anything that does not yet support named tensors. +# +# Longer example: Multi-head attention +# -------------------------------------- +# +# Now we'll go through a complete example of implementing a common +# PyTorch ``nn.Module``: multi-head attention. We assume the reader is already +# familiar with multi-head attention; for a refresher, check out +# `this explanation ` _ +# or +# `this explanation `_. +# +# We adapt the implementation of multi-head attention from +# `ParlAI `_; specifically +# `here `_. 
+# Read through the code at that example; then, compare with the code below, +# noting that there are four places labeled (I), (II), (III), and (IV), where +# using named tensors enables more readable code; we will dive into each of +# these after the code block. + +import torch.nn as nn +import torch.nn.functional as F +import math + + +class MultiHeadAttention(nn.Module): + def __init__(self, n_heads, dim, dropout=0): + super(MultiHeadAttention, self).__init__() + self.n_heads = n_heads + self.dim = dim + + self.attn_dropout = nn.Dropout(p=dropout) + self.q_lin = nn.Linear(dim, dim) + self.k_lin = nn.Linear(dim, dim) + self.v_lin = nn.Linear(dim, dim) + nn.init.xavier_normal_(self.q_lin.weight) + nn.init.xavier_normal_(self.k_lin.weight) + nn.init.xavier_normal_(self.v_lin.weight) + self.out_lin = nn.Linear(dim, dim) + nn.init.xavier_normal_(self.out_lin.weight) + + def forward(self, query, key=None, value=None, mask=None): + # (I) + query = query.refine_names(..., 'T', 'D') + self_attn = key is None and value is None + if self_attn: + mask = mask.refine_names(..., 'T') + else: + mask = mask.refine_names(..., 'T', 'T_key') # enc attn + + dim = query.size('D') + assert dim == self.dim, \ + f'Dimensions do not match: {dim} query vs {self.dim} configured' + assert mask is not None, 'Mask is None, please specify a mask' + n_heads = self.n_heads + dim_per_head = dim // n_heads + scale = math.sqrt(dim_per_head) + + # (II) + def prepare_head(tensor): + tensor = tensor.refine_names(..., 'T', 'D') + return (tensor.unflatten('D', [('H', n_heads), ('D_head', dim_per_head)]) + .align_to(..., 'H', 'T', 'D_head')) + + assert value is None + if self_attn: + key = value = query + elif value is None: + # key and value are the same, but query differs + key = key.refine_names(..., 'T', 'D') + value = key + dim = key.size('D') + + # Distinguish between query_len (T) and key_len (T_key) dims. + k = prepare_head(self.k_lin(key)).rename(T='T_key') + v = prepare_head(self.v_lin(value)).rename(T='T_key') + q = prepare_head(self.q_lin(query)) + + dot_prod = q.div_(scale).matmul(k.align_to(..., 'D_head', 'T_key')) + dot_prod.refine_names(..., 'H', 'T', 'T_key') # just a check + + # (III) + attn_mask = (mask == 0).align_as(dot_prod) + dot_prod.masked_fill_(attn_mask, -float(1e20)) + + attn_weights = self.attn_dropout(F.softmax(dot_prod / scale, + dim='T_key')) + + # (IV) + attentioned = ( + attn_weights.matmul(v).refine_names(..., 'H', 'T', 'D_head') + .align_to(..., 'T', 'H', 'D_head') + .flatten(['H', 'D_head'], 'D') + ) + + return self.out_lin(attentioned).refine_names(..., 'T', 'D') + +###################################################################### +# **(I) Refining the input tensor dims** + +def forward(self, query, key=None, value=None, mask=None): + # (I) + query = query.refine_names(..., 'T', 'D') + +###################################################################### +# The ``query = query.refine_names(..., 'T', 'D')`` serves as enforcable documentation +# and lifts input dimensions to being named. It checks that the last two dimensions +# can be refined to ``['T', 'D']``, preventing potentially silent or confusing size +# mismatch errors later down the line. 
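# [Editorial aside, not part of the patch] A minimal sketch of what the (I)
# check buys us, assuming only the 1.3 ``refine_names`` behavior shown earlier
# in the tutorial; the tensors and names below are invented for illustration.

import torch

# An unnamed trailing pair of dims can be lifted to 'T' and 'D'...
activations = torch.randn(7, 5, 6)
print(activations.refine_names(..., 'T', 'D').names)   # (None, 'T', 'D')

# ...but a tensor whose trailing dims are already named something else is
# rejected immediately, instead of failing later with a confusing size error.
mislabeled = torch.randn(7, 5, 6, names=('N', 'D', 'T'))
try:
    mislabeled.refine_names(..., 'T', 'D')
except RuntimeError as err:
    print(err)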
+# +# **(II) Manipulating dimensions in prepare_head** + +# (II) +def prepare_head(tensor): + tensor = tensor.refine_names(..., 'T', 'D') + return (tensor.unflatten('D', [('H', n_heads), ('D_head', dim_per_head)]) + .align_to(..., 'H', 'T', 'D_head')) + +###################################################################### +# The first thing to note is how the code clearly states the input and +# output dimensions: the input tensor must end with the ``T`` and ``D`` dims +# and the output tensor ends in ``H``, ``T``, and ``D_head`` dims. +# +# The second thing to note is how clearly the code describes what is going on. +# prepare_head takes the key, query, and value and splits the embedding dim into +# multiple heads, finally rearranging the dim order to be ``[..., 'H', 'T', 'D_head']``. +# ParlAI implements ``prepare_head`` as the following, using ``view`` and ``transpose`` +# operations: + +def prepare_head(tensor): + # input is [batch_size, seq_len, n_heads * dim_per_head] + # output is [batch_size * n_heads, seq_len, dim_per_head] + batch_size, seq_len, _ = tensor.size() + tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head) + tensor = ( + tensor.transpose(1, 2) + .contiguous() + .view(batch_size * n_heads, seq_len, dim_per_head) + ) + return tensor + +###################################################################### +# Our named tensor variant uses ops that, though more verbose, also have +# more semantic meaning than ``view`` and ``transpose`` and include enforcable +# documentation in the form of names. +# +# **(III) Explicit broadcasting by names** + +def ignore(): + # (III) + attn_mask = (mask == 0).align_as(dot_prod) + dot_prod.masked_fill_(attn_mask, -float(1e20)) + +###################################################################### +# ``mask`` usually has dims ``[N, T]`` (in the case of self attention) or +# ``[N, T, T_key]`` (in the case of encoder attention) while ``dot_prod`` +# has dims ``[N, H, T, T_key]``. To make ``mask`` broadcast correctly with +# ``dot_prod``, we would usually `unsqueeze` dims ``1`` and ``-1`` in the case +# of self attention or ``unsqueeze`` dim ``1`` in the case of encoder +# attention. Using named tensors, we simply align ``attn_mask`` to ``dot_prod`` +# using ``align_as`` and stop worrying about where to ``unsqueeze`` dims. +# +# **(IV) More dimension manipulation using align_to and flatten** + +def ignore(): + # (IV) + attentioned = ( + attn_weights.matmul(v).refine_names(..., 'H', 'T', 'D_head') + .align_to(..., 'T', 'H', 'D_head') + .flatten(['H', 'D_head'], 'D') + ) + +###################################################################### +# Here, as in (II), ``align_to`` and ``flatten`` are more semantically +# meaningful than ``view`` (despite being more verbose). +# +# Running the example +# ------------------- + +n, t, d, h = 7, 5, 2 * 3, 3 +query = torch.randn(n, t, d, names=('N', 'T', 'D')) +mask = torch.ones(n, t, names=('N', 'T')) +attn = MultiHeadAttention(h, d) +output = attn(query, mask=mask) +# works as expected! +print(output.names) + +###################################################################### +# The above works as expected. Furthermore, note that in the code we +# did not mention the name of the batch dimension at all. In fact, +# our ``MultiHeadAttention`` module is agnostic to the existence of batch +# dimensions. 
+ +query = torch.randn(t, d, names=('T', 'D')) +mask = torch.ones(t, names=('T',)) +output = attn(query, mask=mask) +print(output.names) + +###################################################################### +# Conclusion +# ---------- +# +# Thank you for reading! Named tensors are still very much in development; +# if you have feedback and/or suggestions for improvement, please let us +# know by creating `an issue `_. From 167921fe2fd5f0771f03c986fa214dd111ffc114 Mon Sep 17 00:00:00 2001 From: Richard Zou Date: Wed, 9 Oct 2019 14:39:37 -0700 Subject: [PATCH 09/12] Fix more typos --- intermediate_source/named_tensor_tutorial.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/intermediate_source/named_tensor_tutorial.py b/intermediate_source/named_tensor_tutorial.py index 35a8f3e20f9..dedd0fd7c76 100644 --- a/intermediate_source/named_tensor_tutorial.py +++ b/intermediate_source/named_tensor_tutorial.py @@ -193,15 +193,15 @@ def catch_error(fn): # # This results in named tensors preventing unintended alignment during # operations that broadcast. Without ``names``, ``per_batch_scale`` would be -# aligned with the last dimension of ``imgs``, which is what we intended. +# aligned with the last dimension of ``imgs``, which is not what we intended. imgs = torch.randn(2, 2, 2, 2, names=('N', 'C', 'H', 'W')) per_batch_scale = torch.rand(2, names=('N',)) catch_error(lambda: imgs * per_batch_scale) ###################################################################### -# However, we really wanted to perform the operation by aligning -# ``per_batch_scale`` with the batch dimension of ``imgs``. +# We really wanted to perform the operation by aligning ``per_batch_scale`` +# with the batch dimension of ``imgs``. # See the new explicit broadcasting by names functionality for how to # align tensors by name, covered below. # From b1cd8bb6ddff832f122881e4bb93a3acc9eb568c Mon Sep 17 00:00:00 2001 From: Richard Zou Date: Wed, 9 Oct 2019 14:53:58 -0700 Subject: [PATCH 10/12] Fix more typos --- intermediate_source/named_tensor_tutorial.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/intermediate_source/named_tensor_tutorial.py b/intermediate_source/named_tensor_tutorial.py index dedd0fd7c76..83fde0ad555 100644 --- a/intermediate_source/named_tensor_tutorial.py +++ b/intermediate_source/named_tensor_tutorial.py @@ -15,7 +15,7 @@ This tutorial is intended as a guide to the functionality that will be included with the 1.3 launch. By the end of it, you will be able to: -- Create ``Tensor``s with named dimensions, as well as remove or rename those +- Create Tensors with named dimensions, as well as remove or rename those dimensions - Understand the basics of how operations propagate dimension names - See how naming dimensions enables clearer code in two key areas: @@ -192,17 +192,19 @@ def catch_error(fn): # broadcasted, PyTorch also checks that the names of those dimensions match. # # This results in named tensors preventing unintended alignment during -# operations that broadcast. Without ``names``, ``per_batch_scale`` would be -# aligned with the last dimension of ``imgs``, which is not what we intended. +# operations that broadcast. In the below example, we apply a +# ``per_batch_scale`` to ``imgs``. 
imgs = torch.randn(2, 2, 2, 2, names=('N', 'C', 'H', 'W')) per_batch_scale = torch.rand(2, names=('N',)) catch_error(lambda: imgs * per_batch_scale) ###################################################################### -# We really wanted to perform the operation by aligning ``per_batch_scale`` -# with the batch dimension of ``imgs``. -# See the new explicit broadcasting by names functionality for how to +# Without ``names``, the ``per_batch_scale`` tensor is aligned with the last +# dimension of ``imgs``, which is not what we intended. We really wanted to +# perform the operation by aligning ``per_batch_scale`` with the batch +# dimension of ``imgs``. +# See the new "explicit broadcasting by names" functionality for how to # align tensors by name, covered below. # # Matrix multiply From 1399cad1762f0ee1ce14c902c772be2bbfd1dd97 Mon Sep 17 00:00:00 2001 From: Richard Zou Date: Wed, 9 Oct 2019 14:59:46 -0700 Subject: [PATCH 11/12] More typo fixes --- intermediate_source/named_tensor_tutorial.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/intermediate_source/named_tensor_tutorial.py b/intermediate_source/named_tensor_tutorial.py index 83fde0ad555..3b38cac9a1c 100644 --- a/intermediate_source/named_tensor_tutorial.py +++ b/intermediate_source/named_tensor_tutorial.py @@ -288,10 +288,10 @@ def catch_error(fn): # Autograd support # ---------------- # -# Autograd currently supports named tensors in a limited manner: autograd -# ignores names on all tensors. Gradient computation is still correct but we -# lose the safety that names give us. It is on the roadmap to introduce -# handling of names to autograd. +# Autograd currently ignores names on all tensors and just treats them like +# regular tensors. Gradient computation is correct but we lose the safety that +# names give us. It is on the roadmap to introduce handling of names to +# autograd. x = torch.randn(3, names=('D',)) weight = torch.randn(3, names=('D',), requires_grad=True) @@ -316,8 +316,8 @@ def catch_error(fn): # Other supported (and unsupported) features # ------------------------------------------ # -# See here (link to be included) for a detailed breakdown of what is -# supported with the 1.3 release. +# `See here `_ for a +# detailed breakdown of what is supported with the 1.3 release. # # In particular, we want to call out three important features that are not # currently supported: @@ -346,7 +346,7 @@ def fn(x): # Now we'll go through a complete example of implementing a common # PyTorch ``nn.Module``: multi-head attention. We assume the reader is already # familiar with multi-head attention; for a refresher, check out -# `this explanation ` _ +# `this explanation `_ # or # `this explanation `_. 
#
# We adapt the implementation of multi-head attention from
# `ParlAI `_; specifically
# `here `_.

From 1ac281b91afe49cb81a2f0b9d74b2b14afca0ba9 Mon Sep 17 00:00:00 2001
From: Richard Zou
Date: Wed, 9 Oct 2019 15:04:44 -0700
Subject: [PATCH 12/12] More fixes

---
 intermediate_source/named_tensor_tutorial.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/intermediate_source/named_tensor_tutorial.py b/intermediate_source/named_tensor_tutorial.py
index 3b38cac9a1c..09946a50809 100644
--- a/intermediate_source/named_tensor_tutorial.py
+++ b/intermediate_source/named_tensor_tutorial.py
@@ -480,8 +480,8 @@ def prepare_head(tensor):
     return tensor
 
 ######################################################################
-# Our named tensor variant uses ops that, though more verbose, also have
-# more semantic meaning than ``view`` and ``transpose`` and include enforcable
+# Our named tensor variant uses ops that, though more verbose, have more
+# semantic meaning than ``view`` and ``transpose`` and includes enforceable
 # documentation in the form of names.
 #
 # **(III) Explicit broadcasting by names**
@@ -512,7 +512,7 @@ def ignore():
 
 ######################################################################
 # Here, as in (II), ``align_to`` and ``flatten`` are more semantically
-# meaningful than ``view`` (despite being more verbose).
+# meaningful than ``view`` and ``transpose`` (despite being more verbose).
 #
 # Running the example
 # -------------------
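# [Editorial aside, not part of the patches] A closing sketch lining up the
# two styles contrasted in the final hunk, assuming the 1.3 API used
# throughout the tutorial; sizes are invented, and ``allclose`` is called on
# the un-named result because, as the tutorial notes, named tensors do not
# yet work with ``allclose``.

import torch

n, h, t, d_head = 2, 3, 5, 4
attn_out = torch.randn(n, h, t, d_head)

# Unnamed version: positional bookkeeping via transpose/contiguous/view
unnamed = attn_out.transpose(1, 2).contiguous().view(n, t, h * d_head)

# Named version: the same rearrangement, spelled out by dimension names
named = (attn_out.refine_names('N', 'H', 'T', 'D_head')
                 .align_to('N', 'T', 'H', 'D_head')
                 .flatten(['H', 'D_head'], 'D'))

print(named.names)   # ('N', 'T', 'D')
assert torch.allclose(named.rename(None), unnamed)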