@@ -207,7 +207,7 @@
 <div class="pytorch-left-menu-search">

   <div class="version">
-    <a href='https://pytorch.org/docs/versions.html'>master (1.12.0a0+git0d66748) ▼</a>
+    <a href='https://pytorch.org/docs/versions.html'>master (1.12.0a0+git2a7f9f0) ▼</a>
   </div>


@@ -714,7 +714,7 @@ <h1>Source code for torch._tensor</h1><div class="highlight"><pre>
         <span class="c1"># All strings are unicode in Python 3.</span>
         <span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">_tensor_str</span><span class="o">.</span><span class="n">_str</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span>

-<div class="viewcode-block" id="Tensor.backward"><a class="viewcode-back" href="../../generated/torch.Tensor.backward.html#torch.Tensor.backward">[docs]</a>    <span class="k">def</span> <span class="nf">backward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">gradient</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">retain_graph</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">create_graph</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">inputs</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
+    <span class="k">def</span> <span class="nf">backward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">gradient</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">retain_graph</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">create_graph</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">inputs</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
         <span class="sa">r</span><span class="sd">"""Computes the gradient of current tensor w.r.t. graph leaves.</span>

 <span class="sd">        The graph is differentiated using the chain rule. If the tensor is</span>
@@ -770,7 +770,7 @@ <h1>Source code for torch._tensor</h1><div class="highlight"><pre>
                 <span class="n">retain_graph</span><span class="o">=</span><span class="n">retain_graph</span><span class="p">,</span>
                 <span class="n">create_graph</span><span class="o">=</span><span class="n">create_graph</span><span class="p">,</span>
                 <span class="n">inputs</span><span class="o">=</span><span class="n">inputs</span><span class="p">)</span>
-        <span class="n">torch</span><span class="o">.</span><span class="n">autograd</span><span class="o">.</span><span class="n">backward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">gradient</span><span class="p">,</span> <span class="n">retain_graph</span><span class="p">,</span> <span class="n">create_graph</span><span class="p">,</span> <span class="n">inputs</span><span class="o">=</span><span class="n">inputs</span><span class="p">)</span></div>
+        <span class="n">torch</span><span class="o">.</span><span class="n">autograd</span><span class="o">.</span><span class="n">backward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">gradient</span><span class="p">,</span> <span class="n">retain_graph</span><span class="p">,</span> <span class="n">create_graph</span><span class="p">,</span> <span class="n">inputs</span><span class="o">=</span><span class="n">inputs</span><span class="p">)</span>

     <span class="k">def</span> <span class="nf">register_hook</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">hook</span><span class="p">):</span>
         <span class="sa">r</span><span class="sd">"""Registers a backward hook.</span>
@@ -872,14 +872,14 @@ <h1>Source code for torch._tensor</h1><div class="highlight"><pre>
 <span class="s2">            have forward mode AD gradients.</span>
 <span class="s2">            """</span><span class="p">)</span>

-    <span class="k">def</span> <span class="nf">is_shared</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
+<div class="viewcode-block" id="Tensor.is_shared"><a class="viewcode-back" href="../../generated/torch.Tensor.is_shared.html#torch.Tensor.is_shared">[docs]</a>    <span class="k">def</span> <span class="nf">is_shared</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
         <span class="sa">r</span><span class="sd">"""Checks if tensor is in shared memory.</span>

 <span class="sd">        This is always ``True`` for CUDA tensors.</span>
 <span class="sd">        """</span>
         <span class="k">if</span> <span class="n">has_torch_function_unary</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
             <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">Tensor</span><span class="o">.</span><span class="n">is_shared</span><span class="p">,</span> <span class="p">(</span><span class="bp">self</span><span class="p">,),</span> <span class="bp">self</span><span class="p">)</span>
-        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">storage</span><span class="p">()</span><span class="o">.</span><span class="n">is_shared</span><span class="p">()</span>
+        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">storage</span><span class="p">()</span><span class="o">.</span><span class="n">is_shared</span><span class="p">()</span></div>

     <span class="k">def</span> <span class="nf">share_memory_</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
         <span class="sa">r</span><span class="sd">"""Moves the underlying storage to shared memory.</span>
@@ -938,7 +938,7 @@ <h1>Source code for torch._tensor</h1><div class="highlight"><pre>
         <span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">stft</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">n_fft</span><span class="p">,</span> <span class="n">hop_length</span><span class="p">,</span> <span class="n">win_length</span><span class="p">,</span> <span class="n">window</span><span class="p">,</span> <span class="n">center</span><span class="p">,</span>
                           <span class="n">pad_mode</span><span class="p">,</span> <span class="n">normalized</span><span class="p">,</span> <span class="n">onesided</span><span class="p">,</span> <span class="n">return_complex</span><span class="o">=</span><span class="n">return_complex</span><span class="p">)</span>

-    <span class="k">def</span> <span class="nf">istft</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">n_fft</span><span class="p">:</span> <span class="nb">int</span><span class="p">,</span> <span class="n">hop_length</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
+<div class="viewcode-block" id="Tensor.istft"><a class="viewcode-back" href="../../generated/torch.Tensor.istft.html#torch.Tensor.istft">[docs]</a>    <span class="k">def</span> <span class="nf">istft</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">n_fft</span><span class="p">:</span> <span class="nb">int</span><span class="p">,</span> <span class="n">hop_length</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
               <span class="n">win_length</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span> <span class="n">window</span><span class="p">:</span> <span class="s1">'Optional[Tensor]'</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
               <span class="n">center</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="kc">True</span><span class="p">,</span> <span class="n">normalized</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="kc">False</span><span class="p">,</span>
               <span class="n">onesided</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">bool</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span> <span class="n">length</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
@@ -951,7 +951,7 @@ <h1>Source code for torch._tensor</h1><div class="highlight"><pre>
                 <span class="n">return_complex</span><span class="o">=</span><span class="n">return_complex</span>
             <span class="p">)</span>
         <span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">istft</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">n_fft</span><span class="p">,</span> <span class="n">hop_length</span><span class="p">,</span> <span class="n">win_length</span><span class="p">,</span> <span class="n">window</span><span class="p">,</span> <span class="n">center</span><span class="p">,</span>
-                           <span class="n">normalized</span><span class="p">,</span> <span class="n">onesided</span><span class="p">,</span> <span class="n">length</span><span class="p">,</span> <span class="n">return_complex</span><span class="o">=</span><span class="n">return_complex</span><span class="p">)</span>
+                           <span class="n">normalized</span><span class="p">,</span> <span class="n">onesided</span><span class="p">,</span> <span class="n">length</span><span class="p">,</span> <span class="n">return_complex</span><span class="o">=</span><span class="n">return_complex</span><span class="p">)</span></div>

     <span class="k">def</span> <span class="nf">resize</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">sizes</span><span class="p">):</span>
         <span class="k">if</span> <span class="n">has_torch_function_unary</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>