#include "torch/csrc/autograd/VariableTypeUtils.h"
#include "torch/csrc/autograd/generated/VariableType.h"
#include "torch/csrc/autograd/FunctionsManual.h"

#include <ATen/RedispatchFunctions.h>
#include <c10/core/impl/TorchDispatchModeTLS.h>
#include <ATen/core/TorchDispatchUtils.h>
#include <torch/library.h>

#include <ATen/SparseCsrTensorUtils.h>


// ${generated_comment}

// NOTE [Sharded File]: on this file's split-into-shards state
//
// Back in the good old days, VariableType.cpp was generated as one
// file with every function in it, and everything was great and
// simple.
//
// However, this file was also very large (over 36,000 lines), and
// compiling it was very slow, and in fact was a significant
// bottleneck for incremental rebuilds. To address this, we now
// generate the file split across multiple shards, named
// VariableType_0.cpp and so on, which can be compiled in parallel.
//
// For ease of inspection and debugging, so that it's not necessary to
// go rooting around in multiple files, we also generate all the
// functions together in VariableTypeEverything.cpp. This generated
// file is only for convenience; it's not actually used in the
// build. If the file you're looking at now is one of the shards, you
// may want to switch over to the Everything variant to make your
// grepping smoother.

using namespace at;
using namespace torch::autograd::generated;
using namespace torch::autograd::generated::details;


namespace torch::autograd {

namespace VariableType {
namespace {
  // Drop the cached grad accumulator for `self`; a fresh one will be
  // created lazily the next time it is needed.
  C10_UNUSED void reset_grad_accumulator(Variable & self) {
    AutogradMeta* meta = torch::autograd::impl::get_autograd_meta(self);
    if (meta != nullptr) {
      meta->grad_accumulator_.reset();
    }
  }
}

namespace {


${type_derived_method_definitions}
}
}

namespace {

${wrapper_registrations}

}

} // namespace torch::autograd