#include <torch/csrc/jit/runtime/operator.h>
#include <torch/csrc/jit/runtime/custom_operator.h>
#include <torch/csrc/jit/runtime/register_ops_utils.h>

#include <ATen/UnboxingFunctions.h>

// ${generated_comment}

// NOTE [Sharded File]: This file is generated in a sharded fashion to speed up
// incremental rebuilds. See the comment at the top of
// templates/VariableType.cpp for an analogous, in-depth discussion.
//
// Generated by tools/jit/gen_unboxing.py. This file registers all ATen ops into the JIT op registry instead of the
// c10 dispatcher. The JIT op registry only accepts boxed kernels, so we call the unboxing functions declared in
// UnboxingFunctions.h to cast arguments from IValue to C++ types and delegate to the unboxed kernels.

namespace torch { namespace jit {

using autograd::Variable;
using autograd::variable_list;
using at::Scalar;
using at::ScalarType;
using at::Tensor;
using at::TensorOptions;
using at::DeviceGuard;

using ::c10::fmap;
using ::c10::filter;

namespace {

RegisterOperators reg({

    // Generated operators
    ${unboxed_ops}
});

} // anon namespace

}} // namespace torch::jit
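
// The ${unboxed_ops} placeholder above is filled in by tools/jit/gen_unboxing.py.
// As an illustrative sketch only (not actual generated output), each entry is
// expected to pair an operator schema with a boxed lambda that forwards the
// IValue stack to a generated helper in the at::unboxing namespace, roughly:
//
//   OperatorGenerator(
//       TORCH_SELECTIVE_SCHEMA("aten::cos(Tensor self) -> Tensor"),
//       [](Stack & stack) {
//           // The generated helper pops the IValue arguments, casts them to
//           // C++ types, and calls the unboxed at::cos kernel.
//           at::unboxing::cos(stack);
//       },
//       aliasAnalysisFromSchema()),
//
// The helper name at::unboxing::cos and the exact registration arguments are
// assumptions for illustration; the real entries come from the code generator.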