#pragma once

// ${generated_comment}

#include <ATen/ATen.h>
#include <ATen/core/functional.h>
#include <ATen/TensorGeometry.h>

#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/autograd/saved_variable.h>
#include <torch/csrc/Export.h>

#include <c10/core/SymIntArrayRef.h>
namespace torch::autograd::generated {

using at::Scalar;
using at::Tensor;
using at::IntArrayRef;
using at::ArrayRef;
using at::Type;
using at::TensorGeometry;
using at::ScalarType;
using std::optional;
using c10::fmap;

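// Unpack a list of SavedVariables back into plain Tensors. `saved_for` is the
// Node the variables were saved for; it is forwarded to SavedVariable::unpack.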
inline std::vector<Tensor> unpack_list(at::ArrayRef<SavedVariable> xs, std::shared_ptr<Node> saved_for = nullptr) {
  // NB: we must do the conversion explicitly in the lambda; otherwise template
  // deduction produces a std::vector<Variable>, which is not convertible here.
  return fmap(xs, [&saved_for](const SavedVariable& x) {
    // TODO(crcrpar): Use `std::move(saved_for)` to avoid incrementing the refcount, which would require refactoring.
    return static_cast<Tensor>(x.unpack(saved_for));
  });
}

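// Like unpack_list, but yields an optional-Tensor list: undefined saved
// variables unpack to std::nullopt rather than to an undefined Tensor.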
inline c10::List<std::optional<Tensor>> unpack_opt_list(at::ArrayRef<SavedVariable> xs, std::shared_ptr<Node> saved_for = nullptr) {
  c10::List<std::optional<Tensor>> result;
  result.reserve(xs.size());
  for (const SavedVariable& v : xs) {
    auto var = v.unpack(saved_for);
    result.push_back(var.defined() ? std::optional<Tensor>(var) : std::nullopt);
  }
  return result;
}

using torch::autograd::TypeAndSize;

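// Each declaration emitted below is a Node subclass holding the SavedVariables
// for one differentiable op. As an illustrative sketch only (AbsBackward0 is
// just an example; field names and the exact shape of the generated struct
// vary per op), a generated declaration looks roughly like:
//
//   struct TORCH_API AbsBackward0 : public TraceableFunction {
//     using TraceableFunction::TraceableFunction;
//     variable_list apply(variable_list&& grads) override;
//     std::string name() const override { return "AbsBackward0"; }
//     void release_variables() override;
//     SavedVariable self_;
//   };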
${autograd_function_declarations}

} // namespace torch::autograd::generated