#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// ${generated_comment}

#include <Python.h>

// Undefine the copysign macro so that at::copysign works as intended with MSVC
// https://github.com/python/cpython/blob/c60394c7fc9cc09b16e9675a3eeb5844b6d8523f/PC/pyconfig.h#L196
#ifdef _MSC_VER
#undef copysign
#endif // _MSC_VER

#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/Size.h"
#include "torch/csrc/autograd/generated/VariableType.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/python_arg_parsing.h"
#include "torch/csrc/autograd/utils/error_messages.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/jit/frontend/tracer.h"
#ifdef USE_CUDA
#include "torch/csrc/cuda/Event.h"
#endif
#include "torch/csrc/utils/device_lazy_init.h"
#include <torch/csrc/utils/numpy_stub.h>
#include "torch/csrc/utils/object_ptr.h"
#include "torch/csrc/utils/pycfunction_helpers.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/python_numbers.h"
#include "torch/csrc/utils/python_strings.h"
#include "torch/csrc/utils/python_tuples.h"
#include "torch/csrc/utils/tensor_apply.h"
#include "torch/csrc/utils/tensor_list.h"
#include "torch/csrc/utils/tensor_new.h"
#include "torch/csrc/utils/tensor_numpy.h"
#include "torch/csrc/utils/tensor_types.h"
#include "torch/csrc/utils/structseq.h"
#include "torch/csrc/autograd/generated/python_return_types.h"

#include <ATen/core/Tensor.h>
#include <ATen/FuncTorchTLS.h>
#include "c10/util/Optional.h"
#include "c10/core/Stream.h"

#include <stdexcept>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
$ops_headers
#include <ATen/ops/_local_scalar_dense.h>
#endif
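
// Note: `${generated_comment}`, `$ops_headers`, `${py_methods}`, and
// `${py_method_defs}` are placeholders that PyTorch's code generation fills in
// when this template is expanded into the generated python_variable_methods.cpp;
// they are not valid C++ as written here.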

using at::DeviceGuard;
using at::device_of;
using at::OptionalDeviceGuard;
using at::Backend;
using at::Scalar;
using at::ScalarType;
using at::Tensor;
using c10::Stream;
using namespace torch::autograd::utils;

namespace torch::autograd {

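// The handwritten bindings in this file follow a common pattern: each method
// first checks check_has_torch_function(self) and defers to
// handle_torch_function() so that tensor subclasses overriding
// __torch_function__ can intercept the call, and only then unpacks the
// underlying at::Tensor with THPVariable_Unpack and does the actual work.
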
static PyObject * THPVariable__is_view(PyObject *self, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "_is_view", args);
  }
  auto& self_ = THPVariable_Unpack(self);
  if (self_.is_view()) {
    Py_RETURN_TRUE;
  } else {
    Py_RETURN_FALSE;
  }
  END_HANDLE_TH_ERRORS
}

// implemented on the python object because there is no support for first-class functions in native_functions.yaml
// See: ATen/native/README.md for more context
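// Rough usage (illustrative): `t.apply_(lambda x: x + 1)` applies a Python
// callable elementwise in place; per the torch.Tensor.apply_ docs this only
// works for CPU tensors and is meant for convenience rather than performance.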
static PyObject * THPVariable_apply_(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    auto args = py::make_tuple(py::handle(arg));
    return handle_torch_function(self, "apply_", args.ptr());
  }
  auto& self_ = THPVariable_Unpack(self);
  if (self_.requires_grad()) {
    throw std::runtime_error(
        "Can't call apply_() on Variable that requires grad. Use "
        "var.detach().apply_() instead.");
  }
  return THPVariable_Wrap(torch::utils::apply_(self_, arg));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_size(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "size(int64_t? dim=None)",
    "size(Dimname dim)",
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  if (r.idx == 0) {
    if (!r.toInt64Optional(0).has_value()) {
      return THPSize_NewFromSymSizes(self_);
    }
    if (jit::tracer::isTracing()) {
      // will error out if a tensor has symints
      return wrap(jit::tracer::getSizeOf(self_, r.toInt64(0)));
    } else {
      return torch::toPyObject(self_.sym_size(r.toInt64(0)));
    }
  } else if (r.idx == 1) {
    if (jit::tracer::isTracing()) {
      TORCH_INTERNAL_ASSERT(false, "NYI: Named tensors w/ JIT");
    }
    return wrap(self_.size(r.dimname(0)));
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_stride(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "stride(int64_t? dim=None)",
    "stride(Dimname dim)",
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  if (r.idx == 0) {
    if (r.toInt64Optional(0).has_value()) {
      return torch::toPyObject(self_.sym_stride(r.toInt64(0)));
    }
    // yes, this is called strides in ATen.
    at::SymIntArrayRef strides = self_.sym_strides();
    // we can't do the normal wrapping here because IntArrayRef maps to both
    // torch.Size and tuple in python
    // TODO: consider factoring this out
    THPObjectPtr tuple(PyTuple_New(strides.size()));
    if (!tuple) throw python_error();
    for (size_t i = 0; i != strides.size(); i++) {
      PyObject* s = torch::toPyObject(strides[i]);
      if (!s) throw python_error();
      PyTuple_SET_ITEM(tuple.get(), i, s);
    }
    return tuple.release();
  } else if (r.idx == 1) {
    return wrap(self_.stride(r.dimname(0)));
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_get_device(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self_)) {
    return handle_torch_function(self_, "get_device", args, nullptr);
  }
  auto& self = THPVariable_Unpack(self_);
  return wrap(self.get_device());
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_has_names(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self_)) {
    return handle_torch_function(self_, "has_names", args);
  }
  auto& self = THPVariable_Unpack(self_);
  return wrap(self.has_names());
  END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_data_ptr(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self_)) {
    return handle_torch_function(self_, "data_ptr", args);
  }
  auto& self = THPVariable_Unpack(self_);
  return wrap(self.data_ptr());
  END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_storage_offset(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self_)) {
    return handle_torch_function(self_, "storage_offset");
  }
  auto& self = THPVariable_Unpack(self_);
  return py::cast(self.sym_storage_offset()).release().ptr();
  END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_dim(PyObject* self, PyObject* args)
{
   HANDLE_TH_ERRORS
   if (check_has_torch_function(self)) {
     return handle_torch_function(self, "dim", args);
   }
   auto& self_ = THPVariable_Unpack(self);
   return THPUtils_packInt64(self_.dim());
   END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_numel(PyObject* self, PyObject* args)
{
   HANDLE_TH_ERRORS
   if (check_has_torch_function(self)) {
     return handle_torch_function(self, "numel", args);
   }
   auto& self_ = THPVariable_Unpack(self);
   if (jit::tracer::isTracing()) {
     return wrap(jit::tracer::getNumelOf(self_));
   } else {
     return py::cast(self_.sym_numel()).release().ptr();
   }
   END_HANDLE_TH_ERRORS
}

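// The dispatch_* helpers below release the GIL and, where relevant, switch to
// the tensor's device via OptionalDeviceGuard before calling into ATen, so
// that long-running kernels do not block other Python threads.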
static Tensor dispatch_contiguous(const Tensor & self, at::MemoryFormat memory_format) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.contiguous(memory_format);
}

static PyObject * THPVariable_contiguous(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "contiguous(*, MemoryFormat memory_format=contiguous_format)",
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto& self_ = THPVariable_Unpack(self);
  auto memory_format = r.memoryformat(0);
  // avoids touching the GIL or current device if self is already contiguous
  if (self_.is_contiguous(memory_format)) {
    // NOTE: this logic is duplicated from VariableType.cpp. Since we need to
    // record this call to contiguous() in the trace regardless of whether
    // we actually call contiguous here, we need to record this information
    // manually.
    if (jit::tracer::isTracing()) {
      auto tracer_state = jit::tracer::getTracingState();
      auto op_name = c10::Symbol::fromQualString("aten::contiguous");
      auto node = tracer_state->createNode(op_name, /*num_outputs=*/0);
      jit::tracer::recordSourceLocation(node);
      jit::tracer::addInputs(node, "self", self_);
      jit::tracer::addInputs(node, "memory_format", memory_format);
      tracer_state->insertNode(node);
      jit::tracer::addOutput(node, self_);
    }
    Py_INCREF(self);
    return self;
  }
  return THPVariable_Wrap(dispatch_contiguous(self_, memory_format));
  END_HANDLE_TH_ERRORS
}

static Tensor dispatch_copy_(const Tensor & self, const Tensor & other, bool non_blocking) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.copy_(other, non_blocking);
}

static PyObject * THPVariable_copy_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "copy_(Tensor other, bool non_blocking=False)",
    "copy_(Tensor other, bool async=False)|deprecated"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  return THPVariable_Wrap(dispatch_copy_(self_, r.tensor(0), r.toBool(1)));
  END_HANDLE_TH_ERRORS
}

template<typename T>
static T dispatch_to(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  TORCH_CHECK_VALUE(self.sym_numel() == 1, "only one element tensors can be converted to Python scalars");
  return self.template item<T>();
}

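// The *_scalar bindings below implement Python's numeric conversion protocols
// (__float__, __complex__, __int__/__long__). Illustrative behavior:
// float(torch.tensor([1.5])) returns 1.5, while float(torch.ones(2)) raises a
// ValueError because only one-element tensors can be converted to Python scalars.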
static PyObject * THPVariable_float_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "__float__", args);
  }
  jit::tracer::warn("Converting a tensor to a Python float", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = THPVariable_Unpack(self);
  return wrap(dispatch_to<double>(self_));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_complex_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "__complex__", args);
  }
  jit::tracer::warn("Converting a tensor to a Python complex", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = THPVariable_Unpack(self);
  return wrap(dispatch_to<c10::complex<double>>(self_));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_integral_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "__int__", args);
  }
  jit::tracer::warn("Converting a tensor to a Python integer", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = THPVariable_Unpack(self);
  if (isFloatingType(self_.scalar_type())) {
    // we can't dispatch to item<int64_t> here because we want to avoid ATen overflow checks;
    // the python integral type (long in python2) can't overflow.
    return THPUtils_packDoubleAsInt(dispatch_to<double>(self_));
  } else {
    return wrap(dispatch_to<int64_t>(self_));
  }
  END_HANDLE_TH_ERRORS
}

// This is the __index__ function in Python which is similar to __int__, but
// called when used as a slice.
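// For example (illustrative): using a one-element integer tensor as a Python
// index or slice bound, as in `some_list[torch.tensor(1)]` or
// `x[:torch.tensor(2)]`, goes through __index__; non-integer or multi-element
// tensors are rejected below.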
static PyObject * THPVariable_index_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "__index__", args);
  }
  auto& self_ = THPVariable_Unpack(self);
  // TODO: change the condition to `self_.dim() != 0` once we expose scalars
  // in PyTorch.
  if (!isIntegralType(self_.scalar_type(), /*includeBool=*/true) || self_.sym_numel() != 1) {
    throw TypeError("only integer tensors of a single element can be converted to an index");
  }
  return wrap(dispatch_to<int64_t>(self_));
  END_HANDLE_TH_ERRORS
}

static Tensor dispatch_invert(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.bitwise_not();
}

static PyObject * THPVariable_invert(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "__invert__", args);
  }
  auto& self_ = THPVariable_Unpack(self);
  if (!isIntegralType(self_.scalar_type(), /*includeBool=*/true)) {
    throw TypeError("~ (operator.invert) is only implemented on integer and Boolean-type tensors");
  }
  return THPVariable_Wrap(dispatch_invert(self_));
  END_HANDLE_TH_ERRORS
}

static Tensor dispatch_to(const Tensor & self, Device device, bool non_blocking, bool copy, std::optional<c10::MemoryFormat> optional_memory_format) {
  pybind11::gil_scoped_release no_gil;
  // NOTE: this is where we record aten::to in the graph during tracing. However, the behavior of aten::to
  // is different with respect to TensorOptions fields that are not present: aten::to inherits fields that
  // are missing from the self argument while the tracer assumes that they should be populated with the
  // default values (e.g. float for scalar type). By explicitly copying over the tensor options here we fully
  // specify all tensor options and thus record the proper trace.
  return self.to(self.options().device(device).memory_format(optional_memory_format), non_blocking, copy);
}

static Tensor dispatch_to(const Tensor & self, bool non_blocking, bool copy, std::optional<c10::MemoryFormat> optional_memory_format) {
  pybind11::gil_scoped_release no_gil;
  return self.to(self.options().memory_format(optional_memory_format), non_blocking, copy);
}

static Tensor dispatch_to(const Tensor & self, ScalarType dtype, bool non_blocking, bool copy, std::optional<c10::MemoryFormat> optional_memory_format) {
  pybind11::gil_scoped_release no_gil;
  // TODO: Make this call the TensorOptions version, maybe?
  return self.to(dtype, non_blocking, copy, optional_memory_format);
}

static Tensor dispatch_to(const Tensor & self, Device device, ScalarType dtype, bool non_blocking, bool copy, std::optional<c10::MemoryFormat> optional_memory_format) {
  pybind11::gil_scoped_release no_gil;
  // TODO: Make this call the TensorOptions version, maybe?
  return self.to(device, dtype, non_blocking, copy, optional_memory_format);
}

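// The cpu()/cuda()/mtia()/xpu()/ipu() bindings below are thin wrappers over
// the dispatch_to() overloads above: each resolves the target device (falling
// back to the backend's default device when none is given), checks it belongs
// to the expected backend, lazily initializes the backend where applicable,
// and then forwards to dispatch_to().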
static PyObject * THPVariable_cpu(PyObject* self, PyObject* args, PyObject* kwargs)
{
   HANDLE_TH_ERRORS
   static PythonArgParser parser({
     "cpu(*, MemoryFormat? memory_format=None)"
   });
   auto& self_ = THPVariable_Unpack(self);
   ParsedArgs<1> parsed_args;
   auto r = parser.parse(self, args, kwargs, parsed_args);

   if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
    }

   auto opt_memory_format = r.memoryformatOptional(0);
   return THPVariable_Wrap(dispatch_to(self_, at::Device(at::DeviceType::CPU), false, false, opt_memory_format));
   END_HANDLE_TH_ERRORS
}

static Tensor dispatch_nonzero(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.nonzero();
}

static std::vector<Tensor> dispatch_nonzero_numpy(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.nonzero_numpy();
}

static PyObject * THPVariable_nonzero(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "nonzero()",
    "nonzero(*, bool as_tuple)",
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  if (r.idx == 0 || (r.idx == 1 && !r.toBool(0))) {
    return wrap(dispatch_nonzero(self_));
  } else {
    return wrap(dispatch_nonzero_numpy(self_));
  }
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_cuda(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "cuda(Device? device=None, bool non_blocking=False, *, MemoryFormat? memory_format=None)",
    "cuda(Device? device=None, bool async=False, *, MemoryFormat? memory_format=None)|deprecated"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto device = r.isNone(0) ? at::Device(at::DeviceType::CUDA) : r.device(0);
  auto opt_memory_format = r.memoryformatOptional(2);
  TORCH_CHECK(device.is_cuda(), "Invalid device, must be cuda device");
  torch::utils::device_lazy_init(at::kCUDA);
  return THPVariable_Wrap(dispatch_to(self_, device, r.toBool(1), false, opt_memory_format));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_mtia(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "mtia(Device? device=None, bool non_blocking=False, *, MemoryFormat? memory_format=None)",
    "mtia(Device? device=None, bool async=False, *, MemoryFormat? memory_format=None)|deprecated"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto device = r.isNone(0) ? at::Device(at::DeviceType::MTIA) : r.device(0);
  auto opt_memory_format = r.memoryformatOptional(2);
  TORCH_CHECK(device.is_mtia(), "Invalid device, must be MTIA device");
  torch::utils::device_lazy_init(at::kMTIA);
  return THPVariable_Wrap(dispatch_to(self_, device, r.toBool(1), false, opt_memory_format));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_xpu(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "xpu(Device? device=None, bool non_blocking=False, *, MemoryFormat? memory_format=None)",
    "xpu(Device? device=None, bool async=False, *, MemoryFormat? memory_format=None)|deprecated"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto device = r.isNone(0) ? at::Device(at::DeviceType::XPU) : r.device(0);
  auto opt_memory_format = r.memoryformatOptional(2);
  TORCH_CHECK(device.is_xpu(), "Invalid device, must be xpu device");
  torch::utils::device_lazy_init(at::kXPU);
  return THPVariable_Wrap(dispatch_to(self_, device, r.toBool(1), false, opt_memory_format));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_ipu(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "ipu(Device? device=None, bool non_blocking=False, *, MemoryFormat? memory_format=None)",
    "ipu(Device? device=None, bool async=False, *, MemoryFormat? memory_format=None)|deprecated"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto device = r.isNone(0) ? at::Device(at::DeviceType::IPU) : r.device(0);
  auto opt_memory_format = r.memoryformatOptional(2);
  TORCH_CHECK(device.is_ipu(), "Invalid device, must be ipu device");
  return THPVariable_Wrap(dispatch_to(self_, device, r.toBool(1), false, opt_memory_format));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_to_type(PyObject* self, ScalarType scalarType, std::optional<c10::MemoryFormat> optional_memory_format) {
  HANDLE_TH_ERRORS
  auto& self_ = THPVariable_Unpack(self);
  return THPVariable_Wrap(dispatch_to(self_, scalarType, false, false, optional_memory_format));
  END_HANDLE_TH_ERRORS
}

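// byte()/char()/double()/float()/cdouble()/cfloat()/half()/int()/long()/
// short()/bool()/bfloat16() below all parse an optional memory_format argument
// and delegate to THPVariable_to_type() with the corresponding ScalarType.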
static PyObject * THPVariable_byte(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "byte(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Byte, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_char(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "char(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Char, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_double(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "double(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Double, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_float(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "float(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Float, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_cdouble(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "cdouble(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::ComplexDouble, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_cfloat(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "cfloat(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::ComplexFloat, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_half(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "half(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Half, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_int(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "int(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Int, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_long(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "long(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Long, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_short(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "short(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Short, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_bool(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "bool(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Bool, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_bfloat16(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "bfloat16(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::BFloat16, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_element_size(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "element_size", args);
  }
  auto& self_ = THPVariable_Unpack(self);
  return THPUtils_packInt64(self_.element_size());
  END_HANDLE_TH_ERRORS
}

// implemented on the python object because PyObjects are not declarable in native_functions.yaml
// See: ATen/native/README.md for more context
static PyObject * THPVariable_numpy(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "numpy(*, bool force=False)"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  jit::tracer::warn("Converting a tensor to a NumPy array", jit::tracer::WARN_PYTHON_DATAFLOW);
  return torch::utils::tensor_to_numpy(self_, r.toBool(0));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_requires_grad_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "requires_grad_(bool requires_grad=True)",
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  // temporary hack to improve functorch UX.
  const auto& functorch_tls = at::functorch::functorchTLSAccessor();
  if (functorch_tls) {
    functorch_tls->checkSupportsInplaceRequiresGrad();
  }

  auto requires_grad = r.toBool(0);
  // should we throw if requires_grad is true?  var.requires_grad = True throws here
  // but it's nice to let this be a no-op.
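  // (Illustrative: for a non-leaf such as `y = x * 2`, y.requires_grad_(False)
  // raises below, while y.requires_grad_(True) falls through and is
  // effectively a no-op, since a non-leaf already requires grad.)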
  if (!self_.is_leaf() && !requires_grad) {
    throw std::runtime_error(autograd::utils::requires_grad_leaf_error(requires_grad));
  }
  if (requires_grad && !isDifferentiableType(at::typeMetaToScalarType(self_.dtype()))) {
    throw std::runtime_error("only Tensors of floating point dtype can require gradients");
  }
  self_.set_requires_grad(requires_grad);
  return THPVariable_Wrap(self_);
  END_HANDLE_TH_ERRORS
}

inline bool dispatch_is_contiguous(const Tensor & self, MemoryFormat memory_format) {
  return self.is_contiguous(memory_format);
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_is_contiguous(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "is_contiguous(*, MemoryFormat memory_format=contiguous_format)",
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self_, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self_, args, kwargs, PyObject_Type(self_), "torch.Tensor");
  }

  auto memory_format = r.memoryformat(0);
  auto& self = THPVariable_Unpack(self_);
  return wrap(dispatch_is_contiguous(self, memory_format));
  END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_item(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "item", args);
  }
  jit::tracer::warn("Converting a tensor to a Python number", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = THPVariable_Unpack(self);
  auto dispatch_item_ = [](const Tensor& self) -> at::Scalar {
    pybind11::gil_scoped_release no_gil;
    return self.item();
  };
  return py::cast(dispatch_item_(self_)).release().ptr();
  END_HANDLE_TH_ERRORS
}

// implemented on the python object because there is no support for first-class functions in native_functions.yaml
// See: ATen/native/README.md for more context
static PyObject * THPVariable_map_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({ "map_(Tensor other, PyObject* callable)" });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  Variable other = r.tensor(0);
  if (self_.requires_grad() || other.requires_grad()) {
    throw std::runtime_error(
        "Can't call map_() on Variable that requires grad. Use "
        "var.detach().map_() instead.");
  }
  TORCH_CHECK(
      !self_.unsafeGetTensorImpl()->is_python_dispatch() && !other.unsafeGetTensorImpl()->is_python_dispatch(),
      ".map_ is not supported for tensor subclasses.");

  return THPVariable_Wrap(torch::utils::map_(self_, other, r.pyobject(1)));
  END_HANDLE_TH_ERRORS
}

// implemented on the python object because there is no support for first-class functions in native_functions.yaml
// See: ATen/native/README.md for more context
static PyObject * THPVariable_map2_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({ "map2_(Tensor x, Tensor y, PyObject* callable)" });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  Variable x = r.tensor(0);
  Variable y = r.tensor(1);
  if (self_.requires_grad() || x.requires_grad() || y.requires_grad()) {
    throw std::runtime_error(
        "Can't call map2_() on Variable that requires grad. Use "
        "var.detach().map2_() instead.");
  }
  TORCH_CHECK(
      !x.unsafeGetTensorImpl()->is_python_dispatch() && !y.unsafeGetTensorImpl()->is_python_dispatch(),
      ".map2_ is not supported for tensor subclasses.");
  return THPVariable_Wrap(torch::utils::map2_(self_, x, y, r.pyobject(2)));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "new", args, kwargs);
  }
  auto& self_ = THPVariable_Unpack(self);
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::legacy_tensor_new(legacyExtractDispatchKey(self_), self_.scalar_type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "new_tensor", args, kwargs);
  }
  auto& self_ = THPVariable_Unpack(self);
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::new_tensor(legacyExtractDispatchKey(self_), self_.scalar_type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_storage(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "untyped_storage");
  }
  auto& self_ = THPVariable_Unpack(self);
  return createPyObject(self_.storage());
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_to(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "to(Device device=None, ScalarType dtype=None, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)",
    "to(ScalarType dtype, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)",
    "to(Tensor tensor, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)",
  });
  ParsedArgs<5> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);
  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  auto parsed = parse_to_conversion(r, /*allow_copy*/ true);
  auto& device = std::get<0>(parsed);
  auto& scalarType = std::get<1>(parsed);
  auto non_blocking = std::get<2>(parsed);
  auto copy = std::get<3>(parsed);
  auto opt_memory_format = std::get<4>(parsed);
  auto& self_ = THPVariable_Unpack(self);
  torch::utils::maybe_initialize_device(device);
  if (!device && !scalarType && !copy && !opt_memory_format.has_value()) {
    Py_INCREF(self);
    return self;
  } else if (!device && !scalarType) {
    return THPVariable_Wrap(
        dispatch_to(self_, non_blocking, copy, opt_memory_format));
  } else if (!device) {
    return THPVariable_Wrap(dispatch_to(self_, *scalarType, non_blocking, copy, opt_memory_format));
  } else if (!scalarType) {
    return THPVariable_Wrap(dispatch_to(self_, *device, non_blocking, copy, opt_memory_format));
  } else {
    return THPVariable_Wrap(dispatch_to(self_, *device, *scalarType, non_blocking, copy, opt_memory_format));
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// implemented on the python object because arbitrarily nested lists are not declarable in native_functions.yaml
// See: ATen/native/README.md for more context
static PyObject * THPVariable_tolist(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "tolist", args);
  }
  jit::tracer::warn("Converting a tensor to a Python list", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto self_ = THPVariable_Unpack(self);
  return torch::utils::tensor_to_list(self_);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_type(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "type(PyObject* dtype=None, bool non_blocking=False, *, MemoryFormat? memory_format=None)",
    "type(PyObject* dtype=None, bool async=False, *, MemoryFormat? memory_format=None)|deprecated"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  if (r.isNone(0)) {
    return THPUtils_packString(torch::utils::options_to_string(self_.options()));
  }
  auto obj = r.pyobject(0);
  auto opt_memory_format = r.memoryformatOptional(2);
  std::string type_name;
  bool is_dtype = false;
  if (PyType_Check(obj)) {
    if (obj == THPVariableClass) {
      type_name = "torch.Tensor";
    } else {
      type_name = ((PyTypeObject*)obj)->tp_name;
    }
  } else if (THPUtils_checkString(obj)) {
    type_name = THPUtils_unpackString(obj);
  } else if (THPDtype_Check(obj)) {
    is_dtype = true;
  } else {
    throw TypeError("dtype must be a type, str, or dtype object");
  }
  ScalarType scalar_type;
  Device device = self_.device();
  if (is_dtype) {
    scalar_type = r.scalartype(0);
    return THPVariable_Wrap(dispatch_to(self_, scalar_type, /*non_blocking=*/ r.toBool(1), /*copy=*/ false, opt_memory_format));
  }
  at::TensorOptions options = torch::utils::options_from_string(type_name);
  scalar_type = at::typeMetaToScalarType(options.dtype());
  auto device_type = options.device().type();
  if (device_type != device.type()) {
    device = at::Device(device_type);
  }
  torch::utils::maybe_initialize_device(device);
  return THPVariable_Wrap(dispatch_to(self_, device, scalar_type, /*non_blocking=*/ r.toBool(1), /*copy=*/ false, opt_memory_format));
  END_HANDLE_TH_ERRORS
}

// generated methods start here

${py_methods}

static PyObject * THPVariable_bool_scalar(PyObject* self, PyObject* args) {
  if (check_has_torch_function(self)) {
    HANDLE_TH_ERRORS
    return handle_torch_function(self, "__bool__", args);
    END_HANDLE_TH_ERRORS
  }
  jit::tracer::warn("Converting a tensor to a Python boolean", jit::tracer::WARN_PYTHON_DATAFLOW);
  return THPVariable_is_nonzero(self, args);
}
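
// Note: THPVariable_is_nonzero (used above) and THPVariable_eq (used below)
// are expected to be among the bindings that the code generator emits in
// place of ${py_methods}.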

static PyObject * THPVariable___eq__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
#ifdef USE_NUMPY
  if (torch::utils::is_numpy_available()) {
    static PythonArgParser parser({
      "__eq__(PyObject* other)",
    }, /*traceable=*/true);

    ParsedArgs<1> parsed_args;
    auto _r = parser.parse(self_, args, kwargs, parsed_args);
    if(_r.has_torch_function()) {
      return handle_torch_function(_r, self_, args, kwargs, THPVariableClass, "torch.Tensor");
    }
    switch (_r.idx) {
      case 0: {
        auto other = _r.pyobject(0);
        if (PyArray_Check(other)) {
          auto other_tensor = torch::utils::tensor_from_numpy(other);
          auto dispatch_eq = [](const at::Tensor & self, const at::Tensor & other) -> at::Tensor {
            pybind11::gil_scoped_release no_gil;
            return self.eq(other);
          };
          const Tensor& self = THPVariable_Unpack(self_);
          return wrap(dispatch_eq(self, other_tensor));
        }
      }
    }
  }
#endif
  return THPVariable_eq(self_, args, kwargs);
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// Wrapper converts a raised TypeError into returning NotImplemented
// Used to implement binary arithmetic operators
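// (Returning NotImplemented lets the Python interpreter try the other
// operand's reflected method, e.g. other.__radd__(tensor), instead of
// propagating the TypeError directly; illustrative.)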
template <PyObject* (*Func)(PyObject*, PyObject*, PyObject*)>
static PyObject * TypeError_to_NotImplemented_(PyObject* self, PyObject* args, PyObject* kwargs) {

  PyObject* ret = Func(self, args, kwargs);
  if (!ret && PyErr_ExceptionMatches(PyExc_TypeError)) {
    PyErr_Clear();
    Py_INCREF(Py_NotImplemented);
    ret = Py_NotImplemented;
  }
  return ret;
}

// set_ has to be defined in the template because the c10::Storage object
// does not have a type, and we need to make sure the Python storage object's
// type matches the tensor's type
static PyObject* THPVariable_set_(
    PyObject* self_,
    PyObject* args,
    PyObject* kwargs) {
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser(
      {
          "set_()",
          "set_(Storage source)",
          "set_(Storage source, SymInt storage_offset, SymIntArrayRef size, SymIntArrayRef stride=None)",
          "set_(Tensor source)",
          "set_(Tensor source, SymInt storage_offset, SymIntArrayRef size, SymIntArrayRef stride=None)",
      },
      /*traceable=*/false);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(args, kwargs, parsed_args);

  switch (_r.idx) {
    case 0: {
      // aten::set_(Tensor(a!) self) -> Tensor(a!)
      auto dispatch_set_ = [](const Tensor& self) -> Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.set_();
      };
      return wrap(dispatch_set_(self));
    }
    case 1: {
      // aten::set_.source_Storage(Tensor(a!) self, Storage source) ->
      // Tensor(a!)
      at::ScalarType storage_scalar_type;
      bool is_typed_storage = true;
      at::Storage storage = _r.storage(0, storage_scalar_type, is_typed_storage);
      TORCH_CHECK(storage_scalar_type == self.dtype() || !is_typed_storage,
        "Expected a Storage of type ", self.dtype(),
        " or an UntypedStorage, but got type ", storage_scalar_type,
        " for argument 1 'storage'");
      auto dispatch_set_ = [](const Tensor& self, Storage source) -> Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.set_(source);
      };
      return wrap(dispatch_set_(self, storage));
    }
    case 2: {
      // aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage
      // source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!)
      at::ScalarType storage_scalar_type;
      bool is_typed_storage = true;
      at::Storage storage = _r.storage(0, storage_scalar_type, is_typed_storage);
      TORCH_CHECK(storage_scalar_type == self.dtype() || !is_typed_storage,
        "Expected a Storage of type ", self.dtype(),
        " or an UntypedStorage, but got type ", storage_scalar_type,
        " for argument 1 'storage'");
      auto dispatch_set_ = [](const Tensor& self,
                              Storage source,
                              c10::SymInt storage_offset,
                              c10::SymIntArrayRef size,
                              c10::SymIntArrayRef stride) -> Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.set__symint(source, storage_offset, size, stride);
      };
      return wrap(dispatch_set_(
          self, storage, _r.toSymInt(1), _r.symintlist(2), _r.symintlist(3)));
    }
    case 3: {
      // aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
      auto dispatch_set_ = [](const Tensor& self, const Tensor& source) -> Tensor {
        TORCH_CHECK(source.dtype() == self.dtype(), "Could not set tensor of type ", source.dtype(), " to a tensor of type ", self.dtype());
        pybind11::gil_scoped_release no_gil;
        return self.set_(source);
      };
      return wrap(dispatch_set_(self, _r.tensor(0)));
    }
    case 4: {
      // aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor
      // source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!)
      at::Tensor storage = _r.tensor(0);
      auto dispatch_set_ = [](const Tensor& self,
                              const Tensor& source,
                              c10::SymInt storage_offset,
                              c10::SymIntArrayRef size,
                              c10::SymIntArrayRef stride) -> Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.set__symint(source, storage_offset, size, stride);
      };
      return wrap(dispatch_set_(
          self, storage, _r.toSymInt(1), _r.symintlist(2), _r.symintlist(3)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// XXX: ops that are bound here are not exposed to the C++ API or the JIT.
// Any new op added here should be accompanied by a comment explaining why it
// is not being registered through native_functions.yaml, and should be tagged
// cpp / JIT.
PyMethodDef variable_methods[] = {
  // These magic methods are all implemented on the python object and wrapped so
  // that a raised TypeError is converted into returning NotImplemented
  {"__add__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_add>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__radd__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_add>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__iadd__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_add_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__rmul__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_mul>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__mul__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_mul>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__imul__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_mul_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__sub__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_sub>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__isub__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_sub_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__div__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_div>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__truediv__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_div>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__floordiv__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_floor_divide>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__idiv__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_div_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__ifloordiv__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_floor_divide_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__mod__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_remainder>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__imod__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_remainder_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__eq__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable___eq__>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__ne__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_ne>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__lt__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_lt>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__le__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_le>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__gt__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_gt>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__ge__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_ge>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__rand__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_bitwise_and>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__ror__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_bitwise_or>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__rxor__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_bitwise_xor>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__bool__", THPVariable_bool_scalar, METH_NOARGS, NULL},
  {"__float__", THPVariable_float_scalar, METH_NOARGS, NULL},
  {"__complex__", THPVariable_complex_scalar, METH_NOARGS, NULL},
  {"__int__", THPVariable_integral_scalar, METH_NOARGS, NULL},
  {"__long__", THPVariable_integral_scalar, METH_NOARGS, NULL},
  {"__index__", THPVariable_index_scalar, METH_NOARGS, NULL},
  {"__nonzero__", THPVariable_bool_scalar, METH_NOARGS, NULL},
  {"__invert__", THPVariable_invert, METH_NOARGS, NULL},
  {"__matmul__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_matmul>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"_is_view", THPVariable__is_view, METH_NOARGS, NULL},
  {"apply_", THPVariable_apply_, METH_O, NULL},
  {"bfloat16", castPyCFunctionWithKeywords(THPVariable_bfloat16), METH_VARARGS | METH_KEYWORDS, NULL},
  {"byte", castPyCFunctionWithKeywords(THPVariable_byte), METH_VARARGS | METH_KEYWORDS, NULL},
  {"char", castPyCFunctionWithKeywords(THPVariable_char), METH_VARARGS | METH_KEYWORDS, NULL},
  {"contiguous", castPyCFunctionWithKeywords(THPVariable_contiguous), METH_VARARGS | METH_KEYWORDS, NULL},
  {"copy_", castPyCFunctionWithKeywords(THPVariable_copy_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cpu", castPyCFunctionWithKeywords(THPVariable_cpu), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cuda", castPyCFunctionWithKeywords(THPVariable_cuda), METH_VARARGS | METH_KEYWORDS, NULL},
  {"mtia", castPyCFunctionWithKeywords(THPVariable_mtia), METH_VARARGS | METH_KEYWORDS, NULL},
  {"xpu", castPyCFunctionWithKeywords(THPVariable_xpu), METH_VARARGS | METH_KEYWORDS, NULL},
  {"ipu", castPyCFunctionWithKeywords(THPVariable_ipu), METH_VARARGS | METH_KEYWORDS, NULL},
  {"data_ptr", THPVariable_data_ptr, METH_NOARGS, NULL},
  {"dim", THPVariable_dim, METH_NOARGS, NULL},
  {"has_names", THPVariable_has_names, METH_NOARGS, NULL},
  {"double", castPyCFunctionWithKeywords(THPVariable_double), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cdouble", castPyCFunctionWithKeywords(THPVariable_cdouble), METH_VARARGS | METH_KEYWORDS, NULL},
  {"element_size", THPVariable_element_size, METH_NOARGS, NULL},
  {"float", castPyCFunctionWithKeywords(THPVariable_float), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cfloat", castPyCFunctionWithKeywords(THPVariable_cfloat), METH_VARARGS | METH_KEYWORDS, NULL},
  {"get_device", THPVariable_get_device, METH_NOARGS, NULL},
  {"bool", castPyCFunctionWithKeywords(THPVariable_bool), METH_VARARGS | METH_KEYWORDS, NULL},
  {"half", castPyCFunctionWithKeywords(THPVariable_half), METH_VARARGS | METH_KEYWORDS, NULL},
  {"int", castPyCFunctionWithKeywords(THPVariable_int), METH_VARARGS | METH_KEYWORDS, NULL},
  {"is_contiguous", castPyCFunctionWithKeywords(THPVariable_is_contiguous), METH_VARARGS | METH_KEYWORDS, NULL},
  {"item", THPVariable_item, METH_NOARGS, NULL},
  {"long", castPyCFunctionWithKeywords(THPVariable_long), METH_VARARGS | METH_KEYWORDS, NULL},
  {"map_", castPyCFunctionWithKeywords(THPVariable_map_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"map2_", castPyCFunctionWithKeywords(THPVariable_map2_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"ndimension", THPVariable_dim, METH_NOARGS, NULL},
  {"nelement", THPVariable_numel, METH_NOARGS, NULL},
  {"new", castPyCFunctionWithKeywords(THPVariable_new), METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_tensor", castPyCFunctionWithKeywords(THPVariable_new_tensor), METH_VARARGS | METH_KEYWORDS, NULL},
  {"nonzero", castPyCFunctionWithKeywords(THPVariable_nonzero), METH_VARARGS | METH_KEYWORDS, NULL},
  {"numel", THPVariable_numel, METH_NOARGS, NULL},
  {"numpy", castPyCFunctionWithKeywords(THPVariable_numpy), METH_VARARGS | METH_KEYWORDS, NULL},
  {"requires_grad_", castPyCFunctionWithKeywords(THPVariable_requires_grad_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"set_", castPyCFunctionWithKeywords(THPVariable_set_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"short", castPyCFunctionWithKeywords(THPVariable_short), METH_VARARGS | METH_KEYWORDS, NULL},
  {"size", castPyCFunctionWithKeywords(THPVariable_size), METH_VARARGS | METH_KEYWORDS, NULL},
  {"untyped_storage", THPVariable_storage, METH_NOARGS, NULL},
  {"storage_offset", THPVariable_storage_offset, METH_NOARGS, NULL},
  {"stride", castPyCFunctionWithKeywords(THPVariable_stride), METH_VARARGS | METH_KEYWORDS, NULL},
  {"to", castPyCFunctionWithKeywords(THPVariable_to), METH_VARARGS | METH_KEYWORDS, NULL},
  {"tolist", THPVariable_tolist, METH_NOARGS, NULL},
  {"type", castPyCFunctionWithKeywords(THPVariable_type), METH_VARARGS | METH_KEYWORDS, NULL},
  ${py_method_defs}
  {NULL}
};

} // namespace torch::autograd