#pragma once

#ifdef TORCH_ASSERT_NO_OPERATORS
#error This change adds a dependency on native_functions.yaml, \
meaning the file will need to be re-compiled every time an operator \
is changed or added. Consider if your change would be better placed in \
another file, or if a more specific header might achieve the same goal. \
See NOTE: [Tensor vs. TensorBase]
#endif

#include <c10/core/Device.h>
#include <c10/core/Layout.h>
#include <c10/core/MemoryFormat.h>
#include <c10/core/QScheme.h>
#include <c10/core/Stream.h>
#include <c10/core/Scalar.h>
#include <c10/core/ScalarType.h>
#include <c10/core/ScalarTypeToTypeMeta.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorImpl.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <c10/core/WrapDimMinimal.h>
#include <c10/util/Exception.h>
#include <c10/util/ExclusivelyOwned.h>
#include <c10/util/Deprecated.h>
#include <c10/util/MaybeOwned.h>
#include <optional>
#include <c10/util/OptionalArrayRef.h>
#include <c10/util/intrusive_ptr.h>
#include <c10/macros/Export.h>
#include <ATen/core/CheckMemoryFormat.h>
#include <ATen/core/DeprecatedTypePropertiesRegistry.h>
#include <ATen/core/DeprecatedTypeProperties.h>
#include <ATen/core/NamedTensor.h>
#include <ATen/core/QuantizerBase.h>
#include <c10/core/SymInt.h>
#include <ATen/core/TensorAccessor.h>
#include <ATen/core/TensorBase.h>


#include <ATen/MethodOperators.h>

namespace c10 {
template<class T> class List;
template<class T> class IListRef;
}
namespace at {
struct Generator;
struct Type;
class DeprecatedTypeProperties;
class Tensor;
} // namespace at
namespace at {
namespace indexing {
struct TensorIndex;
} // namespace indexing
} // namespace at

namespace torch { namespace autograd {

struct Node;

}} // namespace torch::autograd

namespace at {

class OptionalTensorRef;
class TensorRef;
class Tensor;
using TensorList = ArrayRef<Tensor>;
using ITensorList = c10::IListRef<Tensor>;

using Stream = c10::Stream;

// Tensor is a "generic" object holding a pointer to the underlying TensorImpl object, which
// has an embedded reference count. In this way, Tensor is similar to boost::intrusive_ptr.
//
// For example:
//
// void func(Tensor a) {
//   Tensor b = a;
//   ...
// }
//
// In this example, when we say Tensor b = a, we are creating a new object that points to the
// same underlying TensorImpl and bumping its reference count. When b goes out of scope, the
// destructor decrements the reference count by calling release() on the TensorImpl it points to.
// The existing constructors, operator overloads, etc. take care to implement the correct semantics.
//
// Note that Tensor can also be NULL, i.e. it is not associated with any underlying TensorImpl, and
// special care must be taken to handle this.
class TORCH_API Tensor: public TensorBase {
 protected:
  // Create a Tensor with a +0 reference count. Special care must be
  // taken to avoid decrementing this reference count at destruction
  // time. Intended to support MaybeOwnedTraits<Tensor>.
  explicit Tensor(unsafe_borrow_t, const TensorBase& rhs): TensorBase(unsafe_borrow_t{}, rhs) {}
  friend MaybeOwnedTraits<Tensor>;
  friend OptionalTensorRef;
  friend TensorRef;

 public:
  Tensor() = default;
  // This constructor should not be used by end users and is an implementation
  // detail invoked by autogenerated code.
  explicit Tensor(
      c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl)
      : TensorBase(std::move(tensor_impl)) {}
  Tensor(const Tensor &tensor) = default;
  Tensor(Tensor &&tensor) = default;

  // Implicitly move-constructible from TensorBase, but copy construction must
  // be explicit because it bumps the reference count.
  explicit Tensor(const TensorBase &base): TensorBase(base) {}
  /*implicit*/ Tensor(TensorBase &&base): TensorBase(std::move(base)) {}

  // Creates a new wrapper from TensorImpl. Intentionally a static method because
  // it should be used with care. Checks necessary invariants.
  static Tensor wrap_tensor_impl(
      c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl) {
    return TensorBase::wrap_tensor_impl(std::move(tensor_impl));
  }

  Tensor contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const {
    return TensorBase::contiguous(memory_format);
  }

  Tensor conj() const {
    if (!this->is_complex()) {
      return *this;
    }

    switch (this->layout()) {
      case at::kSparse:
      case at::kSparseCsr:
      case at::kSparseCsc:
      case at::kSparseBsr:
      case at::kSparseBsc:
        return this->conj_physical();
      default:
        return this->_conj();
    }
  }
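  // A minimal sketch of conj() on a dense complex tensor (resolve_conj()
  // materializes the lazy conjugate view):
  //
  //   auto z = torch::randn({2}, torch::kComplexFloat);
  //   auto zc = z.conj();           // lazy view; shares storage with z
  //   auto zm = zc.resolve_conj();  // materializes the conjugation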

  // Aliased by Dimname overloads, so need explicit using
  using TensorBase::size;
  using TensorBase::sym_size;
  using TensorBase::stride;

  /// Should be used if *this can reasonably be expected to be contiguous and
  /// performance is important.
  /// Compared to contiguous, it saves a reference count
  /// increment/decrement if *this is already contiguous, at the cost
  /// in all cases of an extra pointer of stack usage, an extra branch
  /// to access, and an extra branch at destruction time.
  c10::MaybeOwned<Tensor> expect_contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const &;

  // Use .contiguous() instead. Trying to borrow from a prvalue Tensor
  // will only lead to trouble and dangling references.
  c10::MaybeOwned<Tensor> expect_contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) && = delete;
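
  // A minimal usage sketch (the kernel name is hypothetical):
  //
  //   void my_cpu_kernel(const Tensor& t) {
  //     c10::MaybeOwned<Tensor> c = t.expect_contiguous();
  //     // *c is contiguous; no refcount bump occurred if t already was
  //     const float* p = c->data_ptr<float>();
  //   }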

  // The following overloads are very intriguing. Consider the following
  // program:
  //
  //    x[1] = 3;
  //
  // We would expect that the entry at index 1 of x is set to 3. But how can we
  // actually achieve this? x[1] evaluates to a tensor...
  //
  // The answer is, using a ref-qualifier. x[1] is an rvalue, which cannot be
  // (profitably) assigned to in the traditional sense, so we overload
  // assignment to mean, "Actually, copy 3 into the tensor data." This is done
  // with an rvalue-reference ref-qualified overload (the methods with && at the
  // end of their type).
  //
  // There's one more fly in the ointment: We also want
  //
  //    Tensor x = y;
  //
  // to work, and we want it NOT to copy. So we need a traditional operator=
  // overload. But we MUST specify a mutable lvalue ref-qualifier, to
  // disambiguate the traditional overload from the rvalue-reference
  // ref-qualified overload. Otherwise, it will be ambiguous, because
  // a non-ref-qualified method is eligible for all situations.
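  //
  // Put together, a sketch of the two behaviors:
  //
  //   Tensor x = y;    // shares the TensorImpl; no data copy
  //   x[1] = 3;        // rvalue operator=: writes 3 into x's data (fill_)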

  // Unfortunately, we have to write these constructors out manually
  // to work around an MSVC bug:
  //    error C2580: 'at::Tensor &at::Tensor::operator =(const at::Tensor &) &':
  //    multiple versions of a defaulted special member functions are not allowed
  // Tensor& operator=(const Tensor&) & = default;
  // Tensor& operator=(Tensor&&) & = default;

  // Also MSVC will wrongly issue the following warning with the aforementioned fix
  //    warning C4522: 'at::Tensor': multiple assignment operators specified
  // Let's just skip the warning.
  //
  // TODO: temporarily disabled

  Tensor& operator=(const TensorBase& x) & {
    impl_ = x.getIntrusivePtr();
    return *this;
  }
  Tensor& operator=(TensorBase&& x) & noexcept {
    impl_ = x.unsafeReleaseIntrusivePtr();
    return *this;
  }

  Tensor& operator=(const Tensor &x) & {
    return operator=(static_cast<const TensorBase&>(x));
  }
  Tensor& operator=(Tensor &&x) & noexcept {
    return operator=(static_cast<TensorBase&&>(x));
  }

  Tensor& operator=(const Scalar &v) && {
    return fill_(v);
  }
  Tensor& operator=(const Tensor &rhs) && {
    return copy_(rhs);
  }
  Tensor& operator=(Tensor&& rhs) && {
    return copy_(rhs);
  }

  C10_DEPRECATED_MESSAGE("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().")
  DeprecatedTypeProperties & type() const {
    return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
        dispatchKeyToBackend(legacyExtractDispatchKey(key_set())),
        scalar_type());
  }

  Tensor toType(ScalarType t) const {
    return to(options().dtype(t), /*non_blocking*/ false, /*copy*/ false);
  }

  // TODO: Deprecate me
  Tensor toBackend(Backend b) const {
    return to(options().device(backendToDeviceType(b)).layout(layout_from_backend(b)), /*non_blocking*/ false, /*copy*/ false);
  }

  C10_DEPRECATED_MESSAGE("Tensor.is_variable() is deprecated; everything is a variable now. (If you want to assert that variable has been appropriately handled already, use at::impl::variable_excluded_from_dispatch())")
  bool is_variable() const noexcept {
    return !at::impl::variable_excluded_from_dispatch();
  }

  template<typename T>
  C10_DEPRECATED_MESSAGE("Tensor.data<T>() is deprecated. Please use Tensor.data_ptr<T>() instead.")
  T * data() const {
    return data_ptr<T>();
  }

  template <typename T>
  T item() const;
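
  // item<T>() extracts the value of a single-element tensor, e.g. (a sketch):
  //
  //   double loss_val = loss.item<double>();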

  template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
  C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")
  GenericPackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() const & {
    return generic_packed_accessor<T,N,PtrTraits,index_t>();
  }
  template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
  C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")
  GenericPackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() && = delete;

  Tensor operator~() const {
    return bitwise_not();
  }
  Tensor operator-() const {
    return neg();
  }
  Tensor& operator+=(const Tensor & other) {
    return add_(other);
  }
  Tensor& operator+=(const Scalar & other) {
    return add_(other);
  }
  Tensor& operator-=(const Tensor & other) {
    return sub_(other);
  }
  Tensor& operator-=(const Scalar & other) {
    return sub_(other);
  }
  Tensor& operator*=(const Tensor & other) {
    return mul_(other);
  }
  Tensor& operator*=(const Scalar & other) {
    return mul_(other);
  }
  Tensor& operator/=(const Tensor & other) {
    return div_(other);
  }
  Tensor& operator/=(const Scalar & other) {
    return div_(other);
  }
  Tensor& operator&=(const Tensor & other) {
    return bitwise_and_(other);
  }
  Tensor& operator|=(const Tensor & other) {
    return bitwise_or_(other);
  }
  Tensor& operator^=(const Tensor & other) {
    return bitwise_xor_(other);
  }
  Tensor operator[](const Scalar & index) const {
    if (!index.isIntegral(false)) {
      TORCH_CHECK_INDEX(false, "Can only index tensors with integral scalars");
    }
    return this->operator[](index.toLong());
  }
  Tensor operator[](const Tensor & index) const {
    // These properties are checked in the Scalar constructor, but we check
    // them here as well to provide more useful diagnostics for the user.
    if (!index.defined()) {
      TORCH_CHECK_INDEX(false, "Can only index with tensors that are defined");
    }
    if (index.dim() != 0) {
      TORCH_CHECK_INDEX(false,
                        "Can only index with tensors that are scalars (zero-dim)");
    }
    // The Scalar(Tensor) constructor is explicit, so we need to call it.
    return this->operator[](index.item());
  }
  Tensor operator[](int64_t index) const {
    return select(0, index);
  }
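
  // A sketch of how these overloads compose with the rvalue operator=
  // above:
  //
  //   Tensor row = x[0];   // select() along dim 0: a view, not a copy
  //   x[0] = row * 2;      // rvalue operator=: copies the result into row 0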

  Tensor index(ArrayRef<at::indexing::TensorIndex> indices) const;
  Tensor index(std::initializer_list<at::indexing::TensorIndex> indices) const;

  Tensor & index_put_(ArrayRef<at::indexing::TensorIndex> indices, Tensor const & rhs);
  Tensor & index_put_(ArrayRef<at::indexing::TensorIndex> indices, const Scalar& v);
  Tensor & index_put_(std::initializer_list<at::indexing::TensorIndex> indices, Tensor const & rhs);
  Tensor & index_put_(std::initializer_list<at::indexing::TensorIndex> indices, const Scalar& v);
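
  // These mirror Python-style advanced indexing; a minimal sketch:
  //
  //   using namespace at::indexing;
  //   Tensor col = x.index({Slice(), 0});  // Python: x[:, 0]
  //   x.index_put_({Slice(None, 2)}, 0);   // Python: x[:2] = 0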

  Tensor cpu() const {
    return to(options().device(c10::DeviceType::CPU), /*non_blocking*/ false, /*copy*/ false);
  }

  // TODO: The Python version also accepts arguments
  Tensor cuda() const {
    return to(options().device(c10::DeviceType::CUDA), /*non_blocking*/ false, /*copy*/ false);
  }

  Tensor hip() const {
    return to(options().device(c10::DeviceType::HIP), /*non_blocking*/ false, /*copy*/ false);
  }

  Tensor ve() const {
    return to(options().device(c10::DeviceType::VE), /*non_blocking*/ false, /*copy*/ false);
  }

  Tensor vulkan() const {
    return to(options().device(c10::DeviceType::Vulkan), /*non_blocking*/ false, /*copy*/ false);
  }

  Tensor metal() const {
    return to(options().device(c10::DeviceType::Metal), /*non_blocking*/ false, /*copy*/ false);
  }

  Tensor meta() const {
    return to(options().device(c10::DeviceType::Meta), /*non_blocking*/ false, /*copy*/ false);
  }

  // ~~~~~ Autograd API ~~~~~

  /// \fn bool is_leaf() const;
  ///
  /// All Tensors that have `requires_grad()` which is ``false`` will be leaf Tensors by convention.
  ///
  /// Tensors that have `requires_grad()` which is ``true`` will be leaf Tensors if they were
  /// created by the user. This means that they are not the result of an operation and so
  /// `grad_fn()` is `nullptr`.
  ///
  /// Only leaf Tensors will have their `grad()` populated during a call to `backward()`.
  /// To get `grad()` populated for non-leaf Tensors, you can use `retain_grad()`.
  ///
  /// Example:
  /// @code
  /// auto a = torch::rand(10, torch::requires_grad());
  /// std::cout << a.is_leaf() << std::endl; // prints `true`
  ///
  /// auto b = torch::rand(10, torch::requires_grad()).to(torch::kCUDA);
  /// std::cout << b.is_leaf() << std::endl; // prints `false`
  /// // b was created by the operation that cast a CPU Tensor into a CUDA Tensor
  ///
  /// auto c = torch::rand(10, torch::requires_grad()) + 2;
  /// std::cout << c.is_leaf() << std::endl; // prints `false`
  /// // c was created by the addition operation
  ///
  /// auto d = torch::rand(10).cuda();
  /// std::cout << d.is_leaf() << std::endl; // prints `true`
  /// // d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
  ///
  /// auto e = torch::rand(10).cuda().requires_grad_();
  /// std::cout << e.is_leaf() << std::endl; // prints `true`
  /// // e requires gradients and has no operations creating it
  ///
  /// auto f = torch::rand(10, torch::device(torch::kCUDA).requires_grad(true));
  /// std::cout << f.is_leaf() << std::endl; // prints `true`
  /// // f requires grad and has no operation creating it
  /// @endcode

  /// \fn void backward(const Tensor & gradient={}, std::optional<bool> retain_graph=std::nullopt, bool create_graph=false, std::optional<TensorList> inputs=std::nullopt) const;
  ///
  /// Computes the gradient of the current tensor with respect to graph leaves.
  ///
  /// The graph is differentiated using the chain rule. If the tensor is
  /// non-scalar (i.e. its data has more than one element) and requires
  /// gradient, the function additionally requires specifying ``gradient``.
  /// It should be a tensor of matching type and location that contains
  /// the gradient of the differentiated function w.r.t. this Tensor.
  ///
  /// This function accumulates gradients in the leaves - you might need to
  /// zero them before calling it.
  ///
  /// \param gradient Gradient w.r.t. the
  ///     tensor. If it is a tensor, it will be automatically converted
  ///     to a Tensor that does not require grad unless ``create_graph`` is ``true``.
  ///     None values can be specified for scalar Tensors or ones that
  ///     don't require grad. If a None value would be acceptable then
  ///     this argument is optional.
  /// \param retain_graph If ``false``, the graph used to compute
  ///     the grads will be freed. Note that in nearly all cases setting
  ///     this option to ``true`` is not needed and often can be worked around
  ///     in a much more efficient way. Defaults to the value of
  ///     ``create_graph``.
  /// \param create_graph If ``true``, graph of the derivative will
  ///     be constructed, allowing computation of higher order derivative
  ///     products. Defaults to ``false``.
  /// \param inputs Inputs w.r.t. which the gradient will be accumulated into
  ///     ``at::Tensor::grad``. All other Tensors will be ignored. If not
  ///     provided, the gradient is accumulated into all the leaf Tensors
  ///     that were used to compute the current tensor.
  ///     When inputs are provided and a given input is not a leaf,
  ///     the current implementation will call its grad_fn (even though it is not strictly needed to get these gradients).
  ///     It is an implementation detail on which the user should not rely.
  ///     See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
  void backward(const Tensor & gradient={}, std::optional<bool> retain_graph=std::nullopt, bool create_graph=false, std::optional<TensorList> inputs=std::nullopt) const {
    // NB: Adding this wrapper to _backward here because we'd like our
    // 'backwards' api to accept the 'inputs' argument optionally. Since code gen
    // currently does not support optional of TensorList our approach is to replace
    // backward in native_functions.yaml with _backward and call it here instead.
    if (inputs.has_value()) {
      TORCH_CHECK(inputs.value().size() > 0, "'inputs' argument to backward cannot be empty")
      this->_backward(inputs.value(), gradient, retain_graph, create_graph);
    } else {
      this->_backward({}, gradient, retain_graph, create_graph);
    }
  }
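
  // A minimal usage sketch:
  //
  //   auto x = torch::ones({2, 2}, torch::requires_grad());
  //   auto y = (x * x).sum();
  //   y.backward();  // x.grad() now holds dy/dx == 2 * x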

  /// \fn Tensor detach() const;
  ///
  /// Returns a new Tensor, detached from the current graph.
  /// The result will never require gradient.

  /// \fn Tensor & detach_() const;
  ///
  /// Detaches the Tensor from the graph that created it, making it a leaf.
  /// Views cannot be detached in-place.

  /// \fn void retain_grad() const;
  ///
  /// Enables this Tensor to have its `grad()` populated during
  /// `backward()`. This is a no-op for leaf tensors.

  /// \fn bool retains_grad() const;
  ///
  /// Is ``true`` if this Tensor is non-leaf and its `grad()` is enabled to be
  /// populated during `backward()`, ``false`` otherwise.

  const Tensor& set_requires_grad(bool requires_grad) const {
    TensorBase::set_requires_grad(requires_grad);
    return *this;
  }

  /// Return a mutable reference to the gradient. This is conventionally
  /// used as `t.mutable_grad() = x` to set a gradient to a completely new tensor.
  /// Note that this function works on a non-const Tensor and is not
  /// thread safe.
  Tensor& mutable_grad() const {
    return impl_->mutable_grad();
  }

  /// This function returns an undefined tensor by default and returns a defined tensor
  /// the first time a call to `backward()` computes gradients for this Tensor.
  /// The attribute will then contain the gradients computed and future calls
  /// to `backward()` will accumulate (add) gradients into it.
  const Tensor& grad() const {
    const Tensor& maybe_grad = impl_->grad();
    if (!is_leaf() && !retains_grad() && !maybe_grad.defined()) {
      TORCH_WARN(
        "The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad "
        "attribute won't be populated during autograd.backward(). If you indeed want the .grad "
        "field to be populated for a non-leaf Tensor, use .retain_grad() on the non-leaf Tensor. "
        "If you access the non-leaf Tensor by mistake, make sure you access the leaf Tensor "
        "instead. See github.com/pytorch/pytorch/pull/30531 for more information.");
    }
    return maybe_grad;
  }

  // The Forward AD API functions below are low level and are not to be used by end
  // users, who should use the API provided in torch/csrc/autograd.h

  /// This function returns the forward gradient for this Tensor at the given level.
  const Tensor& _fw_grad(uint64_t level) const {
    return impl_->_fw_grad(level, *this);
  }

  /// This function can be used to set the value of the forward grad.
  /// Note that the given new_grad might not be used directly if it has different
  /// metadata (size/stride/storage offset) compared to this Tensor. In that case,
  /// new_grad's content will be copied into a new Tensor.
  void _set_fw_grad(const TensorBase& new_grad, uint64_t level, bool is_inplace_op) const {
    impl_->_set_fw_grad(new_grad, *this, level, is_inplace_op);
  }


  // STOP. Thinking of adding a method here, which only makes use
  // of other ATen methods? Define it in native_functions.yaml.

  // example:
  // Tensor * add(Tensor & b);
  ${tensor_method_declarations}

  // Special C++ only overloads for std()-like functions (See gh-40287)
  // These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
  // So, for example, std(0) would select the std(unbiased=false) overload.

  Tensor var(int dim) const {
    return var(IntArrayRef{dim});
  }

  Tensor std(int dim) const {
    return std(IntArrayRef{dim});
  }
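
  // A sketch of the difference (the bool overload is std(unbiased)):
  //
  //   auto t = torch::rand({3, 4});
  //   auto a = t.std(0);      // with these overloads: std over dim 0
  //   auto b = t.std(false);  // biased (population) std over all elements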

  // We changed .dtype() to return a TypeMeta in #12766. Ideally, we want the
  // at::kDouble and its friends to be TypeMeta's, but that hasn't happened yet.
  // Until that change happens, we keep this method to maintain BC for C++ usage like
  // `x.to(y.dtype())`.
  // TODO: remove the following two methods after at::kDouble and its friends are TypeMeta's.
  inline Tensor to(caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const {
    return this->to(/*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy);
  }
  inline Tensor to(Device device, caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const {
    return this->to(device, /*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy);
  }
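
  // A minimal sketch of the BC pattern these overloads preserve:
  //
  //   Tensor y = x.to(other.dtype());                  // dtype() is a caffe2::TypeMeta
  //   Tensor z = x.to(other.device(), other.dtype());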

  template <typename F, typename... Args>
  decltype(auto) m(F func, Args&&... params) const {
    return func(*this, std::forward<Args>(params)...);
  }
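
  // m() pipes *this through an arbitrary callable; a sketch (`my_op` is a
  // hypothetical free function, not part of this API):
  //
  //   Tensor my_op(const Tensor& self, int64_t k);
  //   Tensor r = t.m(my_op, 2);  // same as my_op(t, 2)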

  /// NOTE: This is similar to the legacy `.data()` function on `Variable`, and is intended
  /// to be used from functions that need to access the `Variable`'s equivalent `Tensor`
  /// (i.e. `Tensor` that shares the same storage and tensor metadata with the `Variable`).
  ///
  /// One notable difference with the legacy `.data()` function is that changes to the
  /// returned `Tensor`'s tensor metadata (e.g. sizes / strides / storage / storage_offset)
  /// will not update the original `Variable`, due to the fact that this function
  /// shallow-copies the `Variable`'s underlying TensorImpl.
  at::Tensor tensor_data() const {
    return TensorBase::tensor_data();
  }

  /// NOTE: `var.variable_data()` in C++ has the same semantics as `tensor.data`
  /// in Python, which creates a new `Variable` that shares the same storage and
  /// tensor metadata with the original `Variable`, but with a completely new
  /// autograd history.
  ///
  /// NOTE: If we change the tensor metadata (e.g. sizes / strides /
  /// storage / storage_offset) of a variable created from `var.variable_data()`, those
  /// changes will not update the original variable `var`. In `.variable_data()`, we set
  /// `allow_tensor_metadata_change_` to false to make such changes explicitly illegal,
  /// in order to prevent users from changing metadata of `var.variable_data()`
  /// and expecting the original variable `var` to also be updated.
  at::Tensor variable_data() const {
    return TensorBase::variable_data();
  }

  // Hooks
  //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

  template <typename T>
  using hook_return_void_t = std::enable_if_t<std::is_void<typename std::invoke_result_t<T&, Tensor>>::value, unsigned>;
  template <typename T>
  using hook_return_var_t = std::enable_if_t<std::is_same<typename std::invoke_result_t<T&, Tensor>, Tensor>::value, unsigned>;

  /// Registers a backward hook.
  ///
  /// The hook will be called every time a gradient with respect to the Tensor is computed.
  /// The hook should have one of the following signatures:
  /// ```
  /// hook(Tensor grad) -> Tensor
  /// ```
  /// ```
  /// hook(Tensor grad) -> void
  /// ```
  /// The hook should not modify its argument, but it can optionally return a new gradient
  /// which will be used in place of `grad`.
  ///
  /// This function returns the index of the hook in the list, which can be used to remove the hook.
  ///
  /// Example:
  /// @code
  /// auto v = torch::tensor({0., 0., 0.}, torch::requires_grad());
  /// auto h = v.register_hook([](torch::Tensor grad){ return grad * 2; }); // double the gradient
  /// v.backward(torch::tensor({1., 2., 3.}));
  /// // This prints:
  /// // ```
  /// //  2
  /// //  4
  /// //  6
  /// // [ CPUFloatType{3} ]
  /// // ```
  /// std::cout << v.grad() << std::endl;
  /// v.remove_hook(h); // removes the hook
  /// @endcode
  template <typename T>
  hook_return_void_t<T> register_hook(T&& hook) const;
  template <typename T>
  hook_return_var_t<T> register_hook(T&& hook) const;

  // Variable methods
  //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

  Tensor data() const {
    return TensorBase::data();
  }

  void _backward(TensorList inputs, const std::optional<Tensor>& gradient, std::optional<bool> keep_graph, bool create_graph) const;

  const Tensor& requires_grad_(bool _requires_grad=true) const {
    TensorBase::requires_grad_(_requires_grad);
    return *this;
  }
};

namespace detail {
// Helper creator for the Tensor class which doesn't require the user to pass
// in an intrusive_ptr; instead it converts the arguments passed into the
// requested intrusive_ptr type.
template <typename T, typename... Args>
Tensor make_tensor(Args&&... args) {
  return Tensor(c10::make_intrusive<T>(std::forward<Args>(args)...));
}
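
// A minimal usage sketch (`MyTensorImpl` is a hypothetical TensorImpl
// subclass; the arguments are whatever its constructor takes):
//
//   auto t = at::detail::make_tensor<MyTensorImpl>(arg1, arg2);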

} // namespace detail

} // namespace at


namespace at {
${tensor_method_definitions}
} // namespace at


namespace c10 {
template <>
struct MaybeOwnedTraits<at::Tensor> {
  using owned_type = at::Tensor;
  using borrow_type = at::Tensor;

  static borrow_type createBorrow(const owned_type& from) {
    // NOTE: this can be implemented without the special
    // unsafe_borrow_t Tensor constructor as
    //
    // return borrow_type(c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(from.unsafeGetTensorImpl()));
    //
    // but that hurts inlining due to the nullptr check in the
    // Tensor(c10::intrusive_ptr<...>) constructor. We already know
    // that from.impl_ isn't null because from is a valid Tensor, so
    // we needn't do the check again. (using __builtin_assume can
    // avoid this, but wouldn't be portable to MSVC.)
    return borrow_type(borrow_type::unsafe_borrow_t{}, from);
  }

  static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) {
    lhs.unsafeReleaseTensorImpl();
    // See above note: this can be implemented with public API
    // similarly to createBorrow(), but that would hurt inlining.
    lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs);
  }

  static void destroyBorrow(borrow_type& toDestroy) {
    toDestroy.unsafeReleaseTensorImpl(); // "leak" it, but it was already +0.
  }

  static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
    return borrow;
  }

  static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
    return &borrow;
  }

  static bool debugBorrowIsValid(const borrow_type& /*borrow*/) {
    return true;
  }
};

template <>
struct ExclusivelyOwnedTraits<at::Tensor> {
  using repr_type = at::Tensor;
  using pointer_type = at::Tensor*;
  using const_pointer_type = const at::Tensor*;

  static repr_type nullRepr() {
    return at::Tensor();
  }

  template <class... Args>
  static repr_type createInPlace(Args&&... args) {
    return at::Tensor(std::forward<Args>(args)...);
  }

  static repr_type moveToRepr(at::Tensor&& x) {
    return std::move(x);
  }

  static void destroyOwned(at::Tensor& x) {
    return ExclusivelyOwnedTraits<at::TensorBase>::destroyOwned(x);
  }

  static at::Tensor take(at::Tensor& x) {
    return std::move(x);
  }

  static pointer_type getImpl(repr_type& x) {
    return &x;
  }

  static const_pointer_type getImpl(const repr_type& x) {
    return &x;
  }
};
} // namespace c10

namespace at {

inline c10::MaybeOwned<Tensor> borrow_from_optional_tensor(
    const std::optional<Tensor>& opt) {
  return opt.has_value()
    ? c10::MaybeOwned<Tensor>::borrowed(*opt)
    : c10::MaybeOwned<Tensor>::owned(std::in_place);
}
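
// A minimal usage sketch: treat an optional argument as an always-valid
// Tensor (undefined when absent) without a refcount bump when present:
//
//   void f(const std::optional<Tensor>& maybe_bias) {
//     c10::MaybeOwned<Tensor> bias = at::borrow_from_optional_tensor(maybe_bias);
//     if (bias->defined()) { /* use *bias */ }
//   }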

inline c10::MaybeOwned<Tensor> Tensor::expect_contiguous(MemoryFormat memory_format) const & {
  if (is_contiguous(memory_format)) {
    return c10::MaybeOwned<Tensor>::borrowed(*this);
  } else {
    return c10::MaybeOwned<Tensor>::owned(__dispatch_contiguous(memory_format));
  }
}
} // namespace at