/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/runtime/core/portable_type/tensor_impl.h>

#include <algorithm>
#include <cinttypes> // For PRId8, used in the type check below.
#include <cstdint>

#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
#include <executorch/runtime/core/portable_type/qint_types.h>
#include <executorch/runtime/core/portable_type/scalar_type.h>
#include <executorch/runtime/platform/assert.h>

namespace executorch {
namespace runtime {
namespace etensor {

/**
 * Compute the number of elements based on the sizes of a tensor.
 */
ssize_t compute_numel(const TensorImpl::SizesType* sizes, ssize_t dim) {
  ET_CHECK_MSG(
      dim == 0 || sizes != nullptr,
      "Sizes must be provided for non-scalar tensors");
  ssize_t numel = 1; // Zero-dimensional tensors (scalars) have numel == 1.
  for (ssize_t i = 0; i < dim; ++i) {
    ET_CHECK_MSG(
        sizes[i] >= 0,
        "Size must be non-negative, got %d at dimension %zd",
        sizes[i],
        i);
    numel *= sizes[i];
  }
  return numel;
}
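// Illustrative examples: with sizes = {2, 3, 4} and dim = 3 this returns 24;
// with dim = 0 (a scalar) it returns 1 and the sizes pointer may be null.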

TensorImpl::TensorImpl(
    ScalarType type,
    ssize_t dim,
    SizesType* sizes,
    void* data,
    DimOrderType* dim_order,
    StridesType* strides,
    TensorShapeDynamism dynamism)
    : sizes_(sizes),
      dim_order_(dim_order),
      strides_(strides),
      data_(data),
      dim_(dim),
      numel_(compute_numel(sizes, dim)),
      numel_bound_(numel_),
      type_(type),
      shape_dynamism_(dynamism) {
  ET_CHECK_MSG(
      isValid(type_), "Invalid type %" PRId8, static_cast<int8_t>(type_));
  ET_CHECK_MSG(dim_ >= 0, "Dimension must be non-negative, got %zd", dim_);
}
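
// Illustrative usage sketch (not a prescribed API pattern): constructing a
// 2x3 float TensorImpl over caller-owned buffers. None of the arrays are
// copied, so they must outlive the TensorImpl.
//
//   TensorImpl::SizesType sizes[2] = {2, 3};
//   TensorImpl::DimOrderType dim_order[2] = {0, 1};
//   TensorImpl::StridesType strides[2] = {3, 1};
//   float data[6] = {};
//   TensorImpl impl(
//       ScalarType::Float,
//       /*dim=*/2,
//       sizes,
//       data,
//       dim_order,
//       strides,
//       TensorShapeDynamism::DYNAMIC_BOUND);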

size_t TensorImpl::nbytes() const {
  return numel_ * elementSize(type_);
}

// Return the size of one element of the tensor.
ssize_t TensorImpl::element_size() const {
  return elementSize(type_);
}

Error TensorImpl::internal_resize_contiguous(ArrayRef<SizesType> new_sizes) {
  ET_CHECK_OR_RETURN_ERROR(
      new_sizes.size() == static_cast<size_t>(dim_),
      NotSupported,
      "Attempted to change the tensor rank which is immutable: old=%zd, new=%zu",
      dim_,
      new_sizes.size());

  // Kernels don't check that the provided out tensors have the right size.
  // Instead they always attempt to resize the out tensor to the right size,
  // even when the out tensor already had the right size. Therefore, if we call
  // an op with inputs that will produce a zero-dimensional output, and the out
  // tensor that we pass has non-STATIC dynamism, then we will end up here.
  // Since we have already checked above that the out tensor has the right
  // number of dimensions, it must be that the provided out tensor has zero
  // rank, therefore it already has the right size and we should just return.
  if (dim_ == 0) {
    return Error::Ok;
  }
  switch (shape_dynamism_) {
    case TensorShapeDynamism::STATIC:
      ET_CHECK_OR_RETURN_ERROR(
          std::equal(sizes_, sizes_ + dim_, new_sizes.begin()),
          NotSupported,
          "Attempted to resize a static tensor");
      break;
    case TensorShapeDynamism::DYNAMIC_BOUND:
      // TODO(T175194371): Unbounded dynamic tensor resizing is not yet
      // supported: treat them as upper-bounded.
    case TensorShapeDynamism::DYNAMIC_UNBOUND: {
      const auto new_numel = compute_numel(new_sizes.data(), dim_);
      ET_CHECK_OR_RETURN_ERROR(
          new_numel <= numel_bound_,
          NotSupported,
          "Attempted to resize a bounded tensor with capacity of %zu elements to %zu elements.",
          numel_bound_,
          new_numel);

      if (strides_ && dim_order_) {
        auto error =
            dim_order_to_stride(new_sizes.data(), dim_order_, dim_, strides_);
        if (error != Error::Ok) {
          return error;
        }
      }
      numel_ = new_numel;
      std::copy(new_sizes.begin(), new_sizes.end(), sizes_);
    }
  }
  return Error::Ok;
}
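
// Illustrative example of the bounded-resize contract above: a DYNAMIC_BOUND
// tensor created with sizes {2, 3} has numel_bound_ == 6, so a later resize to
// {2, 2} succeeds (4 <= 6), while a resize to {4, 3} (12 > 6) fails with
// Error::NotSupported.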

} // namespace etensor
} // namespace runtime
} // namespace executorch