// #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/SparseTensorUtils.h>
#include <ATen/core/Tensor.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
#include <ATen/ops/_sparse_mm_reduce_impl_native.h>
#include <ATen/ops/abs.h>
#include <ATen/ops/abs_native.h>
#include <ATen/ops/asin.h>
#include <ATen/ops/asin_native.h>
#include <ATen/ops/asinh.h>
#include <ATen/ops/asinh_native.h>
#include <ATen/ops/atan.h>
#include <ATen/ops/atan_native.h>
#include <ATen/ops/atanh.h>
#include <ATen/ops/atanh_native.h>
#include <ATen/ops/ceil.h>
#include <ATen/ops/ceil_native.h>
#include <ATen/ops/deg2rad.h>
#include <ATen/ops/deg2rad_native.h>
#include <ATen/ops/erf.h>
#include <ATen/ops/erf_native.h>
#include <ATen/ops/erfinv.h>
#include <ATen/ops/erfinv_native.h>
#include <ATen/ops/expm1.h>
#include <ATen/ops/expm1_native.h>
#include <ATen/ops/floor.h>
#include <ATen/ops/floor_native.h>
#include <ATen/ops/frac.h>
#include <ATen/ops/frac_native.h>
#include <ATen/ops/isinf.h>
#include <ATen/ops/isinf_native.h>
#include <ATen/ops/isnan.h>
#include <ATen/ops/isnan_native.h>
#include <ATen/ops/isneginf.h>
#include <ATen/ops/isneginf_native.h>
#include <ATen/ops/isposinf.h>
#include <ATen/ops/isposinf_native.h>
#include <ATen/ops/log1p.h>
#include <ATen/ops/log1p_native.h>
#include <ATen/ops/nan_to_num.h>
#include <ATen/ops/nan_to_num_native.h>
#include <ATen/ops/rad2deg.h>
#include <ATen/ops/rad2deg_native.h>
#include <ATen/ops/relu.h>
#include <ATen/ops/relu_native.h>
#include <ATen/ops/round.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/sgn.h>
#include <ATen/ops/sgn_native.h>
#include <ATen/ops/sign.h>
#include <ATen/ops/sign_native.h>
#include <ATen/ops/signbit.h>
#include <ATen/ops/signbit_native.h>
#include <ATen/ops/sin.h>
#include <ATen/ops/sin_native.h>
#include <ATen/ops/sinh.h>
#include <ATen/ops/sinh_native.h>
#include <ATen/ops/sparse_resize_native.h>
#include <ATen/ops/sqrt.h>
#include <ATen/ops/sqrt_native.h>
#include <ATen/ops/tan.h>
#include <ATen/ops/tan_native.h>
#include <ATen/ops/tanh.h>
#include <ATen/ops/tanh_native.h>
#include <ATen/ops/threshold_backward.h>
#include <ATen/ops/threshold_backward_native.h>
#include <ATen/ops/trunc.h>
#include <ATen/ops/trunc_native.h>
#include <ATen/ops/is_pinned_native.h>
#include <ATen/ops/_pin_memory_native.h>
#endif

namespace at::native {
namespace {

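// Coalesces `self`, applies `ufunc` to its values, and rebuilds a sparse
// COO tensor that reuses (a clone of) the coalesced indices, so the
// sparsity pattern is preserved and the result is already coalesced.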
template <typename Ufunc>
Tensor coalesced_unary_ufunc(const Tensor &self, const Ufunc &ufunc) {
  TORCH_INTERNAL_ASSERT(self.is_sparse());
  const auto input = self.coalesce();
  Tensor out_values = ufunc(input.values());
  Tensor result = at::_sparse_coo_tensor_with_dims_and_tensors(
      input.sparse_dim(),
      input.dense_dim(),
      input.sizes(),
      input.indices().clone(),
      out_values,
      input.options().dtype(out_values.scalar_type()),
      /*is_coalesced=*/ true);
  return result;
}

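// In-place variant: applies `ufunc` directly to the stored values. Callers
// (the macros below) must first check that `self` is coalesced, since an
// uncoalesced tensor may hold duplicate entries that would combine
// incorrectly under a non-linear ufunc.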
template <typename Ufunc>
Tensor& coalesced_unary_ufunc_(Tensor &self, const Ufunc &ufunc) {
  TORCH_INTERNAL_ASSERT(self.is_sparse());
  auto values = self._values();
  ufunc(values);
  return self;
}

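// Variant with an explicit result tensor. If `result` aliases `self`, this
// degenerates to the in-place case; otherwise it resizes `result`, copies
// over the coalesced indices, and writes the transformed values through
// ufunc(input_values, result_values).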
template <typename Ufunc>
Tensor& coalesced_unary_ufunc_out(const Tensor &self, Tensor &result, const Ufunc &ufunc) {
  if (self.is_same(result)) {
    TORCH_CHECK(self.is_coalesced(), "expected coalesced tensor for inplace operation");
    auto values = self._values();
    ufunc(values, values);
    return result;
  }

  TORCH_CHECK(self.is_sparse() && result.is_sparse());
  const auto input = self.coalesce();
  sparse_resize_(result, input.sizes(), input.sparse_dim(), input.dense_dim());
  auto *input_impl = sparse::get_sparse_impl(input);
  auto *result_impl = sparse::get_sparse_impl(result);

  auto input_values = input_impl->values();
  auto result_values = result_impl->values();
  result_values.resize_(input_values.sizes());
  ufunc(input_values, result_values);

  auto input_indices = input_impl->indices();
  auto result_indices = result_impl->indices();
  result_indices.resize_(input_indices.sizes());
  result_indices.copy_(input_indices);
  result._coalesced_(true);
  return result;
}

}  // namespace (anonymous)

// Generic formulation for unary operators which map 0 -> 0, so we can
// simply transform self.values() and preserve the sparsity pattern.
//
// Any non-linear function requires the tensor to be coalesced before
// we can calculate the result. This also means in-place calculations
// are only possible on coalesced tensors.
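//
// A minimal usage sketch (illustrative only; the tensor below is made up,
// and this assumes the libtorch C++ frontend from <torch/torch.h>):
//
//   // 2x2 COO tensor with non-zeros at (0, 1) and (1, 0).
//   auto indices = torch::tensor({{0, 1}, {1, 0}}, torch::kLong);
//   auto values = torch::tensor({-1.5, 2.5});
//   auto s = torch::sparse_coo_tensor(indices, values, {2, 2});
//   // Dispatches to abs_sparse (generated by the macros below); only the
//   // stored values change, the indices / sparsity pattern are preserved.
//   auto r = torch::abs(s);
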
#define COALESCED_UNARY_UFUNC_FUNCTIONAL(op_name)   \
  Tensor op_name##_sparse(const Tensor &self) {     \
    return coalesced_unary_ufunc(                   \
        self, [](const Tensor &t) {                 \
          return at::op_name(t);                    \
        });                                         \
  }

#define COALESCED_UNARY_UFUNC_NO_INPLACE(op_name)                       \
  COALESCED_UNARY_UFUNC_FUNCTIONAL(op_name)                             \
  Tensor& op_name##_sparse_out(const Tensor &self,                      \
                               Tensor &out) {                           \
    return coalesced_unary_ufunc_out(                                   \
        self, out, [](const Tensor &t, Tensor &out) {                   \
          return at::op_name##_outf(t, out);                            \
        });                                                             \
  }

#define COALESCED_UNARY_UFUNC(op_name)                                  \
  COALESCED_UNARY_UFUNC_NO_INPLACE(op_name)                             \
  Tensor& op_name##_sparse_(Tensor &self) {                             \
    TORCH_CHECK(self.is_coalesced(),                                    \
                #op_name "_ requires coalesced input");                 \
    return coalesced_unary_ufunc_(self, [](Tensor &t) {                 \
      return t.op_name##_();                                            \
    });                                                                 \
  }
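
// As an example of the expansion, COALESCED_UNARY_UFUNC(abs) defines three
// kernels: abs_sparse (functional), abs_sparse_out (explicit output), and
// abs_sparse_ (in-place, which requires an already-coalesced input).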

COALESCED_UNARY_UFUNC(abs);
COALESCED_UNARY_UFUNC(asin);
COALESCED_UNARY_UFUNC(asinh);
COALESCED_UNARY_UFUNC(atan);
COALESCED_UNARY_UFUNC(atanh);
COALESCED_UNARY_UFUNC(ceil);
COALESCED_UNARY_UFUNC(deg2rad);
COALESCED_UNARY_UFUNC(erf);
COALESCED_UNARY_UFUNC(erfinv);
COALESCED_UNARY_UFUNC(expm1);
COALESCED_UNARY_UFUNC(floor);
COALESCED_UNARY_UFUNC(frac);
COALESCED_UNARY_UFUNC(log1p);
COALESCED_UNARY_UFUNC(round);
COALESCED_UNARY_UFUNC(rad2deg);
COALESCED_UNARY_UFUNC(sign);
COALESCED_UNARY_UFUNC(sgn);
COALESCED_UNARY_UFUNC(sin);
COALESCED_UNARY_UFUNC(sinh);
COALESCED_UNARY_UFUNC(sqrt);
COALESCED_UNARY_UFUNC(tan);
COALESCED_UNARY_UFUNC(tanh);
COALESCED_UNARY_UFUNC(trunc);
// relu has no prototype declared, so it may be unused in PyTorch. Keep it
// and suppress the missing-prototype warning until that is verified.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-prototypes"
COALESCED_UNARY_UFUNC(relu);
#pragma clang diagnostic pop

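// These predicates produce bool-valued outputs, so an in-place variant
// would have to change the input's dtype; only the functional and out
// variants are generated.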
COALESCED_UNARY_UFUNC_NO_INPLACE(signbit);
COALESCED_UNARY_UFUNC_NO_INPLACE(isneginf);
COALESCED_UNARY_UFUNC_NO_INPLACE(isposinf);

COALESCED_UNARY_UFUNC_FUNCTIONAL(isnan);
COALESCED_UNARY_UFUNC_FUNCTIONAL(isinf);

Tensor isinf_sparse_meta(const Tensor& self) {
  TORCH_CHECK_NOT_IMPLEMENTED(0, "nyi isinf for SparseMeta");
}

// threshold_backward is not unary, but it is the backward of relu, which
// is unary.
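// threshold_backward(grad, self, threshold) passes grad through where
// self > threshold and zeroes it elsewhere; with threshold = 0 this is
// exactly the relu backward.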
Tensor threshold_backward_sparse(
    const Tensor& grad_output,
    const Tensor& self,
    const Scalar& threshold) {
  const auto grad = [&]() {
    if (!grad_output._nnz() && self._nnz() > 0) {
      return at::sparse::zeros_like_with_indices(self);
    } else {
      return grad_output;
    }
  }();
  const auto self_v = [&self]() {
    if (self.is_coalesced()) {
      return self.values();
    } else {
      return self.coalesce().values();
    }
  }();
  return coalesced_unary_ufunc(grad, [&](const Tensor& t) {
    return at::threshold_backward(t, self_v, threshold);
  });
}

Tensor& threshold_backward_sparse_out(
    const Tensor& grad_output,
    const Tensor& self,
    const Scalar& threshold,
    Tensor& grad_input) {
  const auto grad = [&]() {
    if (!grad_output._nnz() && self._nnz() > 0) {
      return at::sparse::zeros_like_with_indices(self);
    } else {
      return grad_output;
    }
  }();
  auto self_v = [&self]() {
    if (self.is_coalesced()) {
      return self.values();
    } else {
      return self.coalesce().values();
    }
  }();
  return coalesced_unary_ufunc_out(
      grad, grad_input, [&](const Tensor& t, Tensor& out) {
        return at::threshold_backward_outf(t, self_v, threshold, out);
      });
}

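// nan_to_num maps 0 -> 0 for any choice of replacement values, so it fits
// the coalesced-ufunc pattern above: only the stored values are rewritten
// and the implicit zeros stay zero.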
Tensor nan_to_num_sparse(
    const Tensor &self, std::optional<double> nan,
    std::optional<double> posinf, std::optional<double> neginf) {
  return coalesced_unary_ufunc(
      self, [&](const Tensor &t) {
        return at::nan_to_num(t, nan, posinf, neginf);
      });
}
Tensor& nan_to_num_sparse_out(
    const Tensor &self, std::optional<double> nan,
    std::optional<double> posinf, std::optional<double> neginf,
    Tensor &out) {
  return coalesced_unary_ufunc_out(
      self, out, [&](const Tensor &t, Tensor &out) {
        return at::nan_to_num_outf(t, nan, posinf, neginf, out);
      });
}
Tensor& nan_to_num_sparse_(
    Tensor &self, std::optional<double> nan,
    std::optional<double> posinf, std::optional<double> neginf) {
  TORCH_CHECK(self.is_coalesced(), "nan_to_num_ requires coalesced input");
  return nan_to_num_sparse_out(self, nan, posinf, neginf, self);
}

}  // namespace at::native