#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES

#include <ATen/native/Activation.h>

#include <cmath>

#include <thrust/tuple.h>

#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>

namespace at::native {
namespace {

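// Hardshrink zeroes inputs inside [-lambda, lambda] and passes other values
// through unchanged: out = x if |x| > lambda, else 0.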
void hardshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half,
      at::ScalarType::BFloat16,
      iter.dtype(),
      "hardshrink_cuda",
      [&]() {
        auto lambd = value.to<scalar_t>();
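        // gpu_kernel (from Loops.cuh) launches an elementwise CUDA kernel
        // that applies the lambda to every element of the iterator's operands.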
        gpu_kernel(iter, [lambd] GPU_LAMBDA(scalar_t a) -> scalar_t {
          return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
        });
      });
}
} // namespace

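// Registers this kernel as the CUDA implementation of hardshrink_stub, the
// dispatch point used by the hardshrink operator.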
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);

} // namespace at::native
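
// A minimal usage sketch (an assumption for illustration, not part of this
// file), exercising the kernel through the public ATen entry point:
//
//   at::Tensor x = at::randn({8}, at::kCUDA);
//   at::Tensor y = at::hardshrink(x, /*lambd=*/0.5);
//   // elements of x with |x| <= 0.5 become 0 in y; the rest are copied.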