#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES

#include <ATen/native/Activation.h>

#include <cmath>

#include <thrust/tuple.h>

#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>

namespace at::native {
namespace {

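// Forward pass: hardsigmoid(x) = clamp(x + 3, 0, 6) / 6, i.e. relu6(x + 3) / 6.
// Arithmetic is carried out in opmath_t (float when scalar_t is Half or
// BFloat16) to avoid precision loss in the low-precision types.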
void hardsigmoid_kernel(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half,
      at::ScalarType::BFloat16,
      iter.dtype(),
      "hardsigmoid_cuda",
      [&]() {
        using opmath_t = at::opmath_type<scalar_t>;
        const opmath_t zero(0.0f);
        const opmath_t one_sixth(1.0f / 6.0f);
        const opmath_t three(3.0f);
        const opmath_t six(6.0f);
        gpu_kernel(
            iter,
            [zero, one_sixth, three, six] GPU_LAMBDA(
                scalar_t self_val) -> scalar_t {
              opmath_t x = static_cast<opmath_t>(self_val);
              return std::min(std::max(x + three, zero), six) * one_sixth;
            });
      });
}

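// Backward pass: d/dx hardsigmoid(x) = 1/6 on the open interval (-3, 3) and
// 0 elsewhere, so the incoming gradient is scaled by 1/6 inside that band
// and zeroed outside it.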
void hardsigmoid_backward_kernel(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half,
      at::ScalarType::BFloat16,
      iter.dtype(),
      "hardsigmoid_backward_cuda",
      [&]() {
        using opmath_t = at::opmath_type<scalar_t>;
        const opmath_t zero(0.0f);
        const opmath_t three(3.0f);
        const opmath_t neg_three(-3.0f);
        const opmath_t one_sixth(1.0f / 6.0f);
        gpu_kernel(
            iter,
            [zero, three, neg_three, one_sixth] GPU_LAMBDA(
                scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
              opmath_t grad_val = static_cast<opmath_t>(grad_val_);
              opmath_t self_val = static_cast<opmath_t>(self_val_);
              return (self_val > neg_three && self_val < three)
                  ? grad_val * one_sixth
                  : zero;
            });
      });
}

} // namespace

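// Register the CUDA implementations with the dispatch stubs declared in
// ATen/native/Activation.h.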
REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel);
REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel);

} // namespace at::native