#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES

#include <ATen/native/Activation.h>

#include <cmath>

#include <thrust/tuple.h>

#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>

namespace at::native {

// -----------------------------------
// prelu
// -----------------------------------
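// Forward: out = input if input > 0, otherwise weight * input, where weight is
// the (already broadcast) learnable negative slope applied elementwise.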
void prelu_kernel(TensorIterator &iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), "prelu_cuda", [&] {
    gpu_kernel(iter,
      [] GPU_LAMBDA (scalar_t input, scalar_t weight) -> scalar_t {
        return (input > 0) ? input : weight * input;
      });
  });
}

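// Backward: for each element,
//   grad_input  = grad             if input > 0, otherwise weight * grad
//   grad_weight = 0                if input > 0, otherwise input * grad
// Both per-element gradients are produced as two outputs of the iterator.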
void prelu_backward_kernel(TensorIterator &iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), "prelu_backward_cuda", [&] {
    gpu_kernel_multiple_outputs(iter,
      [] GPU_LAMBDA (scalar_t input, scalar_t weight, scalar_t grad) -> thrust::tuple<scalar_t, scalar_t> {
        auto mask = input > 0;
        auto grad_input = mask ? grad : weight * grad;
        auto grad_weight = mask ? scalar_t{0} : input * grad;
        return {grad_input, grad_weight};
      });
  });
}

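// Register the CUDA kernels with the PReLU dispatch stubs (declared in the
// included ATen/native/Activation.h) so device dispatch picks them up.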
REGISTER_DISPATCH(prelu_stub, &prelu_kernel);
REGISTER_DISPATCH(prelu_backward_stub, &prelu_backward_kernel);

} // namespace at::native