#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES

#include <ATen/native/Activation.h>

#include <cmath>

#include <thrust/tuple.h>

#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>

namespace at::native {
namespace {

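// Forward pass for softshrink: shrinks the input toward zero by lambd and
// zeroes out values inside the dead zone [-lambd, lambd]:
//   softshrink(x) = x - lambd  if x >  lambd
//                   x + lambd  if x < -lambd
//                   0          otherwise
// The dispatch macro instantiates the lambda for float, double, Half, and
// BFloat16; gpu_kernel applies it elementwise over the iterator.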
void softshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half,
      at::ScalarType::BFloat16,
      iter.dtype(),
      "softshrink_cuda",
      [&]() {
        auto lambd = value.to<scalar_t>();
        gpu_kernel(iter, [lambd] GPU_LAMBDA(scalar_t a) -> scalar_t {
          return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0));
        });
      });
}

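// Backward pass for the shrink family: the forward function is constant
// (zero) on [-lambd, lambd], so the gradient vanishes there and passes
// through unchanged everywhere else. Hardshrink and softshrink have the
// same derivative structure, so one kernel serves both.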
void shrink_backward_kernel(TensorIteratorBase& iter, const Scalar& value) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half,
      at::ScalarType::BFloat16,
      iter.dtype(),
      "shrink_backward_cuda",
      [&]() {
        auto lambd = value.to<scalar_t>();
        gpu_kernel(
            iter,
            [lambd] GPU_LAMBDA(
                scalar_t grad_val, scalar_t self_val) -> scalar_t {
              return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0)
                                                               : grad_val;
            });
      });
}
} // namespace

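// Wire the CUDA implementations into the dispatch stubs declared in
// ATen/native/Activation.h.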
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);

} // namespace at::native