// aten/src/ATen/native/cuda/ZetaKernel.cu
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>

namespace at::native {
namespace {

/*
 * This function is derived from the implementation of the zeta function in the Cephes Math Library.
 * See note [3-Clause BSD License for the Cephes Math Library].
 */
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_CUDA char zeta_name[] = "zeta";
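// zeta_kernel_cuda computes the Hurwitz zeta function zeta(x, q) elementwise
// over the iterator's operands. When AT_USE_JITERATOR() is enabled, the
// kernel body is JIT-compiled at runtime from zeta_string (the stringified
// implementation in ATen/native/cuda/Math.cuh), which trims ahead-of-time
// compile time and binary size; otherwise a precompiled GPU lambda calling
// zeta<scalar_t> is dispatched instead.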
void zeta_kernel_cuda(TensorIteratorBase& iter) {
  #if AT_USE_JITERATOR()
    AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "zeta_cuda", [&]() {
      opmath_jitted_gpu_kernel_with_scalars</*name=*/zeta_name,
                                     /*return_dtype=*/ scalar_t,
                                     /*f_inputs_dtype=*/ scalar_t>(iter, zeta_string);
    });
  #else
    AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "zeta_cuda", [&]() {
      gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t x, scalar_t q) -> scalar_t {
        return zeta<scalar_t, /*is_cuda=*/true>(x, q);
      });
    });
  #endif // jiterator
}
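// A minimal usage sketch (illustrative only, not part of this file; this
// translation unit itself cannot call operators because of
// TORCH_ASSERT_NO_OPERATORS): on CUDA tensors, at::special_zeta routes
// through zeta_stub to the kernel above. Each element here evaluates
// zeta(2, 1), i.e. the Riemann zeta value pi^2/6 ~= 1.6449.
//
//   at::Tensor x = at::full({3}, 2.0, at::kCUDA);
//   at::Tensor q = at::ones({3}, at::kCUDA);
//   at::Tensor out = at::special_zeta(x, q);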

}  // namespace (anonymous)

REGISTER_DISPATCH(zeta_stub, &zeta_kernel_cuda);
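// zeta_stub is the DispatchStub declared in ATen/native/BinaryOps.h; this
// registration installs zeta_kernel_cuda as its CUDA implementation, selected
// at runtime from the tensors' device type.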

} // namespace at::native