// aten/src/ATen/native/cuda/FillKernel.cu
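// Defining TORCH_ASSERT_NO_OPERATORS keeps this translation unit from
// pulling in the full ATen operator headers, which reduces compile time.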
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/Fill.h>
#include <c10/core/Scalar.h>

namespace at::native {

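// Nullary functor that returns a fixed value: gpu_kernel invokes it once per
// output element, so every element of the output is set to `value`.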
template<typename scalar_t>
struct FillFunctor {
  FillFunctor(scalar_t v): value(v) {}
  __device__ __forceinline__ scalar_t operator() () const {
    return value;
  }
  private:
    scalar_t value;
};

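// Fills the output of `iter` with `value`. AT_DISPATCH_V2 instantiates the
// lambda for every supported dtype (all standard and complex types plus
// complex-half, bool, half, bfloat16, the float8 types, and the barebones
// unsigned types), converts the Scalar to that dtype, and launches an
// elementwise CUDA kernel via gpu_kernel.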
void fill_kernel_cuda(TensorIterator& iter, const Scalar& value) {
  AT_DISPATCH_V2(iter.dtype(), "fill_cuda", AT_WRAP([&]() {
    gpu_kernel(iter, FillFunctor<scalar_t>(value.to<scalar_t>()));
  }), AT_EXPAND(AT_ALL_TYPES_AND_COMPLEX), kComplexHalf, kBool, kHalf, kBFloat16, AT_EXPAND(AT_FLOAT8_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}

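// Register this kernel as the CUDA implementation of fill_stub, so operations
// such as Tensor::fill_ dispatch here for CUDA tensors.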
REGISTER_DISPATCH(fill_stub, &fill_kernel_cuda);

} // namespace at::native