// Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS

#include <ATen/core/Tensor.h>
#include <ATen/TensorMeta.h>
#include <ATen/TensorUtils.h>
#include <ATen/native/UpSample.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/upsample_linear1d.h>
#include <ATen/ops/upsample_linear1d_backward.h>
#include <ATen/ops/upsample_linear1d_backward_native.h>
#include <ATen/ops/upsample_linear1d_native.h>
#endif

namespace at::meta {

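// Structured-kernel meta function for the forward pass: validates the
// (N, C, W) input and the requested output length, then declares the output
// shape and options without running any computation.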
TORCH_META_FUNC(upsample_linear1d) (
    const Tensor& input,
    IntArrayRef output_size,
    bool align_corners,
    std::optional<double> scales
) {
  auto full_output_size = native::upsample_1d_common_check(input.sizes(), output_size);

  // Allow for empty batch size but not other dimensions
  TORCH_CHECK(
      (input.size(1) != 0 && input.size(2) != 0) && input.dim() == 3,
      "Non-empty 3D data tensor expected but got a tensor with sizes ",
      input.sizes());

  set_output_raw_strided(0, full_output_size, {}, input.options());
}

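// Meta function for the backward pass: grad_output must match the forward
// output shape implied by (input_size, output_size); the gradient w.r.t. the
// input is declared with the original input_size.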
TORCH_META_FUNC(upsample_linear1d_backward) (
    const Tensor& grad_output,
    IntArrayRef output_size,
    IntArrayRef input_size,
    bool align_corners,
    std::optional<double> scales
) {
  auto full_output_size = native::upsample_1d_common_check(input_size, output_size);

  TORCH_CHECK(
      input_size.size() == 3,
      "Expected input_size to have 3 elements, but got size ",
      input_size.size());

  check_dim_size(grad_output, 3, 0, full_output_size[0]);
  check_dim_size(grad_output, 3, 1, full_output_size[1]);
  check_dim_size(grad_output, 3, 2, full_output_size[2]);

  set_output_raw_strided(0, input_size, {}, grad_output.options());
}

} // namespace at::meta

namespace at::native {

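// CPU entry point of the structured kernel. The interpolation itself is
// delegated to the upsample_linear1d_kernel dispatch stub, which is filled in
// by the CPU kernel sources under native/cpu. Sampling follows the usual
// upsample convention: with align_corners=true the input and output endpoints
// are aligned; otherwise pixel centers are aligned and the optional `scales`
// value, when provided, takes precedence over the size ratio.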
TORCH_IMPL_FUNC(upsample_linear1d_out_cpu) (
    const Tensor& input,
    IntArrayRef output_size,
    bool align_corners,
    std::optional<double> scales,
    const Tensor& output
) {
  upsample_linear1d_kernel(kCPU, output, input, align_corners, scales);
}

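// Backward CPU entry point. grad_input must be zeroed first because the
// kernel accumulates each grad_output element into the two neighbouring
// input positions it was interpolated from.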
TORCH_IMPL_FUNC(upsample_linear1d_backward_out_cpu) (
    const Tensor& grad_output,
    IntArrayRef output_size,
    IntArrayRef input_size,
    bool align_corners,
    std::optional<double> scales,
    const Tensor& grad_input
) {
  grad_input.zero_();
  upsample_linear1d_backward_kernel(kCPU, grad_input, grad_output, align_corners, scales);
}

// vec variants

using at::native::upsample::compute_output_size;
using at::native::upsample::get_scale_value;

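// Overload backing the *.vec variant of the operator (used, e.g., by
// torch.nn.functional.interpolate when scale_factor is given): it derives the
// output length from either the explicit output_size or the scale factors and
// then forwards to the structured operator above.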
Tensor upsample_linear1d(
    const Tensor& input,
    at::OptionalIntArrayRef output_size,
    bool align_corners,
    std::optional<ArrayRef<double>> scale_factors) {
  auto osize = compute_output_size(input.sizes(), output_size, scale_factors);
  auto scale_w = get_scale_value(scale_factors, 0);
  return at::upsample_linear1d(input, osize, align_corners, scale_w);
}
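
// Illustrative usage sketch (not part of this file); the tensor sizes and
// values below are assumptions for the example only:
//   at::Tensor t = at::rand({2, 3, 8});  // (N, C, W)
//   // Explicit output length, structured overload:
//   at::Tensor up = at::upsample_linear1d(t, at::IntArrayRef{16},
//                                         /*align_corners=*/false,
//                                         /*scales=*/std::nullopt);
//   // Scale-factor route through the vec overload defined above:
//   std::vector<double> factors{2.0};
//   at::Tensor up2 = at::upsample_linear1d(t, std::nullopt,
//                                          /*align_corners=*/false, factors);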

DEFINE_DISPATCH(upsample_linear1d_kernel);
DEFINE_DISPATCH(upsample_linear1d_backward_kernel);
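// The two dispatch stubs above are populated elsewhere via REGISTER_DISPATCH
// by the architecture-specific CPU kernel implementations.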

} // namespace at::native