#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/cpu/MaxUnpoolKernel.h>

#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/Parallel.h>
#include <ATen/native/cpu/utils.h>
#include <c10/util/irange.h>

#include <optional>

namespace at::native {

namespace {

template <typename scalar_t, bool is_3d = false>
void cpu_max_unpool(
    Tensor& output_,
    const Tensor& input,
    const Tensor& indices) {
  auto output = output_.contiguous();

  auto input_data = input.const_data_ptr<scalar_t>();
  auto indices_data = indices.const_data_ptr<int64_t>();
  auto output_data = output.data_ptr<scalar_t>();

  // NB: input tensor dimensions:
  // MaxUnpool2d:
  //    dim = 3: CHW
  //    dim = 4: NCHW
  // MaxUnpool3d:
  //    dim = 4: CDHW
  //    dim = 5: NCDHW
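  //
  // `indices` holds, for each input element, the flattened offset of its
  // destination within a single output plane; the bounds check below enforces
  // 0 <= index < output_image_size.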

  int64_t numel = input.numel();
  int64_t ndim = input.ndimension();

  // treat batch size and channels as one dimension
  // and the feature map as another dimension
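  // e.g. an NCHW input of shape [2, 3, 4, 5] is handled as channels = 2 * 3 = 6
  // planes of 4 * 5 = 20 elements each.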
  [[maybe_unused]] int64_t channels, output_depth, output_height, output_width;
  if constexpr (is_3d) {
    TORCH_CHECK(ndim == 4 || ndim == 5, "MaxUnpool3d: expect input to be 4d or 5d tensor.");
    channels = ndim == 4 ? input.size(0) : input.size(0) * input.size(1);
    output_depth = output.size(-3);
    output_height = output.size(-2);
    output_width = output.size(-1);
  } else {
    TORCH_CHECK(ndim == 3 || ndim == 4, "MaxUnpool2d: expect input to be 3d or 4d tensor.");
    channels = ndim == 3 ? input.size(0) : input.size(0) * input.size(1);
    output_depth = 1;
    output_height = output.size(-2);
    output_width = output.size(-1);
  }
  int64_t input_image_size = numel / channels;
  int64_t output_image_size = output.numel() / channels;

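  // A bad index found by a worker thread is recorded here and the error is raised
  // only after parallel_for returns, so no exception is thrown from inside the
  // parallel region; the release fence below is meant to make the worker's write
  // visible to the main thread before the post-loop check.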
  std::optional<int64_t> optional_error_index;

  // parallel on dim N, C, D, H, W: [channels, input_image_size]
  at::parallel_for(0, numel, 0, [&](int64_t begin, int64_t end) {
    int64_t c = 0;
    int64_t ip = 0;
    data_index_init(begin, c, channels, ip, input_image_size);
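    // data_index_init/data_index_step (ATen/native/cpu/utils.h) decompose the flat
    // index `begin` into (c, ip) coordinates and then advance them one element at a
    // time, so no div/mod is needed inside the loop.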

    for (const auto i : c10::irange(begin, end)) {
      scalar_t* output_ptr = output_data + c * output_image_size;

      int64_t maxp = indices_data[i];
      if (maxp < 0 || maxp >= output_image_size) {
        optional_error_index = maxp;
        std::atomic_thread_fence(std::memory_order_release);
      } else {
        output_ptr[maxp] = input_data[i];
      }

      // move on to next input index
      data_index_step(c, channels, ip, input_image_size);
    }
  });

  if (optional_error_index) {
    if constexpr (is_3d) {
      AT_ERROR("Found an invalid max index: ", optional_error_index.value(),
          " (output volumes are of size ", output_depth,
          "x", output_height, "x", output_width, ")");
    } else {
      AT_ERROR("Found an invalid max index: ", optional_error_index.value(),
          " (output volumes are of size ", output_height,
          "x", output_width, ")");
    }
  }

  if (!output_.is_contiguous()) {
    output_.copy_(output);
  }
}

template <typename scalar_t>
void cpu_max_unpool_channels_last(
    Tensor& output_,
    const Tensor& input,
    const Tensor& indices) {
  TORCH_CHECK(input.ndimension() == 4,
      "max_unpool2d with channels last format supports tensors with 4 dims");
  auto memory_format = at::MemoryFormat::ChannelsLast;
  auto output = output_.contiguous(memory_format);

  auto input_data = input.const_data_ptr<scalar_t>();
  auto indices_data = indices.const_data_ptr<int64_t>();
  auto output_data = output.data_ptr<scalar_t>();

  int64_t nbatch = input.size(0);
  int64_t channels = input.size(1);
  int64_t input_height = input.size(2);
  int64_t input_width = input.size(3);
  int64_t output_height = output.size(2);
  int64_t output_width = output.size(3);
  int64_t input_image_size = input_height * input_width;
  int64_t output_image_size = output_height * output_width;

  std::optional<int64_t> optional_error_index;

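  // In ChannelsLast (NHWC) memory the `channels` values of one spatial location are
  // contiguous, so the work is parallelized over N*H*W and the inner loop walks the
  // channel dimension; the scatter destination is output_ptr[maxp * channels + c].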
  // parallel on dim N, H, W
  at::parallel_for(0, nbatch * input_image_size, 0, [&](int64_t begin, int64_t end) {
    int64_t n = 0;
    int64_t ip = 0;
    data_index_init(begin, n, nbatch, ip, input_image_size);

    for (const auto i : c10::irange(begin, end)) {
      const scalar_t* input_ptr = input_data + i * channels;
      const int64_t* indices_ptr = indices_data + i * channels;
      scalar_t* output_ptr = output_data + n * output_image_size * channels;

      // can't do scatter on avx2 (only available on avx512)
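      // (AVX2 provides gather but no scatter instruction, hence the plain scalar loop.)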
      for (const auto c : c10::irange(channels)) {
        int64_t maxp = indices_ptr[c];
        if (maxp < 0 || maxp >= output_image_size) {
          optional_error_index = maxp;
          std::atomic_thread_fence(std::memory_order_release);
        } else {
          output_ptr[maxp * channels + c] = input_ptr[c];
        }
      }

      // move on to next input index
      data_index_step(n, nbatch, ip, input_image_size);
    }
  });

  if (optional_error_index) {
    AT_ERROR("Found an invalid max index: ", optional_error_index.value(),
        " (output volumes are of size ", output_height,
        "x", output_width, ")");
  }

  if (!output_.is_contiguous(memory_format)) {
    output_.copy_(output);
  }
}

template <typename scalar_t, bool is_3d = false>
void cpu_max_unpool_backward(
    Tensor& grad_input_,
    const Tensor& grad_output,
    const Tensor& indices) {
  auto grad_input = grad_input_.contiguous();

  auto grad_output_data = grad_output.const_data_ptr<scalar_t>();
  auto indices_data = indices.const_data_ptr<int64_t>();
  auto grad_input_data = grad_input.mutable_data_ptr<scalar_t>();

  int64_t numel = grad_input.numel();
  int64_t ndim = grad_output.ndimension();

  // treat batch size and channels as one dimension
  // and the feature map as another dimension
  [[maybe_unused]] int64_t channels, output_depth, output_height, output_width;
  if constexpr (is_3d) {
    TORCH_CHECK(ndim == 4 || ndim == 5, "MaxUnpool3d_backward: expect grad_output to be 4d or 5d tensor.");
    channels = ndim == 4 ? grad_output.size(0) : grad_output.size(0) * grad_output.size(1);
    output_depth = grad_output.size(-3);
    output_height = grad_output.size(-2);
    output_width = grad_output.size(-1);
  } else {
    TORCH_CHECK(ndim == 3 || ndim == 4, "MaxUnpool2d_backward: expect grad_output to be 3d or 4d tensor.");
    channels = ndim == 3 ? grad_output.size(0) : grad_output.size(0) * grad_output.size(1);
    output_depth = 1;
    output_height = grad_output.size(-2);
    output_width = grad_output.size(-1);
  }
  int64_t input_image_size = numel / channels;
  int64_t output_image_size = grad_output.numel() / channels;

  std::optional<int64_t> optional_error_index;

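  // The backward pass is a gather: each grad_input element reads grad_output at the
  // offset recorded in `indices`, i.e. the location the forward pass scattered it to.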
  // parallel on dim N, C, D, H, W
  at::parallel_for(0, numel, 0, [&](int64_t begin, int64_t end) {
    int64_t c = 0;
    int64_t ip = 0;
    data_index_init(begin, c, channels, ip, input_image_size);

    for (const auto i : c10::irange(begin, end)) {
      const scalar_t* grad_output_ptr = grad_output_data + c * output_image_size;

      int64_t maxp = indices_data[i];
      if (maxp < 0 || maxp >= output_image_size) {
        optional_error_index = maxp;
        std::atomic_thread_fence(std::memory_order_release);
      } else {
        grad_input_data[i] = grad_output_ptr[maxp];
      }

      // move on to next input index
      data_index_step(c, channels, ip, input_image_size);
    }
  });

  if (optional_error_index) {
    if constexpr (is_3d) {
      AT_ERROR("invalid max index ", optional_error_index.value(),
          ", odepth= ", output_depth,
          ", owidth= ", output_width,
          ", oheight= ", output_height);
    } else {
      AT_ERROR("invalid max index ", optional_error_index.value(),
          ", owidth= ", output_width,
          ", oheight= ", output_height);
    }
  }

  if (!grad_input_.is_contiguous()) {
    grad_input_.copy_(grad_input);
  }
}

void max_unpool2d_kernel_impl(
    Tensor& output,
    const Tensor& input,
    const Tensor& indices) {
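  // Pick the kernel from the suggested memory format. AT_DISPATCH_FLOATING_TYPES_AND2
  // with kHalf and kBFloat16 instantiates the lambda for float, double, Half and BFloat16.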
  switch(input.suggest_memory_format()) {
    case at::MemoryFormat::Contiguous: {
      AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_unpool2d", [&] {
        cpu_max_unpool<scalar_t, /*is_3d*/false>(output, input, indices);
      });
      break;
    }
    case at::MemoryFormat::ChannelsLast: {
      AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_unpool2d_channels_last", [&] {
        cpu_max_unpool_channels_last<scalar_t>(output, input, indices);
      });
      break;
    }
    default:
      TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous");
  }
}

void max_unpool3d_kernel_impl(
    Tensor& output,
    const Tensor& input,
    const Tensor& indices) {
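  // Only the contiguous layout is handled here; this file has no ChannelsLast3d path
  // for the 3d kernel.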
  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_unpool3d", [&] {
    cpu_max_unpool<scalar_t, /*is_3d*/true>(output, input, indices);
  });
}

} // anonymous namespace

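// Register these CPU implementations with the dispatch stubs declared in the included
// ATen/native/cpu/MaxUnpoolKernel.h header.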
REGISTER_DISPATCH(max_unpool2d_kernel, &max_unpool2d_kernel_impl);
REGISTER_DISPATCH(max_unpool3d_kernel, &max_unpool3d_kernel_impl);

} // namespace at::native