/*
 * Copyright (c) 2017-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/kernels/CpuDirectConv2dKernel.h"
#include "src/cpu/kernels/directconv2d/list.h"

#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/CPP/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

using namespace arm_compute::detail;

namespace arm_compute
{
namespace cpu
{
namespace kernels
{
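// Registry of available micro-kernels: each entry pairs a name and a selector
// predicate (data type, data layout and ISA features) with the function that
// implements the direct convolution for that configuration.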
static const std::vector<CpuDirectConv2dKernel::DirectConv2dKernel> available_kernels =
{
    {
        "neon_fp32_nhwc_directconv2d",
        [](const DataTypeDataLayoutISASelectorData & data) { return data.dt == DataType::F32 && data.dl == DataLayout::NHWC; },
        REGISTER_FP32_NEON(arm_compute::cpu::kernels::neon_fp32_nhwc_directconv2d)
    },
    {
        "neon_fp32_nchw_directconv2d",
        [](const DataTypeDataLayoutISASelectorData & data) { return data.dt == DataType::F32 && data.dl == DataLayout::NCHW; },
        REGISTER_FP32_NEON(arm_compute::cpu::kernels::neon_fp32_nchw_directconv2d)
    },
    {
        "neon_fp16_nchw_directconv2d",
        [](const DataTypeDataLayoutISASelectorData & data) { return data.dt == DataType::F16 && data.dl == DataLayout::NCHW && data.isa.fp16; },
        REGISTER_FP16_NEON(arm_compute::cpu::kernels::neon_fp16_nchw_directconv2d)
    },
};

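// Validates the argument combination: supported data types (F32, and F16 where the CPU supports it),
// matching src/weights channels, square weights with at most 4 dimensions, NHWC restricted to F32, and,
// if the destination is already initialized, a shape and data type consistent with the computed output.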
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const PadStrideInfo &conv_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_RETURN_ERROR_ON(src->data_layout() == DataLayout::UNKNOWN);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights);

    const DataLayout data_layout = src->data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(channel_idx) != src->dimension(channel_idx));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(width_idx) != weights->dimension(height_idx));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
    ARM_COMPUTE_RETURN_ERROR_ON(data_layout == DataLayout::NHWC && src->data_type() != DataType::F32);
    ARM_COMPUTE_UNUSED(width_idx);
    // Checks performed when output is configured
    if(dst->total_size() != 0)
    {
        TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info);

        DataType data_type = src->data_type();

        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape);
        ARM_COMPUTE_RETURN_ERROR_ON(dst->data_type() != data_type);
    }

    return Status{};
}

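// Computes the execution window over the destination tensor; no padding is required,
// so the maximum window with unit steps is always valid.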
std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITensorInfo *dst)
{
    ARM_COMPUTE_ERROR_ON(src->data_layout() == DataLayout::UNKNOWN);
    ARM_COMPUTE_UNUSED(src);

    Window win{};
    bool   window_changed = false;

    // Configure window without any padding
    win = calculate_max_window(*dst, Steps());

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, win);
}

void CpuDirectConv2dKernel::configure(ITensorInfo *src, ITensorInfo *weights, ITensorInfo *dst, const PadStrideInfo &conv_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);

    _conv_info   = conv_info;
    _data_layout = src->data_layout();
    _kernel_size = weights->dimension(get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH));

    // Get convolved dimensions
    TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info);

    DataType data_type = src->data_type();

    // Output auto-initialization if not yet initialized
    auto_init_if_empty(*dst, output_shape, 1, data_type);

    // Perform validation step
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, weights, dst, conv_info));

    // Configure kernel window
    auto win_config = validate_and_configure_window(src, dst);
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    ICpuKernel::configure(win_config.second);
}

Status CpuDirectConv2dKernel::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const PadStrideInfo &conv_info)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, weights, dst, conv_info));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(),
                                                              dst->clone().get())
                                .first);

    return Status{};
}

void CpuDirectConv2dKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);

    auto src     = tensors.get_const_tensor(TensorType::ACL_SRC_0);
    auto weights = tensors.get_const_tensor(TensorType::ACL_SRC_1);
    auto dst     = tensors.get_tensor(TensorType::ACL_DST);

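    // Select the micro-kernel matching the source data type, the configured data layout and the runtime ISA features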
    const auto *uk = CpuDirectConv2dKernel::get_implementation(DataTypeDataLayoutISASelectorData{ src->info()->data_type(), _data_layout, CPUInfo::get().get_isa() });
    ARM_COMPUTE_ERROR_ON(uk == nullptr);

    uk->ukernel(window, src, weights, dst, _conv_info);
}

const char *CpuDirectConv2dKernel::name() const
{
    return "CpuDirectConvolutionLayerKernel";
}

const std::vector<CpuDirectConv2dKernel::DirectConv2dKernel> &CpuDirectConv2dKernel::get_available_kernels()
{
    return available_kernels;
}

} // namespace kernels
} // namespace cpu
} // namespace arm_compute