/*
 * Copyright (c) 2016-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CPU_MUL_KERNEL_H
#define ARM_COMPUTE_CPU_MUL_KERNEL_H

#include "src/core/common/Macros.h"
#include "src/cpu/ICpuKernel.h"

namespace arm_compute
{
namespace cpu
{
namespace kernels
{
/** Interface for the kernel to perform multiplication between two tensors */
class CpuMulKernel : public ICpuKernel<CpuMulKernel>
{
public:
    CpuMulKernel() = default;
    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuMulKernel);
    /** Initialise the kernel's inputs and dst.
     *
     * Valid configurations (Src1,Src2) -> Dst :
     *
     *                                                 Support: Broadcast? Scale=1/255?
     *   - (U8,U8)                           -> U8, S16         N          Y
     *   - (U8,S16)                          -> S16             N          Y
     *   - (S16,U8)                          -> S16             N          Y
     *   - (S16,S16)                         -> S16             N          Y
     *   - (S32,S32)                         -> S32             Y          N
     *   - (F16,F16)                         -> F16             N          Y
     *   - (F32,F32)                         -> F32             Y          Y
     *   - (QASYMM8,QASYMM8)                 -> QASYMM8         Y          Y
     *   - (QASYMM8_SIGNED,QASYMM8_SIGNED)   -> QASYMM8_SIGNED  Y          Y
     *   - (QSYMM16,QSYMM16)                 -> QSYMM16, S32    N          Y
     *
     * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
     *       For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
     *
     * @param[in]  src1            First input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
     * @param[in]  src2            Second input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
     * @param[out] dst             Dst tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
     * @param[in]  scale           Scale to apply after multiplication.
     *                             Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
     *                             If @p src1, @p src2 and @p dst are all of datatype S32, scale cannot be 1/255.
     * @param[in]  overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if any of the inputs is of quantized datatype.
     * @param[in]  rounding_policy Rounding policy.
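     *
     * Illustrative usage sketch (not part of the original documentation; the tensor shapes,
     * quantization parameters and variable names are arbitrary assumptions):
     * @code
     * // Element-wise multiplication of two QASYMM8 tensors into a QASYMM8 dst with scale 1 (= 1/2^0).
     * // Quantized inputs require ConvertPolicy::SATURATE; scales other than 1/255 require RoundingPolicy::TO_ZERO.
     * TensorInfo src1_info(TensorShape(32U, 16U), 1, DataType::QASYMM8, QuantizationInfo(0.25f, 10));
     * TensorInfo src2_info(TensorShape(32U, 16U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 3));
     * TensorInfo dst_info(TensorShape(32U, 16U), 1, DataType::QASYMM8, QuantizationInfo(1.f, 0));
     *
     * CpuMulKernel mul;
     * Status st = CpuMulKernel::validate(&src1_info, &src2_info, &dst_info, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
     * if(st.error_code() == ErrorCode::OK)
     * {
     *     mul.configure(&src1_info, &src2_info, &dst_info, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
     *     // At run time the actual tensors are bound through an ITensorPack and executed via run_op().
     * }
     * @endcode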
     */
    void configure(ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
    /** Static function to check if given info will lead to a valid configuration
     *
     * Similar to @ref CpuMulKernel::configure()
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);

    // Inherited methods overridden:
    void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
    const char *name() const override;

    /** Return minimum workload size of the relevant kernel
     *
     * @param[in] platform     The CPU platform used to create the context.
     * @param[in] thread_count Number of threads in the execution.
     *
     * @return Minimum workload size for requested configuration.
     */
    size_t get_mws(const CPUInfo &platform, size_t thread_count) const override;

    /** Get the preferred dimension in which the scheduler splits the work into multiple jobs.
     *
     * @return The split dimension hint.
     */
    size_t get_split_dimension_hint() const
    {
        return _split_dimension;
    }

private:
    /** Common signature for all the specialised multiplication functions with integer scaling factor
     *
     * @param[in]  src1   Src1 tensor object.
     * @param[in]  src2   Src2 tensor object.
     * @param[out] dst    Dst tensor object.
     * @param[in]  window Region on which to execute the kernel.
     * @param[in]  scale  Integer scale factor.
     */
    using MulFunctionInt = void(const ITensor *src1, const ITensor *src2, ITensor *dst, const Window &window, int scale);
    /** Common signature for all the specialised multiplication functions with float scaling factor
     *
     * @param[in]  src1   Src1 tensor object.
     * @param[in]  src2   Src2 tensor object.
     * @param[out] dst    Dst tensor object.
     * @param[in]  window Region on which to execute the kernel.
     * @param[in]  scale  Float scale factor.
     */
    using MulFunctionFloat = void(const ITensor *src1, const ITensor *src2, ITensor *dst, const Window &window, float scale);
    /** Common signature for all the specialised QASYMM8 multiplication functions with float scaling factor
     *
     * @param[in]  src1   Src1 tensor object.
     * @param[in]  src2   Src2 tensor object.
     * @param[out] dst    Dst tensor object.
     * @param[in]  window Region on which to execute the kernel.
     * @param[in]  scale  Float scale factor.
     */
    using MulFunctionQuantized = void(const ITensor *src1, const ITensor *src2, ITensor *dst, const Window &window, float scale);

    MulFunctionFloat     *_func_float{ nullptr };
    MulFunctionInt       *_func_int{ nullptr };
    MulFunctionQuantized *_func_quantized{ nullptr };
    float                 _scale{ 0 };
    int                   _scale_exponent{ 0 };
    size_t                _split_dimension{ Window::DimY };
};

/** Interface for the complex pixelwise multiplication kernel. */
class CpuComplexMulKernel : public ICpuKernel<CpuComplexMulKernel>
{
public:
    CpuComplexMulKernel() = default;
    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuComplexMulKernel);
    /** Initialise the kernel's src and dst.
     *
     * @param[in]  src1 An src tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
     * @param[in]  src2 An src tensor. Data types supported: same as @p src1. Number of channels supported: same as @p src1.
     * @param[out] dst  The dst tensor. Data types supported: same as @p src1. Number of channels supported: same as @p src1.
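     *
     * Illustrative usage sketch (not part of the original documentation; the shapes and
     * variable names are arbitrary assumptions):
     * @code
     * // 2-channel F32 tensors, each element holding a (real, imaginary) pair.
     * TensorInfo a_info(TensorShape(8U, 8U), 2, DataType::F32);
     * TensorInfo b_info(TensorShape(8U, 8U), 2, DataType::F32);
     * TensorInfo out_info(TensorShape(8U, 8U), 2, DataType::F32);
     *
     * CpuComplexMulKernel cmul;
     * if(CpuComplexMulKernel::validate(&a_info, &b_info, &out_info).error_code() == ErrorCode::OK)
     * {
     *     cmul.configure(&a_info, &b_info, &out_info);
     * }
     * @endcode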
     */
    void configure(ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst);
    /** Static function to check if given info will lead to a valid configuration
     *
     * Similar to @ref CpuComplexMulKernel::configure()
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst);

    // Inherited methods overridden:
    void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
    const char *name() const override;
};
} // namespace kernels
} // namespace cpu
} // namespace arm_compute
#endif /* ARM_COMPUTE_CPU_MUL_KERNEL_H */