/*
 * Copyright (c) 2021-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CPU_GEMM_H
#define ARM_COMPUTE_CPU_GEMM_H

#include "src/cpu/ICpuOperator.h"

#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "src/cpu/kernels/CpuGemmInterleave4x4Kernel.h"
#include "src/cpu/kernels/CpuGemmMatrixAdditionKernel.h"
#include "src/cpu/kernels/CpuGemmMatrixMultiplyKernel.h"
#include "src/cpu/kernels/CpuGemmTranspose1xWKernel.h"
#include "src/cpu/operators/CpuActivation.h"
#include "src/cpu/operators/CpuAdd.h"
#include "src/cpu/operators/internal/CpuGemmAssemblyDispatch.h"

#include <memory>

namespace arm_compute
{
namespace cpu
{
/** Basic function to execute GEMM. This function calls the following kernels:
 *
 * If an optimized assembly implementation is available:
 * -# @ref cpu::CpuGemmAssemblyDispatch
 * -# @ref cpu::CpuActivation (to apply alpha scaling, if alpha != 1.0)
 * Else:
 * -# @ref cpu::kernels::CpuGemmInterleave4x4Kernel (if the output tensor is a matrix)
 * -# @ref cpu::kernels::CpuGemmTranspose1xWKernel (if the output tensor is a matrix)
 * -# @ref cpu::kernels::CpuGemmMatrixMultiplyKernel
 * In both cases:
 * -# @ref cpu::kernels::CpuGemmMatrixAdditionKernel (if c != nullptr, beta != 0.0 and matrix B is not reshaped only on the first run)
 * Else:
 * -# @ref cpu::CpuAdd (if c != nullptr, matrix B is reshaped only on the first run and no optimized assembly path is in place)
 *
 * -# @ref cpu::CpuActivation (if an activation is specified in GEMMInfo)
 */
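/* A minimal usage sketch (illustrative only): the shapes, the scalar values and
 * the omission of auxiliary-workspace handling below are assumptions made for
 * the example, not requirements of this interface.
 *
 * @code
 * // d = 1.0f * a * b, with a: 64x32 (MxK), b: 32x16 (KxN), d: 64x16 (MxN).
 * // Note that ACL TensorShape is given as (width, height), i.e. (columns, rows).
 * TensorInfo a_info(TensorShape(32U, 64U), 1, DataType::F32);
 * TensorInfo b_info(TensorShape(16U, 32U), 1, DataType::F32);
 * TensorInfo d_info(TensorShape(16U, 64U), 1, DataType::F32);
 *
 * CpuGemm gemm;
 * ARM_COMPUTE_ERROR_THROW_ON(CpuGemm::validate(&a_info, &b_info, nullptr, &d_info, 1.0f, 0.0f));
 * gemm.configure(&a_info, &b_info, nullptr, &d_info, 1.0f, 0.0f);
 *
 * // a, b and d are ITensors (e.g. arm_compute::Tensor) allocated elsewhere with
 * // backing memory matching the infos above; the auxiliary tensors reported by
 * // workspace() would also be bound into the pack in a complete integration.
 * ITensorPack pack{ { ACL_SRC_0, &a }, { ACL_SRC_1, &b }, { ACL_DST, &d } };
 * gemm.prepare(pack); // one-off work, e.g. reshaping B when requested
 * gemm.run(pack);
 * @endcode
 */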
class CpuGemm : public ICpuOperator
{
public:
    /** Default constructor */
    CpuGemm() = default;
    /** Default destructor */
    ~CpuGemm() = default;
    /** Configure operator for a given list of arguments
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0     |src1     |src2     |dst      |
     * |:--------|:--------|:--------|:--------|
     * |F32      |F32      |F32      |F32      |
     * |F16      |F16      |F16      |F16      |
     * |BFLOAT16 |BFLOAT16 |BFLOAT16 |F32      |
     *
     * @note GEMM: General Matrix Multiply - [alpha * A * B + beta * C].
     * @note GEMM: The tensors a, b, c, d must share one data type; do not mix data types when calling this function.
     *             The BFLOAT16 configuration above, whose destination type is F32, is the only exception.
     *
     * @note Batched GEMM only supports broadcasting cases where RHS rank < LHS rank but not the other way around.
     *
     * @param[in]  a         First input tensor info (Matrix A or Vector A). Data type supported: BFLOAT16/F16/F32
     * @param[in]  b         Second input tensor info (Matrix B). Data type supported: same as @p a
     * @param[in]  c         Third input tensor info (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a
     * @param[out] d         Output tensor info. Data type supported: same as @p a
     * @param[in]  alpha     Weight of the matrix product
     * @param[in]  beta      Weight of matrix C
     * @param[in]  gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
     *                       if the reshape of matrix B should happen only for the first run
     */
    void configure(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, ITensorInfo *d,
                   float alpha, float beta, const GEMMInfo &gemm_info = GEMMInfo());
    /** Static function to check if the given info will lead to a valid configuration of @ref CpuGemm.
     *
     * Similar to @ref CpuGemm::configure()
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d,
                           float alpha, float beta, const GEMMInfo &gemm_info = GEMMInfo());

    /** Indicates whether or not there is an optimal assembly implementation that can be used to process the given parameters.
     *
     * This method serves the same purpose as @ref NEGEMMConvolutionLayer::has_opt_impl,
     * with the only caveat that the value of arm_compute::WeightFormat needs to be
     * passed via the parameter @p gemm_info.
     */
    static Status has_opt_impl(arm_compute::WeightFormat &weight_format, const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d,
                               const GEMMInfo &gemm_info = GEMMInfo());
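    /* Illustrative sketch of probing for an optimized fixed-format kernel
     * before configuration; the ANY query value and the surrounding flow are
     * assumptions made for the example:
     *
     * @code
     * arm_compute::WeightFormat wf = arm_compute::WeightFormat::ANY; // let ACL choose
     * GEMMInfo info;
     * info.set_weight_format(wf);
     * if(bool(CpuGemm::has_opt_impl(wf, &a_info, &b_info, nullptr, &d_info, info)))
     * {
     *     // wf now names the weight layout the optimal kernel expects; reorder
     *     // the B tensor into that format before calling configure().
     * }
     * @endcode
     */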
    // Inherited methods overridden:
    void                             run(ITensorPack &tensors) override;
    void                             prepare(ITensorPack &constants) override;
    experimental::MemoryRequirements workspace() const override;

    /** Indicates if the convolution executes in variable weights mode.
     *
     * When ACL executes convolution in variable weights mode, it does
     * not perform any processing of the weights tensor. Instead, it
     * uses the data exactly as provided by the user.
     */
    bool isVarWeightsKernel() const;

private:
    enum AuxTensorIdx
    {
        AsmGemmWorkspace = 0,
        Pretranspose,
        InterleavedLHS,
        TransposedRHS,
        TempResult,
        Count
    };

    std::unique_ptr<kernels::CpuGemmInterleave4x4Kernel>  _interleave_kernel{ nullptr };
    std::unique_ptr<kernels::CpuGemmTranspose1xWKernel>   _transpose_kernel{ nullptr };
    std::unique_ptr<kernels::CpuGemmMatrixMultiplyKernel> _mm_kernel{ nullptr };
    std::unique_ptr<CpuGemmAssemblyDispatch>              _asm_glue{ nullptr };
    std::unique_ptr<kernels::CpuGemmMatrixAdditionKernel> _ma_kernel{ nullptr };
    std::unique_ptr<CpuActivation>                        _alpha_scale_func{ nullptr };
    std::unique_ptr<CpuAdd>                               _add_bias{ nullptr };
    std::unique_ptr<CpuActivation>                        _activation_func{ nullptr };

    TensorInfo _tmp_a{};
    TensorInfo _tmp_b{};
    TensorInfo _tmp_d{};

    bool _run_vector_matrix_multiplication{ false };
    bool _run_alpha_scale{ false };
    bool _run_addition{ false };
    bool _run_bias_addition{ false };
    bool _run_activation{ false };
    bool _reshape_b_only_on_first_run{ false };
    bool _is_prepared{ false };

    experimental::MemoryRequirements _aux_mem{ Count };
};
} // namespace cpu
} // namespace arm_compute
#endif /* ARM_COMPUTE_CPU_GEMM_H */