/*
 * Copyright (c) 2017-2020, 2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifdef __aarch64__

#include "arm_gemm.hpp"
#include "gemm_common.hpp"
#include "gemm_hybrid.hpp"
#include "gemm_hybrid_indirect.hpp"
#include "gemm_implementation.hpp"
#include "gemm_interleaved.hpp"

#include "kernels/a64_gemm_s16_8x12.hpp"
#include "kernels/a64_gemm_s8_8x12.hpp"
#include "kernels/a64_gemm_s8_4x4.hpp"
#include "kernels/a64_hybrid_s8s32_dot_6x16.hpp"
#include "kernels/a64_hybrid_s8s32_mmla_6x16.hpp"
#include "kernels/a64_interleaved_s8s32_mmla_8x12.hpp"
#include "kernels/a64_smallK_hybrid_s8s32_dot_6x4.hpp"
#include "kernels/a64_smallK_hybrid_s8s32_dot_8x4.hpp"
#ifdef ARM_COMPUTE_ENABLE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
#include "kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL.hpp"
#include "kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL.hpp"
#include "kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL.hpp"
#endif // ARM_COMPUTE_ENABLE_SME2

#include "kernels/sve_hybrid_s8s32_dot_6x4VL.hpp"
#include "kernels/sve_hybrid_s8s32_mmla_6x4VL.hpp"
#include "kernels/sve_interleaved_s8s32_dot_8x3VL.hpp"
#include "kernels/sve_interleaved_s8s32_mmla_8x3VL.hpp"
#include "kernels/sve_smallK_hybrid_s8s32_dot_8x1VL.hpp"
#endif // ARM_COMPUTE_ENABLE_SVE

namespace arm_gemm {
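/* Candidate implementations for the int8 -> int32 GEMM, in priority order
 * (most specialised kernels first).  As the initialisers below show, each
 * entry carries:
 *   - a GemmMethod and a kernel name,
 *   - a support predicate over GemmArgs (nullptr means "always supported"),
 *   - either a "recommended" predicate or, for entries built via
 *     with_estimate(), a cycle estimate used to rank supported candidates,
 *   - a factory lambda that instantiates the implementation.
 * A GemmMethod::DEFAULT entry terminates the list.
 */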
static const GemmImplementation<int8_t, int32_t> gemm_s8_methods[] = {
#ifdef ARM_COMPUTE_ENABLE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
// SME kernels
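/* Three MOPA tile shapes are provided.  The predicates below (a reading of
 * the selection logic, not a guarantee) prefer the wide 1VLx4VL tile when M
 * fits in few vector lengths, the tall 4VLx1VL tile when N does, and fall
 * back to the square 2VLx2VL tile otherwise. */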
{
    GemmMethod::GEMM_INTERLEAVED,
    "sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL",
    [](const GemmArgs &args) { return args._ci->has_sme2(); },
    [](const GemmArgs &args) { const auto VL = sme::get_vector_length<int32_t>();
                               return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
    [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL, int8_t, int32_t>(args); }
},
{
    GemmMethod::GEMM_INTERLEAVED,
    "sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL",
    [](const GemmArgs &args) { return args._ci->has_sme2(); },
    [](const GemmArgs &args) { const auto VL = sme::get_vector_length<int32_t>();
                               return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
    [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL, int8_t, int32_t>(args); }
},
{
    GemmMethod::GEMM_INTERLEAVED,
    "sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL",
    [](const GemmArgs &args) { return args._ci->has_sme2(); },
    nullptr,
    [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL, int8_t, int32_t>(args); }
},
#endif // ARM_COMPUTE_ENABLE_SME2
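// SVE kernels: I8MM (8-bit integer matrix multiply) variants are tried first
// where the extension is available, then the dot-product kernels.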
GemmImplementation<int8_t, int32_t>::with_estimate(
    GemmMethod::GEMM_HYBRID,
    "sve_hybrid_s8s32_mmla_6x4VL",
    [](const GemmArgs &args) { return args._ci->has_svei8mm(); },
    [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_s8s32_mmla_6x4VL, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
    [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_s8s32_mmla_6x4VL, int8_t, int32_t>(args); }
),
GemmImplementation<int8_t, int32_t>::with_estimate(
    GemmMethod::GEMM_INTERLEAVED,
    "sve_interleaved_s8s32_mmla_8x3VL",
    [](const GemmArgs &args) { return args._ci->has_svei8mm() && (args._Ksize>8); },
    [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
    [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, int32_t>(args); }
),
{
    GemmMethod::GEMM_HYBRID,
    "sve_smallK_hybrid_s8s32_dot_8x1VL",
    [](const GemmArgs &args) { return args._ci->has_sve() && args._Ksize<=64 && !args._indirect_input; },
    [](const GemmArgs &args) { return !(args._ci->has_svei8mm() || args._ci->has_i8mm()); },
    [](const GemmArgs &args) { return new GemmHybrid<cls_sve_smallK_hybrid_s8s32_dot_8x1VL, int8_t, int32_t>(args); }
},
GemmImplementation<int8_t, int32_t>::with_estimate(
    GemmMethod::GEMM_HYBRID,
    "sve_hybrid_s8s32_dot_6x4VL",
    [](const GemmArgs &args) { return args._ci->has_sve() && args._Ksize>=16; },
    [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_s8s32_dot_6x4VL, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
    [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_s8s32_dot_6x4VL, int8_t, int32_t>(args); }
),
GemmImplementation<int8_t, int32_t>::with_estimate(
    GemmMethod::GEMM_INTERLEAVED,
    "sve_interleaved_s8s32_dot_8x3VL",
    [](const GemmArgs &args) { return args._ci->has_sve() && (args._Ksize>4); },
    [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
    [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, int32_t>(args); }
),
#endif // ARM_COMPUTE_ENABLE_SVE
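// Fixed-width Advanced SIMD kernels: I8MM variants first, then the small-K
// dot-product specialisations, with the widening 16-bit, generic dot-product
// and baseline 8-bit kernels as fallbacks.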
GemmImplementation<int8_t, int32_t>::with_estimate(
    GemmMethod::GEMM_INTERLEAVED,
    "a64_interleaved_s8s32_mmla_8x12",
    [](const GemmArgs &args) { return args._ci->has_i8mm() && (args._Ksize>8); },
    [](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
    [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, int32_t>(args); }
),
GemmImplementation<int8_t, int32_t>::with_estimate(
    GemmMethod::GEMM_HYBRID,
    "a64_hybrid_s8s32_mmla_6x16",
    [](const GemmArgs &args) { return args._ci->has_i8mm(); },
    [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_s8s32_mmla_6x16, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
    [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_s8s32_mmla_6x16, int8_t, int32_t>(args); }
),
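// The smallK hybrid kernels are limited to small K (<=32 or 32<K<=64),
// direct (non-indirect) input and N a multiple of 4, and are only
// recommended when no I8MM implementation is available.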
{
    GemmMethod::GEMM_HYBRID,
    "a64_smallK_hybrid_s8s32_dot_8x4",
    [](const GemmArgs &args) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize<=32) && !args._indirect_input; },
    [](const GemmArgs &args) { return !(args._ci->has_svei8mm() || args._ci->has_i8mm()); },
    [](const GemmArgs &args) { return new GemmHybrid<cls_a64_smallK_hybrid_s8s32_dot_8x4, int8_t, int32_t>(args); }
},
{
    GemmMethod::GEMM_HYBRID,
    "a64_smallK_hybrid_s8s32_dot_6x4",
    [](const GemmArgs &args) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize>32) && (args._Ksize<=64) && !args._indirect_input; },
    [](const GemmArgs &args) { return !(args._ci->has_svei8mm() || args._ci->has_i8mm()); },
    [](const GemmArgs &args) { return new GemmHybrid<cls_a64_smallK_hybrid_s8s32_dot_6x4, int8_t, int32_t>(args); }
},
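// The widening 16-bit kernel below is only recommended on Cortex-A53, where
// 16-bit multiplies tend to beat the plain 8-bit kernels for larger or
// awkwardly shaped M.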
{
    GemmMethod::GEMM_INTERLEAVED,
    "a64_gemm_s16_8x12",
    nullptr,
    [](const GemmArgs &args) { return args._ci->get_cpu_model() == CPUModel::A53 && ((args._Msize > 28) || ((args._Msize % 8) > 4)); },
    [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_s16_8x12, int8_t, int32_t>(args); },
},
GemmImplementation<int8_t, int32_t>::with_estimate(
    GemmMethod::GEMM_HYBRID,
    "a64_hybrid_s8s32_dot_6x16",
    [](const GemmArgs &args) { return args._ci->has_dotprod(); },
    [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_s8s32_dot_6x16, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
    [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_s8s32_dot_6x16, int8_t, int32_t>(args); }
),
GemmImplementation<int8_t, int32_t>::with_estimate(
    GemmMethod::GEMM_INTERLEAVED,
    "a64_gemm_s8_8x12",
    [](const GemmArgs &args) { return args._ci->has_dotprod(); },
    [](const GemmArgs &args) { return GemmInterleaved<cls_a64_gemm_s8_8x12, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
    [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_s8_8x12, int8_t, int32_t>(args); }
),
GemmImplementation<int8_t, int32_t>::with_estimate(
    GemmMethod::GEMM_INTERLEAVED,
    "a64_gemm_s8_4x4",
    nullptr,
    [](const GemmArgs &args) { return GemmInterleaved<cls_a64_gemm_s8_4x4, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
    [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_s8_4x4, int8_t, int32_t>(args); }
),
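// Sentinel: a DEFAULT entry with null predicates terminates the list.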
{
    GemmMethod::DEFAULT,
    "",
    nullptr,
    nullptr,
    nullptr
}
};

template<>
const GemmImplementation<int8_t, int32_t> *gemm_implementation_list<int8_t, int32_t>() {
    return gemm_s8_methods;
}
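
/* Minimal usage sketch (for orientation; the exact GemmArgs constructor
 * varies between library versions):
 *
 *   GemmArgs args(&cpu_info, M, N, K, ...);
 *   auto gemm = arm_gemm::gemm<int8_t, int32_t, Nothing>(args, Nothing());
 *
 * gemm<>() walks gemm_s8_methods in order, skipping entries whose support
 * predicate fails, and chooses among the rest using the recommendation
 * predicates and cycle estimates.
 */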

/* Explicitly instantiate the external functions for these types. */
template UniqueGemmCommon<int8_t, int32_t> gemm<int8_t, int32_t, Nothing>(const GemmArgs &args, const Nothing &);
template bool has_opt_gemm<int8_t, int32_t, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
template std::vector<KernelDescription> get_compatible_kernels<int8_t, int32_t, Nothing> (const GemmArgs &args, const Nothing &);

} // namespace arm_gemm

#endif // __aarch64__