xref: /aosp_15_r20/external/ComputeLibrary/examples/gemm_tuner/cl_gemm_native.cpp (revision c217d954acce2dbc11938adb493fc0abd69584f3)
1 /*
2  * Copyright (c) 2019-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #ifndef ARM_COMPUTE_CL /* Needed by Utils.cpp to handle OpenCL exceptions properly */
25 #error "This example needs to be built with -DARM_COMPUTE_CL"
26 #endif /* ARM_COMPUTE_CL */
27 
28 #include "CommonGemmExampleOptions.h"
29 #include "arm_compute/core/Helpers.h"
30 #include "arm_compute/core/KernelDescriptors.h"
31 #include "arm_compute/core/Types.h"
32 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
33 #include "arm_compute/runtime/CL/CLScheduler.h"
34 #include "arm_compute/runtime/CL/CLTuner.h"
35 #include "src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h"
36 #include "tests/CL/Helper.h"
37 #include "utils/Utils.h"
38 #include "utils/command_line/CommandLineOptions.h"
39 #include "utils/command_line/CommandLineParser.h"
40 
41 #include <cstdlib>
42 
43 using namespace arm_compute;
44 using namespace arm_compute::opencl::kernels;
45 using namespace utils;
46 using namespace arm_compute::misc::shape_calculator;
47 using namespace gemm_tuner;
48 
49 namespace
50 {
/** Structure holding all tunable gemm configs specific to this example/strategy */
struct GemmConfigs
{
    size_t m0{ 4 }; /**< Number of rows processed by the matrix multiplication */
    size_t n0{ 4 }; /**< Number of columns processed by the matrix multiplication */
    size_t k0{ 4 }; /**< Number of partial accumulations performed by the matrix multiplication */
};

/** Formatted output of the GemmConfigs type
 *
 * @param[out] os      Output stream.
 * @param[in]  configs Tunable configurations to output
 *
 * @return Modified output stream.
 */
::std::ostream &operator<<(::std::ostream &os, const GemmConfigs &configs)
{
    // Note: unlike other gemm_tuner strategies, this one has no boolean configs,
    // so no true/false strings are needed here.
    os << "m0 : " << configs.m0 << std::endl;
    os << "n0 : " << configs.n0 << std::endl;
    os << "k0 : " << configs.k0 << std::endl;
    return os;
}
76 
77 /** Command line options for gemm configs */
78 class GemmConfigOptions
79 {
80 public:
81     /** Constructor
82      *
83      * @param[in,out] parser A parser on which "parse()" hasn't been called yet.
84      */
GemmConfigOptions(CommandLineParser & parser)85     GemmConfigOptions(CommandLineParser &parser)
86         : m0(parser.add_positional_option<SimpleOption<size_t>>("m0", 4)),
87           n0(parser.add_positional_option<SimpleOption<size_t>>("n0", 4)),
88           k0(parser.add_positional_option<SimpleOption<size_t>>("k0", 4))
89     {
90         m0->set_help("Number of rows processed by the matrix multiplication");
91         n0->set_help("Number of columns processed by the matrix multiplication");
92         k0->set_help("Number of partial accumulations performed by the matrix multiplication");
93     }
94     /** Prevent instances of this class from being copied (As this class contains pointers) */
95     GemmConfigOptions(const GemmConfigOptions &) = delete;
96     /** Prevent instances of this class from being copied (As this class contains pointers) */
97     GemmConfigOptions &operator=(const GemmConfigOptions &) = delete;
98     /** Allow instances of this class to be moved */
99     GemmConfigOptions(GemmConfigOptions &&) = default;
100     /** Allow instances of this class to be moved */
101     GemmConfigOptions &operator=(GemmConfigOptions &&) = default;
102     /** Default destructor */
103     ~GemmConfigOptions() = default;
104 
105     SimpleOption<size_t> *m0; /**< Number of rows processed by the matrix multiplication option */
106     SimpleOption<size_t> *n0; /**< Number of columns processed by the matrix multiplication option */
107     SimpleOption<size_t> *k0; /**< Number of partial accumulations performed by the matrix multiplication option */
108 };
109 
110 /** Consumes the gemm configuration options and creates a structure containing all information
111  *
112  * @param[in] options Options to consume
113  *
114  * @return Structure containing the gemm configurations
115  */
consume_gemm_configs(const GemmConfigOptions & options)116 GemmConfigs consume_gemm_configs(const GemmConfigOptions &options)
117 {
118     GemmConfigs configs;
119     configs.m0 = options.m0->value();
120     configs.n0 = options.n0->value();
121     configs.k0 = options.k0->value();
122     return configs;
123 }
124 
125 } // namespace
// Wrap ClGemmMatrixMultiplyNativeKernel in a synthetic operator so the example can
// call validate()/configure()/run() on it directly.
using CLGEMMMatrixMultiplyNative = test::CLSynthetizeOperator<ClGemmMatrixMultiplyNativeKernel>;
128 
129 class CLGEMMMatrixMultiplyNativeExample : public Example
130 {
131 public:
do_setup(int argc,char ** argv)132     bool do_setup(int argc, char **argv) override
133     {
134         // Default parameters
135         const float               alpha    = 1.0f;
136         const float               beta     = 0.0f;
137         const ActivationLayerInfo act_info = ActivationLayerInfo();
138         CommonGemmExampleParams   params;
139         GemmConfigs               configs;
140 
141         // Set up command line parser and options
142         CommandLineParser        parser;
143         CommonGemmExampleOptions param_options(parser);
144         GemmConfigOptions        config_options(parser);
145 
146         // Parse command line options
147         parser.parse(argc, argv);
148         if(param_options.help->is_set() && param_options.help->value())
149         {
150             // Print help message
151             parser.print_help(argv[0]);
152             return false;
153         }
154         if(!parser.validate())
155         {
156             // Invalid arguments. Use default parameters and configs
157             std::cerr << "Invalid arguments." << std::endl;
158             parser.print_help(argv[0]);
159             std::cerr << "Falling back to default parameters and configs" << std::endl;
160         }
161         else
162         {
163             // Get parameters and configs from command-line options
164             params  = consume_common_gemm_example_parameters(param_options);
165             configs = consume_gemm_configs(config_options);
166         }
167 
168         // Print gemm parameters and configurations
169         std::cout << "Gemm parameters:" << std::endl;
170         std::cout << params << std::endl;
171         std::cout << "Gemm configurations:" << std::endl;
172         std::cout << configs << std::endl;
173 
174         tuner.set_tuner_mode(params.tuner_mode);
175 
176         CLScheduler::get().default_init(&tuner);
177 
178         lhs.allocator()->init(TensorInfo(TensorShape(params.K, params.M, params.B), 1, params.data_type));
179         rhs.allocator()->init(TensorInfo(TensorShape(params.N, params.K, params.B), 1, params.data_type));
180         bias.allocator()->init(TensorInfo(TensorShape(params.N, 1, params.B), 1, params.data_type));
181 
182         GEMMLHSMatrixInfo lhs_info;
183         lhs_info.m0 = configs.m0;
184         lhs_info.k0 = configs.k0;
185 
186         GEMMRHSMatrixInfo rhs_info;
187         rhs_info.n0 = configs.n0;
188         rhs_info.k0 = configs.k0;
189 
190         GEMMKernelInfo kernel_info;
191         kernel_info.m                       = params.M;
192         kernel_info.n                       = params.N;
193         kernel_info.k                       = params.K;
194         kernel_info.depth_output_gemm3d     = 0;
195         kernel_info.reinterpret_input_as_3d = false;
196         kernel_info.broadcast_bias          = true;
197         kernel_info.activation_info         = act_info;
198 
199         // Validate argments
200         Status status{};
201         status = gemm.validate(lhs.info(), rhs.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);
202         if(!status)
203         {
204             // Unsupported arguments
205             std::cerr << "Unsupported arguments." << std::endl;
206             std::cerr << "Check documentation for supported/unsupported combinations" << std::endl;
207             return false;
208         }
209 
210         // Configure function
211         gemm.configure(lhs.info(), rhs.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);
212 
213         // Allocate tensors
214         lhs.allocator()->allocate();
215         rhs.allocator()->allocate();
216         bias.allocator()->allocate();
217         dst.allocator()->allocate();
218 
219         return true;
220     }
do_run()221     void do_run() override
222     {
223         // Execute the function
224         ITensorPack gemm_pack({ { ACL_SRC_0, &lhs },
225             { ACL_SRC_1, &rhs },
226             { ACL_SRC_2, &bias },
227             { ACL_DST, &dst }
228         });
229         gemm.run(gemm_pack);
230 
231         // Make sure all the OpenCL jobs are done executing:
232         CLScheduler::get().sync();
233     }
234 
do_teardown()235     void do_teardown() override
236     {
237     }
238 
239 private:
240     CLTensor                   lhs{};
241     CLTensor                   rhs{};
242     CLTensor                   bias{};
243     CLTensor                   dst{};
244     CLTuner                    tuner{};
245     CLGEMMMatrixMultiplyNative gemm{};
246 };
247 
/** Main program for gemm native test
 *
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments ( [optional] M, [optional] N, [optional] K, [optional] B, [optional] m0, [optional] n0, [optional] k0 )
 *
 * @return Example exit code (run_example handles setup/run/teardown and OpenCL errors)
 */
int main(int argc, char **argv)
{
    return run_example<CLGEMMMatrixMultiplyNativeExample>(argc, argv);
}
257