/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"

using namespace arm_compute;
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;

/** Example demonstrating how to implement a VGG-based VDSR network using the Compute Library's graph API */
class GraphVDSRExample : public Example
{
public:
    GraphVDSRExample()
        : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "VDSR")
    {
        model_input_width  = cmd_parser.add_option<SimpleOption<unsigned int>>("image-width", 192);
        model_input_height = cmd_parser.add_option<SimpleOption<unsigned int>>("image-height", 192);

        // Set help messages for the model options
        model_input_width->set_help("Input image width.");
        model_input_height->set_help("Input image height.");
    }
    GraphVDSRExample(const GraphVDSRExample &) = delete;
    GraphVDSRExample &operator=(const GraphVDSRExample &) = delete;
    ~GraphVDSRExample() override                          = default;
    bool do_setup(int argc, char **argv) override
    {
        // Parse arguments
        cmd_parser.parse(argc, argv);
        cmd_parser.validate();

        // Consume common parameters
        common_params = consume_common_graph_parameters(common_opts);

        // Return when help menu is requested
        if(common_params.help)
        {
            cmd_parser.print_help(argv[0]);
            return false;
        }

        // Get input image width and height
        const unsigned int image_width  = model_input_width->value();
        const unsigned int image_height = model_input_height->value();

        // Print parameter values
        std::cout << common_params << std::endl;
        std::cout << "Image width: " << image_width << std::endl;
        std::cout << "Image height: " << image_height << std::endl;

        // Get trainable parameters data path
        const std::string data_path  = common_params.data_path;
        const std::string model_path = "/cnn_data/vdsr_model/";

        // Create a preprocessor object
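        // TFPreproccessor applies TensorFlow-style preprocessing, rescaling the raw pixel values into a normalised floating-point range before they are fed to the network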
        std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<TFPreproccessor>();

        // Create input descriptor
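        // The shape is given in NCHW ordering (width, height, channels, batches) and permuted to the runtime data layout; VDSR processes a single (luminance) channel, hence the 1U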
        const TensorShape tensor_shape     = permute_shape(TensorShape(image_width, image_height, 1U, common_params.batches), DataLayout::NCHW, common_params.data_layout);
        TensorDescriptor  input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);

        // Set weights trained layout
        const DataLayout weights_layout = DataLayout::NCHW;

        // Note: Quantization info is random and used only for benchmarking purposes
        graph << common_params.target
              << common_params.fast_math_hint
              << InputLayer(input_descriptor.set_quantization_info(QuantizationInfo(0.0078125f, 128)),
                            get_input_accessor(common_params, std::move(preprocessor), false));

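        // VDSR learns a residual image: the 'left' sub-stream forwards the input unchanged, the 'right' sub-stream runs the convolutional stack, and the two are summed by the EltwiseLayer at the end of the graph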
        SubStream left(graph);
        SubStream right(graph);

        // Layer 1
        right << ConvolutionLayer(
                  3U, 3U, 64U,
                  get_weights_accessor(data_path, "conv0_w.npy", weights_layout),
                  get_weights_accessor(data_path, "conv0_b.npy"),
                  PadStrideInfo(1, 1, 1, 1), 1, QuantizationInfo(0.031778190285f, 156), QuantizationInfo(0.0784313753247f, 128))
              .set_name("conv0")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv0/Relu");

        // Remaining 18 intermediate layers (conv1 - conv18)
        for(unsigned int i = 1; i < 19; ++i)
        {
            const std::string conv_w_path = "conv" + arm_compute::support::cpp11::to_string(i) + "_w.npy";
            const std::string conv_b_path = "conv" + arm_compute::support::cpp11::to_string(i) + "_b.npy";
            const std::string conv_name   = "conv" + arm_compute::support::cpp11::to_string(i);
            right << ConvolutionLayer(
                      3U, 3U, 64U,
                      get_weights_accessor(data_path, conv_w_path, weights_layout),
                      get_weights_accessor(data_path, conv_b_path),
                      PadStrideInfo(1, 1, 1, 1), 1, QuantizationInfo(0.015851572156f, 93))
                  .set_name(conv_name)
                  << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(conv_name + "/Relu");
        }

        // Final layer
        right << ConvolutionLayer(
                  3U, 3U, 1U,
                  get_weights_accessor(data_path, "conv20_w.npy", weights_layout),
                  get_weights_accessor(data_path, "conv20_b.npy"),
                  PadStrideInfo(1, 1, 1, 1), 1, QuantizationInfo(0.015851572156f, 93))
              .set_name("conv20")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv20/Relu");

        // Add residual to input
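        // The result is consumed by a DummyAccessor, i.e. no output image is written; this example is intended for benchmarking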
        graph << EltwiseLayer(std::move(left), std::move(right), EltwiseOperation::Add).set_name("add")
              << OutputLayer(std::make_unique<DummyAccessor>(0));

        // Finalize graph
        GraphConfig config;
        config.num_threads        = common_params.threads;
        config.use_tuner          = common_params.enable_tuner;
        config.tuner_mode         = common_params.tuner_mode;
        config.tuner_file         = common_params.tuner_file;
        config.mlgo_file          = common_params.mlgo_file;
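        // When a quantized data type is requested, run the graph as a synthetic graph of that type so it can still be benchmarked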
        config.use_synthetic_type = arm_compute::is_data_type_quantized(common_params.data_type);
        config.synthetic_type     = common_params.data_type;

        graph.finalize(common_params.target, config);

        return true;
    }
    void do_run() override
    {
        // Run graph
        graph.run();
    }

private:
    CommandLineParser           cmd_parser;
    CommonGraphOptions          common_opts;
    SimpleOption<unsigned int> *model_input_width{ nullptr };
    SimpleOption<unsigned int> *model_input_height{ nullptr };
    CommonGraphParams           common_params;
    Stream                      graph;
};

/** Main program for VGG-based VDSR
 *
 * Model is based on:
 *      https://arxiv.org/pdf/1511.04587.pdf
 *      "Accurate Image Super-Resolution Using Very Deep Convolutional Networks"
 *      Jiwon Kim, Jung Kwon Lee and Kyoung Mu Lee
 *
 * @note To list all the possible arguments execute the binary appended with the --help option
 *
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments
 *
 * @return Return code
 */
int main(int argc, char **argv)
{
    return arm_compute::utils::run_example<GraphVDSRExample>(argc, argv);
}