xref: /aosp_15_r20/external/ComputeLibrary/tests/validation/Validation.cpp (revision c217d954acce2dbc11938adb493fc0abd69584f3)
/*
 * Copyright (c) 2017-2019 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "Validation.h"

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/Tensor.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <vector>

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
/** Read the value at @p ptr, interpreting it according to @p data_type, and convert it to double.
 *
 * @param[in] ptr       Pointer to the value.
 * @param[in] data_type Data type of the value pointed to by @p ptr.
 *
 * @return The value converted to double.
 */
double get_double_data(const void *ptr, DataType data_type)
{
    if(ptr == nullptr)
    {
        ARM_COMPUTE_ERROR("Can't dereference a null pointer!");
    }

    switch(data_type)
    {
        case DataType::U8:
            return *reinterpret_cast<const uint8_t *>(ptr);
        case DataType::S8:
            return *reinterpret_cast<const int8_t *>(ptr);
        case DataType::U16:
            return *reinterpret_cast<const uint16_t *>(ptr);
        case DataType::S16:
            return *reinterpret_cast<const int16_t *>(ptr);
        case DataType::U32:
            return *reinterpret_cast<const uint32_t *>(ptr);
        case DataType::S32:
            return *reinterpret_cast<const int32_t *>(ptr);
        case DataType::U64:
            return *reinterpret_cast<const uint64_t *>(ptr);
        case DataType::S64:
            return *reinterpret_cast<const int64_t *>(ptr);
        case DataType::F16:
            return *reinterpret_cast<const half *>(ptr);
        case DataType::F32:
            return *reinterpret_cast<const float *>(ptr);
        case DataType::F64:
            return *reinterpret_cast<const double *>(ptr);
        case DataType::SIZET:
            return *reinterpret_cast<const size_t *>(ptr);
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}

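/** Compare one border element of @p tensor at coordinates @p id against its expected value.
 *
 * For BorderMode::REPLICATE the expected value is read from the nearest valid element of the
 * tensor rather than from @p border_value. Every channel of the element is compared and
 * @p num_elements and @p num_mismatches are updated accordingly.
 */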
void check_border_element(const IAccessor &tensor, const Coordinates &id,
                          const BorderMode &border_mode, const void *border_value,
                          int64_t &num_elements, int64_t &num_mismatches)
{
    const size_t channel_size = element_size_from_data_type(tensor.data_type());
    const auto   ptr          = static_cast<const uint8_t *>(tensor(id));

    if(border_mode == BorderMode::REPLICATE)
    {
        Coordinates border_id{ id };

        if(id.x() < 0)
        {
            border_id.set(0, 0);
        }
        else if(static_cast<size_t>(id.x()) >= tensor.shape().x())
        {
            border_id.set(0, tensor.shape().x() - 1);
        }

        if(id.y() < 0)
        {
            border_id.set(1, 0);
        }
        else if(static_cast<size_t>(id.y()) >= tensor.shape().y())
        {
            border_id.set(1, tensor.shape().y() - 1);
        }

        border_value = tensor(border_id);
    }

    // Iterate over all channels within one element
    for(int channel = 0; channel < tensor.num_channels(); ++channel)
    {
        const size_t channel_offset = channel * channel_size;
        const double target         = get_double_data(ptr + channel_offset, tensor.data_type());
        const double reference      = get_double_data(static_cast<const uint8_t *>(border_value) + channel_offset, tensor.data_type());

        if(!compare<AbsoluteTolerance<double>>(target, reference))
        {
            ARM_COMPUTE_TEST_INFO("id = " << id);
            ARM_COMPUTE_TEST_INFO("channel = " << channel);
            ARM_COMPUTE_TEST_INFO("target = " << std::setprecision(5) << target);
            ARM_COMPUTE_TEST_INFO("reference = " << std::setprecision(5) << reference);
            ARM_COMPUTE_EXPECT_EQUAL(target, reference, framework::LogLevel::DEBUG);

            ++num_mismatches;
        }

        ++num_elements;
    }
}
} // namespace

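// Check that a valid region matches the reference: same number of dimensions and identical anchor and shape values.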
void validate(const arm_compute::ValidRegion &region, const arm_compute::ValidRegion &reference)
{
    ARM_COMPUTE_EXPECT_EQUAL(region.anchor.num_dimensions(), reference.anchor.num_dimensions(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT_EQUAL(region.shape.num_dimensions(), reference.shape.num_dimensions(), framework::LogLevel::ERRORS);

    for(unsigned int d = 0; d < region.anchor.num_dimensions(); ++d)
    {
        ARM_COMPUTE_EXPECT_EQUAL(region.anchor[d], reference.anchor[d], framework::LogLevel::ERRORS);
    }

    for(unsigned int d = 0; d < region.shape.num_dimensions(); ++d)
    {
        ARM_COMPUTE_EXPECT_EQUAL(region.shape[d], reference.shape[d], framework::LogLevel::ERRORS);
    }
}

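// Check that all four sides of the padding match the reference padding exactly.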
void validate(const arm_compute::PaddingSize &padding, const arm_compute::PaddingSize &reference)
{
    ARM_COMPUTE_EXPECT_EQUAL(padding.top, reference.top, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT_EQUAL(padding.right, reference.right, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT_EQUAL(padding.bottom, reference.bottom, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT_EQUAL(padding.left, reference.left, framework::LogLevel::ERRORS);
}

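// Check padding against separate references: left/right against the width reference, top/bottom against the height reference.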
void validate(const arm_compute::PaddingSize &padding, const arm_compute::PaddingSize &width_reference, const arm_compute::PaddingSize &height_reference)
{
    ARM_COMPUTE_EXPECT_EQUAL(padding.top, height_reference.top, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT_EQUAL(padding.right, width_reference.right, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT_EQUAL(padding.bottom, height_reference.bottom, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT_EQUAL(padding.left, width_reference.left, framework::LogLevel::ERRORS);
}

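// Check that every channel of every element of the tensor equals the single reference value.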
void validate(const IAccessor &tensor, const void *reference_value)
{
    ARM_COMPUTE_ASSERT(reference_value != nullptr);

    int64_t      num_mismatches = 0;
    int64_t      num_elements   = 0;
    const size_t channel_size   = element_size_from_data_type(tensor.data_type());

    // Iterate over all elements, e.g. U8, S16, RGB888, ...
    const uint32_t tensor_num_elements = tensor.num_elements();
    for(uint32_t element_idx = 0; element_idx < tensor_num_elements; ++element_idx)
    {
        const Coordinates id = index2coord(tensor.shape(), element_idx);

        const auto ptr = static_cast<const uint8_t *>(tensor(id));

        // Iterate over all channels within one element
        for(int channel = 0; channel < tensor.num_channels(); ++channel)
        {
            const size_t channel_offset = channel * channel_size;
            const double target         = get_double_data(ptr + channel_offset, tensor.data_type());
            const double reference      = get_double_data(reference_value, tensor.data_type());

            if(!compare<AbsoluteTolerance<double>>(target, reference))
            {
                ARM_COMPUTE_TEST_INFO("id = " << id);
                ARM_COMPUTE_TEST_INFO("channel = " << channel);
                ARM_COMPUTE_TEST_INFO("target = " << std::setprecision(5) << target);
                ARM_COMPUTE_TEST_INFO("reference = " << std::setprecision(5) << reference);
                ARM_COMPUTE_EXPECT_EQUAL(target, reference, framework::LogLevel::DEBUG);

                ++num_mismatches;
            }

            ++num_elements;
        }
    }

    if(num_elements > 0)
    {
        const float percent_mismatches = static_cast<float>(num_mismatches) / num_elements * 100.f;

        ARM_COMPUTE_TEST_INFO(num_mismatches << " values (" << std::fixed << std::setprecision(2) << percent_mismatches << "%) mismatched");
        ARM_COMPUTE_EXPECT_EQUAL(num_mismatches, 0, framework::LogLevel::ERRORS);
    }
}

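// Check the filled border of the tensor. UNDEFINED borders are not checked; CONSTANT borders are
// compared against the given border value and REPLICATE borders against the nearest valid element.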
void validate(const IAccessor &tensor, BorderSize border_size, const BorderMode &border_mode, const void *border_value)
{
    if(border_mode == BorderMode::UNDEFINED)
    {
        return;
    }
    else if(border_mode == BorderMode::CONSTANT)
    {
        ARM_COMPUTE_ASSERT(border_value != nullptr);
    }

    int64_t   num_mismatches = 0;
    int64_t   num_elements   = 0;
    const int slice_size     = tensor.shape()[0] * tensor.shape()[1];

    for(int element_idx = 0; element_idx < tensor.num_elements(); element_idx += slice_size)
    {
        Coordinates id = index2coord(tensor.shape(), element_idx);

        // Top border
        for(int y = -border_size.top; y < 0; ++y)
        {
            id.set(1, y);

            for(int x = -border_size.left; x < static_cast<int>(tensor.shape()[0]) + static_cast<int>(border_size.right); ++x)
            {
                id.set(0, x);

                check_border_element(tensor, id, border_mode, border_value, num_elements, num_mismatches);
            }
        }

        // Bottom border
        for(int y = tensor.shape()[1]; y < static_cast<int>(tensor.shape()[1]) + static_cast<int>(border_size.bottom); ++y)
        {
            id.set(1, y);

            for(int x = -border_size.left; x < static_cast<int>(tensor.shape()[0]) + static_cast<int>(border_size.right); ++x)
            {
                id.set(0, x);

                check_border_element(tensor, id, border_mode, border_value, num_elements, num_mismatches);
            }
        }

        // Left/right border
        for(int y = 0; y < static_cast<int>(tensor.shape()[1]); ++y)
        {
            id.set(1, y);

            // Left border
            for(int x = -border_size.left; x < 0; ++x)
            {
                id.set(0, x);

                check_border_element(tensor, id, border_mode, border_value, num_elements, num_mismatches);
            }

            // Right border
            for(int x = tensor.shape()[0]; x < static_cast<int>(tensor.shape()[0]) + static_cast<int>(border_size.right); ++x)
            {
                id.set(0, x);

                check_border_element(tensor, id, border_mode, border_value, num_elements, num_mismatches);
            }
        }
    }

    if(num_elements > 0)
    {
        const float percent_mismatches = static_cast<float>(num_mismatches) / num_elements * 100.f;

        ARM_COMPUTE_TEST_INFO(num_mismatches << " values (" << std::fixed << std::setprecision(2) << percent_mismatches << "%) mismatched");
        ARM_COMPUTE_EXPECT_EQUAL(num_mismatches, 0, framework::LogLevel::ERRORS);
    }
}

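// Check classified labels against expected labels element by element and report the mismatch percentage.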
void validate(std::vector<unsigned int> classified_labels, std::vector<unsigned int> expected_labels)
{
    ARM_COMPUTE_EXPECT_EQUAL(classified_labels.size(), expected_labels.size(), framework::LogLevel::ERRORS);

    int64_t   num_mismatches = 0;
    const int num_elements   = std::min(classified_labels.size(), expected_labels.size());

    for(int i = 0; i < num_elements; ++i)
    {
        if(classified_labels[i] != expected_labels[i])
        {
            ++num_mismatches;
            ARM_COMPUTE_EXPECT_EQUAL(classified_labels[i], expected_labels[i], framework::LogLevel::DEBUG);
        }
    }

    if(num_elements > 0)
    {
        const float percent_mismatches = static_cast<float>(num_mismatches) / num_elements * 100.f;

        ARM_COMPUTE_TEST_INFO(num_mismatches << " values (" << std::fixed << std::setprecision(2) << percent_mismatches << "%) mismatched");
        ARM_COMPUTE_EXPECT_EQUAL(num_mismatches, 0, framework::LogLevel::ERRORS);
    }
}
} // namespace validation
} // namespace test
} // namespace arm_compute