Lines Matching full:outputs

31 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
36 xnn_math_f16_f32_cvt__sse2_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
39 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
42 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
49 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
54 xnn_math_f16_f32_cvt__sse2_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
57 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
60 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
67 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
69 xnn_math_f16_f32_cvt__sse2_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
71 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
74 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
79 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
81 xnn_math_f16_f32_cvt__sse2_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
83 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
86 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
91 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
96 xnn_math_f16_f32_cvt__sse2_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
99 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
102 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
109 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
114 xnn_math_f16_f32_cvt__sse2_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
117 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
120 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
127 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
129 xnn_math_f16_f32_cvt__sse2_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
131 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
134 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
139 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
141 xnn_math_f16_f32_cvt__sse2_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
143 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
146 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
151 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
156 xnn_math_f16_f32_cvt__sse2_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
159 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
162 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
169 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
174 xnn_math_f16_f32_cvt__sse2_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
177 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
180 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
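
The fragments above all come from one repeated GoogleTest pattern: fill a 64-byte-aligned input block, run the optimized conversion kernel over it, and require bit-exact agreement with a scalar reference. A minimal self-contained sketch of one such case follows; the kBlockSize value, the TEST name, the header paths, and the hand-written extern declaration of the kernel are assumptions for illustration, not copied from the test file.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iomanip>
#include <ios>
#include <vector>

#include <gtest/gtest.h>
#include <fp16.h>                       // fp16_ieee_to_fp32_bits(): scalar reference
#include <xnnpack/aligned-allocator.h>  // assumed path for AlignedAllocator

// Assumed block size; the real test defines its own constant.
constexpr size_t kBlockSize = 1024;

// Bit-cast helper matching the float_as_uint32() used in the assertions.
static uint32_t float_as_uint32(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  return bits;
}

// Hand-written declaration under the assumed signature; n appears to count
// output bytes, which is why the call sites pass kBlockSize * sizeof(float).
extern "C" void xnn_math_f16_f32_cvt__sse2_int16(
    size_t n, const void* input, float* output);

TEST(CVT__SSE2_INT16, positive_normal) {
  std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> inputs(kBlockSize);
  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
  // Sweep every positive normal half-precision value, one block at a time.
  for (uint32_t n = UINT32_C(0x0400); n < UINT32_C(0x7C00); n += kBlockSize) {
    for (uint32_t i = 0; i < kBlockSize; i++) {
      inputs[i] = static_cast<uint16_t>(n + i);
    }
    xnn_math_f16_f32_cvt__sse2_int16(
        kBlockSize * sizeof(float), inputs.data(), outputs.data());
    for (uint32_t i = 0; i < kBlockSize; i++) {
      const uint32_t reference_output = fp16_ieee_to_fp32_bits(inputs[i]);
      ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
        << "input = 0x" << std::hex << std::setw(4) << std::setfill('0') << inputs[i]
        << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
        << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0')
        << float_as_uint32(outputs[i]);
    }
  }
}
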
189 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
194 xnn_math_f16_f32_cvt__sse2_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
197 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
200 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
207 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
212 xnn_math_f16_f32_cvt__sse2_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
215 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
218 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
225 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
227 xnn_math_f16_f32_cvt__sse2_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
229 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
232 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
237 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
239 xnn_math_f16_f32_cvt__sse2_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
241 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
244 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
249 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
254 xnn_math_f16_f32_cvt__sse2_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
257 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
260 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
267 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
272 xnn_math_f16_f32_cvt__sse2_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
275 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
278 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
285 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
287 xnn_math_f16_f32_cvt__sse2_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
289 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
292 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
297 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
299 xnn_math_f16_f32_cvt__sse2_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
301 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
304 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
309 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
314 xnn_math_f16_f32_cvt__sse2_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
317 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
320 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
327 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
332 xnn_math_f16_f32_cvt__sse2_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
335 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
338 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
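
The _int16/_int32 suffixes on the SSE2 (and, below, SSE4.1 and NEON) kernels presumably name the SIMD lane width in which the integer bit manipulation is done; either way, the scalar computation being vectorized is the standard branch-light half-to-float bit trick. A sketch, assuming the same construction as the fp16 library's fp16_ieee_to_fp32_value(), which is also the natural candidate for the reference_output in the assertions:

#include <cstdint>
#include <cstring>

static uint32_t fp32_to_bits(float f) {
  uint32_t b; std::memcpy(&b, &f, sizeof(b)); return b;
}
static float fp32_from_bits(uint32_t b) {
  float f; std::memcpy(&f, &b, sizeof(f)); return f;
}

// Branch-light IEEE binary16 -> binary32 conversion.
float half_to_float(uint16_t h) {
  const uint32_t w = static_cast<uint32_t>(h) << 16;
  const uint32_t sign = w & UINT32_C(0x80000000);
  const uint32_t two_w = w + w;  // shifts out the sign bit

  // Normal/Inf/NaN path: slide the 5-bit exponent into the 8-bit field,
  // then rebase the bias (15 -> 127) with one multiply by 2**-112.
  const uint32_t exp_offset = UINT32_C(0xE0) << 23;
  const float normalized = fp32_from_bits((two_w >> 4) + exp_offset) * 0x1.0p-112f;

  // Subnormal path: splice the mantissa onto 0.5f's exponent; subtracting
  // 0.5f then renormalizes the value.
  const uint32_t magic_mask = UINT32_C(126) << 23;
  const float denormalized = fp32_from_bits((two_w >> 17) | magic_mask) - 0.5f;

  // Magnitudes below 2**-14 (half's smallest normal) take the subnormal path.
  const uint32_t denorm_cutoff = UINT32_C(1) << 27;
  const uint32_t bits = sign | (two_w < denorm_cutoff ? fp32_to_bits(denormalized)
                                                      : fp32_to_bits(normalized));
  return fp32_from_bits(bits);
}
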
349 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
354 xnn_math_f16_f32_cvt__sse41_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
357 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
360 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
369 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
374 xnn_math_f16_f32_cvt__sse41_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
377 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
380 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
389 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
391 xnn_math_f16_f32_cvt__sse41_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
393 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
396 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
403 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
405 xnn_math_f16_f32_cvt__sse41_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
407 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
410 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
417 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
422 xnn_math_f16_f32_cvt__sse41_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
425 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
428 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
437 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
442 xnn_math_f16_f32_cvt__sse41_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
445 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
448 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
457 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
459 xnn_math_f16_f32_cvt__sse41_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
461 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
464 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
471 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
473 xnn_math_f16_f32_cvt__sse41_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
475 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
478 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
485 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
490 xnn_math_f16_f32_cvt__sse41_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
493 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
496 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
505 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
510 xnn_math_f16_f32_cvt__sse41_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
513 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
516 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()

527 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
532 xnn_math_f16_f32_cvt__sse41_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
535 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
538 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
547 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
552 xnn_math_f16_f32_cvt__sse41_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
555 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
558 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
567 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
569 xnn_math_f16_f32_cvt__sse41_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
571 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
574 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
581 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
583 xnn_math_f16_f32_cvt__sse41_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
585 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
588 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
595 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
600 xnn_math_f16_f32_cvt__sse41_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
603 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
606 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
615 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
620 xnn_math_f16_f32_cvt__sse41_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
623 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
626 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
635 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
637 xnn_math_f16_f32_cvt__sse41_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
639 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
642 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
649 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
651 xnn_math_f16_f32_cvt__sse41_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
653 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
656 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
663 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
668 xnn_math_f16_f32_cvt__sse41_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
671 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
674 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
683 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
688 xnn_math_f16_f32_cvt__sse41_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
691 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
694 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()

705 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
710 xnn_math_f16_f32_cvt__f16c(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
713 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
716 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
725 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
730 xnn_math_f16_f32_cvt__f16c(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
733 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
736 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
745 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
747 xnn_math_f16_f32_cvt__f16c(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
749 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
752 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
759 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
761 xnn_math_f16_f32_cvt__f16c(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
763 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
766 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
773 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
778 xnn_math_f16_f32_cvt__f16c(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
781 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
784 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
793 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
798 xnn_math_f16_f32_cvt__f16c(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
801 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
804 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
813 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
815 xnn_math_f16_f32_cvt__f16c(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
817 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
820 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
827 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
829 xnn_math_f16_f32_cvt__f16c(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
831 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
834 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
841 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
846 xnn_math_f16_f32_cvt__f16c(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
849 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
852 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
861 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
866 xnn_math_f16_f32_cvt__f16c(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
869 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
872 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
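
Unlike the integer-trick variants, the __f16c kernel can use the hardware converter directly: VCVTPH2PS turns eight half-precision lanes into eight floats at once. What follows is a guess at the shape of its inner loop, not the kernel's actual source; the tail for n not a multiple of eight floats is omitted. Compile with -mavx -mf16c.

#include <cstddef>
#include <cstdint>
#include <immintrin.h>

void f16_to_f32_f16c(size_t n, const void* input, float* output) {
  const uint16_t* i = static_cast<const uint16_t*>(input);
  // n counts output bytes, matching the kBlockSize * sizeof(float) call sites.
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m128i vh = _mm_loadu_si128(reinterpret_cast<const __m128i*>(i));
    _mm256_storeu_ps(output, _mm256_cvtph_ps(vh));  // 8 x f16 -> 8 x f32
    i += 8;
    output += 8;
  }
}
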
883 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
888 xnn_math_f16_f32_cvt__neon_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
891 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
894 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
903 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
908 xnn_math_f16_f32_cvt__neon_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
911 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
914 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
923 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
925 xnn_math_f16_f32_cvt__neon_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
927 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
930 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
937 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
939 xnn_math_f16_f32_cvt__neon_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
941 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
944 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
951 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
956 xnn_math_f16_f32_cvt__neon_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
959 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
962 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
971 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
976 xnn_math_f16_f32_cvt__neon_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
979 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
982 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
991 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
993 xnn_math_f16_f32_cvt__neon_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
995 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
998 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1005 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1007 xnn_math_f16_f32_cvt__neon_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1009 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1012 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1019 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1024 xnn_math_f16_f32_cvt__neon_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1027 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1030 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1039 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1044 xnn_math_f16_f32_cvt__neon_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1047 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1050 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()

1061 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1066 xnn_math_f16_f32_cvt__neon_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1069 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1072 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1081 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1086 xnn_math_f16_f32_cvt__neon_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1089 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1092 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1101 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1103 xnn_math_f16_f32_cvt__neon_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1105 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1108 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1115 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1117 xnn_math_f16_f32_cvt__neon_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1119 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1122 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1129 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1134 xnn_math_f16_f32_cvt__neon_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1137 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1140 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1149 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1154 xnn_math_f16_f32_cvt__neon_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1157 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1160 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1169 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1171 xnn_math_f16_f32_cvt__neon_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1173 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1176 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1183 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1185 xnn_math_f16_f32_cvt__neon_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1187 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1190 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1197 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1202 xnn_math_f16_f32_cvt__neon_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1205 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1208 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1217 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1222 xnn_math_f16_f32_cvt__neon_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1225 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1228 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()

1239 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1244 xnn_math_f16_f32_cvt__neonfp16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1247 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1250 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1259 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1264 xnn_math_f16_f32_cvt__neonfp16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1267 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1270 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1279 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1281 xnn_math_f16_f32_cvt__neonfp16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1283 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1286 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1293 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1295 xnn_math_f16_f32_cvt__neonfp16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1297 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1300 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1307 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1312 xnn_math_f16_f32_cvt__neonfp16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1315 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1318 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1327 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1332 xnn_math_f16_f32_cvt__neonfp16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1335 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1338 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1347 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1349 xnn_math_f16_f32_cvt__neonfp16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1351 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1354 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1361 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1363 xnn_math_f16_f32_cvt__neonfp16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1365 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1368 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1375 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1380 xnn_math_f16_f32_cvt__neonfp16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1383 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1386 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1395 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1400 xnn_math_f16_f32_cvt__neonfp16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1403 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1406 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
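
The __neonfp16 variant is the ARM analogue of the F16C path: the VCVT.F32.F16 instruction widens four half lanes to four floats. A hedged sketch of the kind of loop it reduces to (again not the kernel's source, and with tail handling omitted); it requires a target with half-precision conversion support, e.g. -mfpu=neon-fp16 on ARMv7.

#include <cstddef>
#include <cstdint>
#include <arm_neon.h>

void f16_to_f32_neonfp16(size_t n, const void* input, float* output) {
  const uint16_t* i = static_cast<const uint16_t*>(input);
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float16x4_t vh = vreinterpret_f16_u16(vld1_u16(i));
    vst1q_f32(output, vcvt_f32_f16(vh));  // 4 x f16 -> 4 x f32
    i += 4;
    output += 4;
  }
}
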
1415 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1420 … xnn_math_f16_f32_cvt__wasmsimd_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1423 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1426 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1433 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1438 … xnn_math_f16_f32_cvt__wasmsimd_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1441 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1444 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1451 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1453 xnn_math_f16_f32_cvt__wasmsimd_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1455 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1458 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1463 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1465 xnn_math_f16_f32_cvt__wasmsimd_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1467 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1470 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1475 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1480 … xnn_math_f16_f32_cvt__wasmsimd_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1483 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1486 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1493 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1498 … xnn_math_f16_f32_cvt__wasmsimd_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1501 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1504 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1511 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1513 xnn_math_f16_f32_cvt__wasmsimd_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1515 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1518 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1523 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1525 xnn_math_f16_f32_cvt__wasmsimd_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1527 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1530 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1535 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1540 … xnn_math_f16_f32_cvt__wasmsimd_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1543 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1546 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1553 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1558 … xnn_math_f16_f32_cvt__wasmsimd_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1561 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1564 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()

1573 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1578 … xnn_math_f16_f32_cvt__wasmsimd_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1581 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1584 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1591 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1596 … xnn_math_f16_f32_cvt__wasmsimd_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1599 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1602 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1609 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1611 xnn_math_f16_f32_cvt__wasmsimd_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1613 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1616 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1621 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1623 xnn_math_f16_f32_cvt__wasmsimd_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1625 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1628 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1633 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1638 … xnn_math_f16_f32_cvt__wasmsimd_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1641 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1644 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1651 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1656 … xnn_math_f16_f32_cvt__wasmsimd_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1659 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1662 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1669 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1671 xnn_math_f16_f32_cvt__wasmsimd_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1673 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1676 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1681 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1683 xnn_math_f16_f32_cvt__wasmsimd_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1685 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1688 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1693 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1698 … xnn_math_f16_f32_cvt__wasmsimd_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1701 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1704 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1711 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1716 … xnn_math_f16_f32_cvt__wasmsimd_int32(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1719 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1722 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
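
One detail worth noting about every assertion above: the tests compare float_as_uint32() bit patterns rather than the floats themselves. Plain float equality conflates +0.0f with -0.0f and never holds for NaN, so a kernel that mangled either case could not be caught by ASSERT_EQ on floats alone. A small standalone demonstration:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

int main() {
  const float pos_zero = 0.0f, neg_zero = -0.0f;
  assert(pos_zero == neg_zero);   // floats compare equal...
  uint32_t pos_bits, neg_bits;
  std::memcpy(&pos_bits, &pos_zero, sizeof(pos_bits));
  std::memcpy(&neg_bits, &neg_zero, sizeof(neg_bits));
  assert(pos_bits != neg_bits);   // ...but their bit patterns differ.

  const float qnan = std::nanf("");
  assert(!(qnan == qnan));        // NaN never compares equal to itself.
  return 0;
}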