Lines matching full:outputs — each entry below shows the source line number, the matched line, and its enclosing function (in TEST()).
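
Every match in this listing comes from a GoogleTest file exercising XNNPACK's f32→f16 conversion microkernels (SSE2, SSE4.1, F16C, NEON, NEON-FP16, WAsm SIMD, and two scalar variants). All of the kernels share one calling convention, which can be read off the call sites: the first argument is the size of the output buffer in bytes (kBlockSize * sizeof(uint16_t)), not an element count. A hedged sketch of the inferred declaration follows; the parameter names are illustrative, not taken from the source:

    #include <cstddef>
    #include <cstdint>

    // Inferred from the call sites in this listing: callers pass
    // kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data().
    extern "C" void xnn_math_f32_f16_cvt__sse2(
        size_t n,             // size of the output buffer in bytes
        const float* input,   // f32 values to convert
        uint16_t* output);    // converted values, written as raw f16 bits

The sse41, f16c, neon, neonfp16, wasmsimd, scalar_bitcast, and scalar_fabsf variants matched below take the same three arguments.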
31 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
36 xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
39 ASSERT_EQ(reference_output, outputs[i]) in TEST()
42 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
49 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
54 xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
57 ASSERT_EQ(reference_output, outputs[i]) in TEST()
60 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
67 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
72 xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
75 ASSERT_EQ(reference_output, outputs[i]) in TEST()
78 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
85 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
90 xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
93 ASSERT_EQ(reference_output, outputs[i]) in TEST()
96 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
103 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
108 xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
111 ASSERT_EQ(reference_output, outputs[i]) in TEST()
114 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
121 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
126 xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
129 ASSERT_EQ(reference_output, outputs[i]) in TEST()
132 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
139 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
141 xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
143 ASSERT_EQ(reference_output, outputs[0]) in TEST()
146 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
151 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
153 xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
155 ASSERT_EQ(reference_output, outputs[0]) in TEST()
158 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
163 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
168 xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
171 ASSERT_EQ(reference_output, outputs[i]) in TEST()
174 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
181 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
186 xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
189 ASSERT_EQ(reference_output, outputs[i]) in TEST()
192 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
199 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
201 xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
203 ASSERT_EQ(reference_output, outputs[0]) in TEST()
206 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
211 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
213 xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
215 ASSERT_EQ(reference_output, outputs[0]) in TEST()
218 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
223 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
228 xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
230 ASSERT_GT(outputs[i], UINT16_C(0x7C00)) in TEST()
232 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
233 ASSERT_LT(outputs[i], UINT16_C(0x8000)) in TEST()
235 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
242 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
247 xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
249 ASSERT_GT(outputs[i], UINT16_C(0xFC00)) in TEST()
251 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
262 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
267 xnn_math_f32_f16_cvt__sse41(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
270 ASSERT_EQ(reference_output, outputs[i]) in TEST()
273 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
282 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
287 xnn_math_f32_f16_cvt__sse41(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
290 ASSERT_EQ(reference_output, outputs[i]) in TEST()
293 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
302 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
307 xnn_math_f32_f16_cvt__sse41(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
310 ASSERT_EQ(reference_output, outputs[i]) in TEST()
313 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
322 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
327 xnn_math_f32_f16_cvt__sse41(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
330 ASSERT_EQ(reference_output, outputs[i]) in TEST()
333 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
342 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
347 xnn_math_f32_f16_cvt__sse41(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
350 ASSERT_EQ(reference_output, outputs[i]) in TEST()
353 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
362 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
367 xnn_math_f32_f16_cvt__sse41(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
370 ASSERT_EQ(reference_output, outputs[i]) in TEST()
373 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
382 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
384 xnn_math_f32_f16_cvt__sse41(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
386 ASSERT_EQ(reference_output, outputs[0]) in TEST()
389 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
396 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
398 xnn_math_f32_f16_cvt__sse41(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
400 ASSERT_EQ(reference_output, outputs[0]) in TEST()
403 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
410 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
415 xnn_math_f32_f16_cvt__sse41(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
418 ASSERT_EQ(reference_output, outputs[i]) in TEST()
421 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
430 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
435 xnn_math_f32_f16_cvt__sse41(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
438 ASSERT_EQ(reference_output, outputs[i]) in TEST()
441 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
450 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
452 xnn_math_f32_f16_cvt__sse41(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
454 ASSERT_EQ(reference_output, outputs[0]) in TEST()
457 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
464 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
466 xnn_math_f32_f16_cvt__sse41(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
468 ASSERT_EQ(reference_output, outputs[0]) in TEST()
471 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
478 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
483 xnn_math_f32_f16_cvt__sse41(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
485 ASSERT_GT(outputs[i], UINT16_C(0x7C00)) in TEST()
487 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
488 ASSERT_LT(outputs[i], UINT16_C(0x8000)) in TEST()
490 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
499 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
504 xnn_math_f32_f16_cvt__sse41(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
506 ASSERT_GT(outputs[i], UINT16_C(0xFC00)) in TEST()
508 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
519 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
524 xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
527 ASSERT_EQ(reference_output, outputs[i]) in TEST()
530 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
539 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
544 xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
547 ASSERT_EQ(reference_output, outputs[i]) in TEST()
550 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
559 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
564 xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
567 ASSERT_EQ(reference_output, outputs[i]) in TEST()
570 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
579 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
584 xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
587 ASSERT_EQ(reference_output, outputs[i]) in TEST()
590 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
599 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
604 xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
607 ASSERT_EQ(reference_output, outputs[i]) in TEST()
610 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
619 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
624 xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
627 ASSERT_EQ(reference_output, outputs[i]) in TEST()
630 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
639 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
641 xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
643 ASSERT_EQ(reference_output, outputs[0]) in TEST()
646 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
653 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
655 xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
657 ASSERT_EQ(reference_output, outputs[0]) in TEST()
660 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
667 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
672 xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
675 ASSERT_EQ(reference_output, outputs[i]) in TEST()
678 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
687 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
692 xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
695 ASSERT_EQ(reference_output, outputs[i]) in TEST()
698 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
707 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
709 xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
711 ASSERT_EQ(reference_output, outputs[0]) in TEST()
714 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
721 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
723 xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
725 ASSERT_EQ(reference_output, outputs[0]) in TEST()
728 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
735 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
740 xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
742 ASSERT_GT(outputs[i], UINT16_C(0x7C00)) in TEST()
744 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
745 ASSERT_LT(outputs[i], UINT16_C(0x8000)) in TEST()
747 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
756 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
761 xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
763 ASSERT_GT(outputs[i], UINT16_C(0xFC00)) in TEST()
765 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
776 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
781 xnn_math_f32_f16_cvt__neon(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
784 ASSERT_EQ(reference_output, outputs[i]) in TEST()
787 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
796 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
801 xnn_math_f32_f16_cvt__neon(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
804 ASSERT_EQ(reference_output, outputs[i]) in TEST()
807 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
816 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
821 xnn_math_f32_f16_cvt__neon(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
824 ASSERT_EQ(reference_output, outputs[i]) in TEST()
827 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
836 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
841 xnn_math_f32_f16_cvt__neon(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
844 ASSERT_EQ(reference_output, outputs[i]) in TEST()
847 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
856 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
861 xnn_math_f32_f16_cvt__neon(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
864 ASSERT_EQ(reference_output, outputs[i]) in TEST()
867 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
876 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
881 xnn_math_f32_f16_cvt__neon(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
884 ASSERT_EQ(reference_output, outputs[i]) in TEST()
887 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
896 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
898 xnn_math_f32_f16_cvt__neon(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
900 ASSERT_EQ(reference_output, outputs[0]) in TEST()
903 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
910 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
912 xnn_math_f32_f16_cvt__neon(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
914 ASSERT_EQ(reference_output, outputs[0]) in TEST()
917 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
924 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
929 xnn_math_f32_f16_cvt__neon(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
932 ASSERT_EQ(reference_output, outputs[i]) in TEST()
935 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
944 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
949 xnn_math_f32_f16_cvt__neon(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
952 ASSERT_EQ(reference_output, outputs[i]) in TEST()
955 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
964 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
966 xnn_math_f32_f16_cvt__neon(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
968 ASSERT_EQ(reference_output, outputs[0]) in TEST()
971 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
978 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
980 xnn_math_f32_f16_cvt__neon(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
982 ASSERT_EQ(reference_output, outputs[0]) in TEST()
985 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
992 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
997 xnn_math_f32_f16_cvt__neon(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
999 ASSERT_GT(outputs[i], UINT16_C(0x7C00)) in TEST()
1001 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1002 ASSERT_LT(outputs[i], UINT16_C(0x8000)) in TEST()
1004 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1013 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1018 xnn_math_f32_f16_cvt__neon(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1020 ASSERT_GT(outputs[i], UINT16_C(0xFC00)) in TEST()
1022 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1033 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1038 xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1041 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1044 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1053 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1058 xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1061 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1064 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1073 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1078 xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1081 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1084 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1093 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1098 xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1101 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1104 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1113 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1118 xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1121 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1124 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1133 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1138 xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1141 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1144 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1153 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1155 xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1157 ASSERT_EQ(reference_output, outputs[0]) in TEST()
1160 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
1167 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1169 xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1171 ASSERT_EQ(reference_output, outputs[0]) in TEST()
1174 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
1181 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1186 xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1189 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1192 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1201 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1206 xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1209 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1212 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1221 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1223 xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1225 ASSERT_EQ(reference_output, outputs[0]) in TEST()
1228 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
1235 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1237 xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1239 ASSERT_EQ(reference_output, outputs[0]) in TEST()
1242 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
1249 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1254 xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1256 ASSERT_GT(outputs[i], UINT16_C(0x7C00)) in TEST()
1258 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1259 ASSERT_LT(outputs[i], UINT16_C(0x8000)) in TEST()
1261 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1270 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1275 xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1277 ASSERT_GT(outputs[i], UINT16_C(0xFC00)) in TEST()
1279 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1288 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1293 xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1296 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1299 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1306 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1311 xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1314 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1317 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1324 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1329 xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1332 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1335 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1342 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1347 xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1350 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1353 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1360 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1365 xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1368 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1371 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1378 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1383 xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1386 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1389 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1396 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1398 xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1400 ASSERT_EQ(reference_output, outputs[0]) in TEST()
1403 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
1408 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1410 xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1412 ASSERT_EQ(reference_output, outputs[0]) in TEST()
1415 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
1420 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1425 xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1428 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1431 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1438 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1443 xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1446 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1449 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1456 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1458 xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1460 ASSERT_EQ(reference_output, outputs[0]) in TEST()
1463 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
1468 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1470 xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1472 ASSERT_EQ(reference_output, outputs[0]) in TEST()
1475 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
1480 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1485 xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1487 ASSERT_GT(outputs[i], UINT16_C(0x7C00)) in TEST()
1489 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1490 ASSERT_LT(outputs[i], UINT16_C(0x8000)) in TEST()
1492 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1499 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1504 xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1506 ASSERT_GT(outputs[i], UINT16_C(0xFC00)) in TEST()
1508 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1516 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1521 …xnn_math_f32_f16_cvt__scalar_bitcast(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1524 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1527 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1534 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1539 …xnn_math_f32_f16_cvt__scalar_bitcast(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1542 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1545 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1552 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1557 …xnn_math_f32_f16_cvt__scalar_bitcast(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1560 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1563 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1570 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1575 …xnn_math_f32_f16_cvt__scalar_bitcast(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1578 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1581 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1588 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1593 …xnn_math_f32_f16_cvt__scalar_bitcast(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1596 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1599 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1606 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1611 …xnn_math_f32_f16_cvt__scalar_bitcast(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1614 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1617 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1624 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1626 …xnn_math_f32_f16_cvt__scalar_bitcast(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1628 ASSERT_EQ(reference_output, outputs[0]) in TEST()
1631 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
1636 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1638 …xnn_math_f32_f16_cvt__scalar_bitcast(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1640 ASSERT_EQ(reference_output, outputs[0]) in TEST()
1643 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
1648 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1653 …xnn_math_f32_f16_cvt__scalar_bitcast(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1656 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1659 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1666 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1671 …xnn_math_f32_f16_cvt__scalar_bitcast(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1674 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1677 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1684 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1686 …xnn_math_f32_f16_cvt__scalar_bitcast(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1688 ASSERT_EQ(reference_output, outputs[0]) in TEST()
1691 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
1696 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1698 …xnn_math_f32_f16_cvt__scalar_bitcast(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1700 ASSERT_EQ(reference_output, outputs[0]) in TEST()
1703 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
1708 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1713 …xnn_math_f32_f16_cvt__scalar_bitcast(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1715 ASSERT_GT(outputs[i], UINT16_C(0x7C00)) in TEST()
1717 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1718 ASSERT_LT(outputs[i], UINT16_C(0x8000)) in TEST()
1720 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1727 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1732 …xnn_math_f32_f16_cvt__scalar_bitcast(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1734 ASSERT_GT(outputs[i], UINT16_C(0xFC00)) in TEST()
1736 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1743 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1748 … xnn_math_f32_f16_cvt__scalar_fabsf(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1751 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1754 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1761 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1766 … xnn_math_f32_f16_cvt__scalar_fabsf(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1769 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1772 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1779 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1784 … xnn_math_f32_f16_cvt__scalar_fabsf(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1787 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1790 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1797 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1802 … xnn_math_f32_f16_cvt__scalar_fabsf(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1805 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1808 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1815 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1820 … xnn_math_f32_f16_cvt__scalar_fabsf(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1823 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1826 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1833 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1838 … xnn_math_f32_f16_cvt__scalar_fabsf(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1841 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1844 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1851 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1853 xnn_math_f32_f16_cvt__scalar_fabsf(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1855 ASSERT_EQ(reference_output, outputs[0]) in TEST()
1858 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
1863 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1865 xnn_math_f32_f16_cvt__scalar_fabsf(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1867 ASSERT_EQ(reference_output, outputs[0]) in TEST()
1870 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
1875 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1880 … xnn_math_f32_f16_cvt__scalar_fabsf(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1883 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1886 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1893 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1898 … xnn_math_f32_f16_cvt__scalar_fabsf(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1901 ASSERT_EQ(reference_output, outputs[i]) in TEST()
1904 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1911 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1913 xnn_math_f32_f16_cvt__scalar_fabsf(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1915 ASSERT_EQ(reference_output, outputs[0]) in TEST()
1918 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
1923 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1925 xnn_math_f32_f16_cvt__scalar_fabsf(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1927 ASSERT_EQ(reference_output, outputs[0]) in TEST()
1930 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0]; in TEST()
1935 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1940 … xnn_math_f32_f16_cvt__scalar_fabsf(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1942 ASSERT_GT(outputs[i], UINT16_C(0x7C00)) in TEST()
1944 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1945 ASSERT_LT(outputs[i], UINT16_C(0x8000)) in TEST()
1947 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()
1954 std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize); in TEST() local
1959 … xnn_math_f32_f16_cvt__scalar_fabsf(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data()); in TEST()
1961 ASSERT_GT(outputs[i], UINT16_C(0xFC00)) in TEST()
1963 << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i]; in TEST()