Lines matching full:outputs (code-search hits in the XNNPACK f32-roundd math tests)
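Every hit below follows one GoogleTest pattern, repeated across ten round-down kernel variants: sse_addsub, sse2_cvt, sse41, neon_addsub, neon_cvt, neonv8, wasmsimd_addsub, wasmsimd_cvt, wasmsimd_native, and scalar_addsub. Each test allocates a 64-byte-aligned outputs buffer, invokes the kernel over a whole block (the first argument is a byte count, hence kBlockSize * sizeof(float)), and compares results bit-for-bit against a reference; single-value cases check outputs[0] (presumably special inputs such as signed zeroes and infinities), while range sweeps check outputs[i] in a loop. The sketch below reconstructs one such case from the truncated hits; it is an approximation, not the verbatim test: the test name, the input sweep, the std::floor reference, and the kBlockSize value are assumptions, and AlignedAllocator, float_as_uint32, and uint32_as_float are taken to be the XNNPACK test helpers of those names.

#include <gtest/gtest.h>

#include <cmath>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <vector>

constexpr size_t kBlockSize = 1024;  // assumed; the real constant is not in this listing

TEST(ROUNDD__SSE_ADDSUB, positive_normal) {  // hypothetical test name
  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
  for (size_t i = 0; i < kBlockSize; i++) {
    // Sweep consecutive bit patterns upward from 1.0f (illustrative choice).
    inputs[i] = uint32_as_float(UINT32_C(0x3F800000) + uint32_t(i));
  }
  // Round an entire block down; the size argument is in bytes, not elements.
  xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
  for (size_t i = 0; i < kBlockSize; i++) {
    const uint32_t reference_output = float_as_uint32(std::floor(inputs[i]));
    // Compare encodings rather than float values, so signed zeroes and NaNs
    // are distinguished.
    ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[i])
      << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
      << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
  }
}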
30 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
32 xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
34 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
37 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
42 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
44 xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
46 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
49 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
54 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
59 xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
62 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
65 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
72 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
77 xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
80 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
83 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
90 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
95 xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
98 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
101 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
108 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
113 xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
116 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
119 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
126 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
131 xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
134 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
137 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
144 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
149 xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
152 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
155 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
162 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
164 xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
166 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
169 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
174 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
176 xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
178 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
181 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
186 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
191 xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
194 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
197 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
204 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
209 xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
212 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
215 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
222 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
227 xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
230 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
233 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
240 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
245 xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
248 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
251 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
258 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
263 xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
266 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
269 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
276 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
281 xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
284 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
287 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

296 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
298 xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
300 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
303 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
308 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
310 xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
312 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
315 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
320 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
325 xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
328 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
331 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
338 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
343 xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
346 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
349 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
356 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
361 xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
364 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
367 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
374 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
379 xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
382 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
385 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
392 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
397 xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
400 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
403 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
410 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
415 xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
418 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
421 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
428 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
430 xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
432 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
435 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
440 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
442 xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
444 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
447 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
452 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
457 xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
460 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
463 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
470 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
475 xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
478 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
481 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
488 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
493 xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
496 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
499 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
506 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
511 xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
514 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
517 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
524 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
529 xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
532 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
535 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
542 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
547 xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
550 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
553 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

562 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
564 xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
566 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
569 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
574 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
576 xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
578 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
581 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
586 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
591 xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
594 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
597 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
604 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
609 xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
612 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
615 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
622 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
627 xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
630 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
633 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
640 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
645 xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
648 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
651 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
658 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
663 xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
666 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
669 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
676 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
681 xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
684 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
687 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
694 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
696 xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
698 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
701 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
706 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
708 xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
710 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
713 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
718 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
723 xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
726 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
729 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
736 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
741 xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
744 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
747 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
754 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
759 xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
762 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
765 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
772 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
777 xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
780 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
783 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
790 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
795 xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
798 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
801 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
808 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
813 xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
816 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
819 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

828 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
830 xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
832 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
835 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
840 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
842 xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
844 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
847 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
852 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
857 xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
860 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
863 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
870 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
875 xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
878 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
881 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
888 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
893 xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
896 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
899 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
906 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
911 xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
914 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
917 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
924 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
929 xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
932 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
935 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
942 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
947 xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
950 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
953 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
960 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
962 xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
964 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
967 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
972 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
974 xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
976 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
979 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
984 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
989 xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
992 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
995 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1002 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1007 xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1010 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1013 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1020 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1025 xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1028 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
1031 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1038 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1043 xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1046 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
1049 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1056 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1061 xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1064 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1067 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1074 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1079 xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1082 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1085 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1094 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1096 xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1098 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1101 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1106 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1108 xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1110 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1113 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1118 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1123 xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1126 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1129 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1136 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1141 xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1144 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1147 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1154 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1159 xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1162 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1165 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1172 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1177 xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1180 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1183 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1190 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1195 xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1198 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1201 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1208 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1213 xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1216 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1219 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1226 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1228 xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1230 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1233 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1238 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1240 xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1242 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1245 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1250 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1255 xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1258 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1261 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1268 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1273 xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1276 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1279 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1286 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1291 xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1294 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
1297 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1304 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1309 xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1312 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
1315 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1322 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1327 xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1330 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1333 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1340 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1345 xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1348 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1351 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1360 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1362 xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1364 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1367 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1372 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1374 xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1376 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1379 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1384 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1389 xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1392 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1395 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1402 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1407 xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1410 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1413 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1420 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1425 xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1428 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1431 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1438 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1443 xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1446 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1449 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1456 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1461 xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1464 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1467 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1474 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1479 xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1482 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1485 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1492 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1494 xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1496 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1499 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1504 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1506 xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1508 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1511 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1516 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1521 xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1524 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1527 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1534 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1539 xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1542 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1545 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1552 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1557 xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1560 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
1563 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1570 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1575 xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1578 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
1581 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1588 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1593 xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1596 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1599 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1606 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1611 xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1614 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1617 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1626 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1628 xnn_math_f32_roundd__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1630 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1633 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1638 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1640 xnn_math_f32_roundd__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1642 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1645 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1650 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1655 … xnn_math_f32_roundd__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1658 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1661 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1668 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1673 … xnn_math_f32_roundd__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1676 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1679 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1686 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1691 … xnn_math_f32_roundd__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1694 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1697 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1704 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1709 … xnn_math_f32_roundd__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1712 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1715 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1722 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1727 … xnn_math_f32_roundd__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1730 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1733 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1740 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1745 … xnn_math_f32_roundd__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1748 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1751 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1758 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1760 xnn_math_f32_roundd__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1762 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1765 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1770 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1772 xnn_math_f32_roundd__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1774 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1777 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1782 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1787 … xnn_math_f32_roundd__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1790 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1793 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1800 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1805 … xnn_math_f32_roundd__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1808 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1811 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1818 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1823 … xnn_math_f32_roundd__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1826 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
1829 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1836 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1841 … xnn_math_f32_roundd__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1844 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
1847 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1854 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1859 … xnn_math_f32_roundd__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1862 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1865 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1872 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1877 … xnn_math_f32_roundd__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1880 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1883 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1892 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1894 xnn_math_f32_roundd__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1896 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1899 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1904 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1906 xnn_math_f32_roundd__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1908 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1911 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1916 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1921 xnn_math_f32_roundd__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1924 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1927 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1934 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1939 xnn_math_f32_roundd__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1942 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1945 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1952 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1957 xnn_math_f32_roundd__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1960 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1963 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1970 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1975 xnn_math_f32_roundd__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1978 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1981 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1988 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1993 xnn_math_f32_roundd__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1996 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1999 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2006 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2011 xnn_math_f32_roundd__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2014 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2017 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2024 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2026 xnn_math_f32_roundd__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2028 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
2031 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
2036 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2038 xnn_math_f32_roundd__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2040 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
2043 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
2048 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2053 xnn_math_f32_roundd__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2056 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2059 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2066 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2071 xnn_math_f32_roundd__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2074 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2077 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2084 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2089 xnn_math_f32_roundd__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2092 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
2095 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2102 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2107 xnn_math_f32_roundd__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2110 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
2113 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2120 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2125 xnn_math_f32_roundd__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2128 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2131 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2138 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2143 xnn_math_f32_roundd__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2146 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2149 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

2158 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2160 xnn_math_f32_roundd__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2162 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
2165 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
2170 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2172 xnn_math_f32_roundd__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2174 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
2177 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
2182 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2187 … xnn_math_f32_roundd__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2190 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2193 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2200 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2205 … xnn_math_f32_roundd__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2208 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2211 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2218 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2223 … xnn_math_f32_roundd__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2226 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2229 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2236 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2241 … xnn_math_f32_roundd__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2244 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2247 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2254 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2259 … xnn_math_f32_roundd__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2262 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2265 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2272 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2277 … xnn_math_f32_roundd__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2280 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2283 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2290 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2292 xnn_math_f32_roundd__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2294 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
2297 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
2302 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2304 xnn_math_f32_roundd__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2306 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
2309 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
2314 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2319 … xnn_math_f32_roundd__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2322 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2325 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2332 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2337 … xnn_math_f32_roundd__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2340 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2343 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2350 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2355 … xnn_math_f32_roundd__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2358 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
2361 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2368 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2373 … xnn_math_f32_roundd__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2376 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF…
2379 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2386 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2391 … xnn_math_f32_roundd__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2394 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2397 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2404 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2409 … xnn_math_f32_roundd__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2412 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2415 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
(2 matches:)
  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
  xnn_math_f32_roundd__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
  ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

(6 matches:)
  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
  xnn_math_f32_roundd__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

(2 matches:)
  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
  xnn_math_f32_roundd__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
  ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

(2 matches:)
  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
  xnn_math_f32_roundd__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

(2 matches:)
  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
  xnn_math_f32_roundd__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
  ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

(2 matches:)
  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
  xnn_math_f32_roundd__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
(2 matches:)
  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
  xnn_math_f32_roundd__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
  ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

(6 matches:)
  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
  xnn_math_f32_roundd__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

(2 matches:)
  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
  xnn_math_f32_roundd__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
  ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

(2 matches:)
  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
  xnn_math_f32_roundd__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

(2 matches:)
  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
  xnn_math_f32_roundd__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
  ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

(2 matches:)
  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
  xnn_math_f32_roundd__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
    << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);