Lines matching "outputs" in the xnn_math_f32_roundz evaluation tests
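Every match below follows the same four-line shape inside a TEST() body: an aligned output buffer is declared, the kernel under test fills it, and each lane is compared bit-exactly against a reference value. Each line keeps its line number in the test file, and a leading or trailing "…" marks text the search tool truncated. As orientation, here is a minimal reconstruction of one such test. It is a sketch, not the shipped code: the test name, the input-filling step, the std::truncf reference, and the message prefix ahead of the truncation are guesses; AlignedAllocator and the kernel declaration are assumed to come from the repository's own headers; and float_as_uint32 is written out as a plain bit-cast because its real definition is outside this listing.

    #include <cmath>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <iomanip>
    #include <vector>
    #include <gtest/gtest.h>

    // Stand-in for the helper used throughout the listing; the real one
    // lives elsewhere in the repository.
    static uint32_t float_as_uint32(float f) {
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      return bits;
    }

    // xnn_math_f32_roundz__sse_addsub and AlignedAllocator are assumed to
    // come from XNNPACK's own headers.
    TEST(ROUNDZ__SSE_ADDSUB, positive_normal) {  // illustrative name
      constexpr size_t kBlockSize = 16;
      std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
      std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
      // ... fill inputs with the bit patterns this test case covers ...
      xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
      for (size_t i = 0; i < kBlockSize; i++) {
        const uint32_t reference_output = float_as_uint32(std::truncf(inputs[i]));
        ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
            << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[i])
            << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
      }
    }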
xnn_math_f32_roundz__sse_addsub:
30 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
35 xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
38 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
41 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
48 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
53 xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
56 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
59 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
66 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
71 xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
74 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
77 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
84 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
89 xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
92 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
95 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
102 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
104 xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
106 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
109 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
114 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
116 xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
118 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
121 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
126 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
131 xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
134 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
137 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
144 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
149 xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
152 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
155 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
162 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
167 xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
170 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
173 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
180 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
185 xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
188 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
191 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

198 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
203 xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
206 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
209 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
216 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
221 xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
224 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
227 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

xnn_math_f32_roundz__sse2_cvt:
236 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
241 xnn_math_f32_roundz__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
244 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
247 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
254 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
259 xnn_math_f32_roundz__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
262 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
265 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
272 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
277 xnn_math_f32_roundz__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
280 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
283 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
290 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
295 xnn_math_f32_roundz__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
298 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
301 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
308 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
310 xnn_math_f32_roundz__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
312 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
315 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
320 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
322 xnn_math_f32_roundz__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
324 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
327 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
332 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
337 xnn_math_f32_roundz__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
340 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
343 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
350 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
355 xnn_math_f32_roundz__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
358 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
361 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
368 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
373 xnn_math_f32_roundz__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
376 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
379 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
386 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
391 xnn_math_f32_roundz__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
394 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
397 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
404 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
409 xnn_math_f32_roundz__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
412 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
415 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
422 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
427 xnn_math_f32_roundz__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
430 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
433 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

xnn_math_f32_roundz__sse41:
442 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
447 xnn_math_f32_roundz__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
450 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
453 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
460 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
465 xnn_math_f32_roundz__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
468 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
471 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
478 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
483 xnn_math_f32_roundz__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
486 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
489 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
496 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
501 xnn_math_f32_roundz__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
504 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
507 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
514 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
516 xnn_math_f32_roundz__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
518 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
521 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
526 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
528 xnn_math_f32_roundz__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
530 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
533 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
538 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
543 xnn_math_f32_roundz__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
546 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
549 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
556 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
561 xnn_math_f32_roundz__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
564 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
567 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
574 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
579 xnn_math_f32_roundz__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
582 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
585 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
592 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
597 xnn_math_f32_roundz__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
600 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
603 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
610 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
615 xnn_math_f32_roundz__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
618 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
621 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
628 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
633 xnn_math_f32_roundz__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
636 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
639 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

xnn_math_f32_roundz__neon_addsub:
648 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
653 xnn_math_f32_roundz__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
656 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
659 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
666 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
671 xnn_math_f32_roundz__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
674 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
677 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
684 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
689 xnn_math_f32_roundz__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
692 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
695 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
702 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
707 xnn_math_f32_roundz__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
710 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
713 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
720 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
722 xnn_math_f32_roundz__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
724 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
727 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
732 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
734 xnn_math_f32_roundz__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
736 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
739 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
744 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
749 xnn_math_f32_roundz__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
752 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
755 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
762 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
767 xnn_math_f32_roundz__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
770 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
773 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
780 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
785 xnn_math_f32_roundz__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
788 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
791 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
798 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
803 xnn_math_f32_roundz__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
806 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
809 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
816 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
821 xnn_math_f32_roundz__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
824 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
827 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
834 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
839 xnn_math_f32_roundz__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
842 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
845 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

xnn_math_f32_roundz__neon_cvt:
854 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
859 xnn_math_f32_roundz__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
862 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
865 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
872 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
877 xnn_math_f32_roundz__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
880 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
883 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
890 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
895 xnn_math_f32_roundz__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
898 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
901 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
908 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
913 xnn_math_f32_roundz__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
916 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
919 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
926 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
928 xnn_math_f32_roundz__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
930 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
933 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
938 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
940 xnn_math_f32_roundz__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
942 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
945 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
950 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
955 xnn_math_f32_roundz__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
958 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
961 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
968 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
973 xnn_math_f32_roundz__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
976 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
979 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
986 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
991 xnn_math_f32_roundz__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
994 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
997 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1004 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1009 xnn_math_f32_roundz__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1012 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
1015 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1022 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1027 xnn_math_f32_roundz__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1030 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1033 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1040 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1045 xnn_math_f32_roundz__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1048 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1051 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

xnn_math_f32_roundz__neonv8:
1060 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1065 xnn_math_f32_roundz__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1068 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1071 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1078 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1083 xnn_math_f32_roundz__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1086 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1089 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1096 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1101 xnn_math_f32_roundz__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1104 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1107 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1114 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1119 xnn_math_f32_roundz__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1122 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1125 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1132 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1134 xnn_math_f32_roundz__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1136 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1139 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1144 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1146 xnn_math_f32_roundz__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1148 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1151 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1156 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1161 xnn_math_f32_roundz__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1164 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1167 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1174 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1179 xnn_math_f32_roundz__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1182 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1185 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1192 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1197 xnn_math_f32_roundz__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1200 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
1203 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1210 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1215 xnn_math_f32_roundz__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1218 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
1221 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1228 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1233 xnn_math_f32_roundz__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1236 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1239 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1246 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1251 xnn_math_f32_roundz__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1254 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1257 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

xnn_math_f32_roundz__wasmsimd_addsub:
1266 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1271 xnn_math_f32_roundz__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1274 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1277 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1284 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1289 xnn_math_f32_roundz__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1292 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1295 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1302 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1307 xnn_math_f32_roundz__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1310 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1313 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1320 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1325 xnn_math_f32_roundz__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1328 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1331 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1338 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1340 xnn_math_f32_roundz__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1342 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1345 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1350 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1352 xnn_math_f32_roundz__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1354 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1357 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1362 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1367 xnn_math_f32_roundz__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1370 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1373 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1380 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1385 xnn_math_f32_roundz__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1388 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1391 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1398 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1403 xnn_math_f32_roundz__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1406 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
1409 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1416 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1421 xnn_math_f32_roundz__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1424 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
1427 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1434 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1439 xnn_math_f32_roundz__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1442 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1445 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1452 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1457 xnn_math_f32_roundz__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1460 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1463 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

xnn_math_f32_roundz__wasmsimd_cvt:
1472 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1477 xnn_math_f32_roundz__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1480 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1483 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1490 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1495 xnn_math_f32_roundz__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1498 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1501 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1508 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1513 xnn_math_f32_roundz__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1516 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1519 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1526 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1531 xnn_math_f32_roundz__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1534 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1537 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1544 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1546 xnn_math_f32_roundz__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1548 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1551 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1556 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1558 xnn_math_f32_roundz__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1560 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1563 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1568 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1573 xnn_math_f32_roundz__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1576 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1579 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1586 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1591 xnn_math_f32_roundz__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1594 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1597 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1604 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1609 xnn_math_f32_roundz__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1612 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
1615 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1622 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1627 xnn_math_f32_roundz__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1630 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
1633 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1640 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1645 xnn_math_f32_roundz__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1648 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1651 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1658 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1663 xnn_math_f32_roundz__wasmsimd_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1666 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1669 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

xnn_math_f32_roundz__wasmsimd_native:
1678 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1683 xnn_math_f32_roundz__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1686 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1689 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1696 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1701 xnn_math_f32_roundz__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1704 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1707 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1714 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1719 xnn_math_f32_roundz__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1722 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1725 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1732 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1737 xnn_math_f32_roundz__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1740 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1743 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1750 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1752 xnn_math_f32_roundz__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1754 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1757 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1762 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1764 xnn_math_f32_roundz__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1766 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1769 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1774 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1779 xnn_math_f32_roundz__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1782 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1785 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1792 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1797 xnn_math_f32_roundz__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1800 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1803 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1810 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1815 xnn_math_f32_roundz__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1818 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
1821 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1828 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1833 xnn_math_f32_roundz__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1836 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
1839 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1846 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1851 xnn_math_f32_roundz__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1854 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1857 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1864 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1869 xnn_math_f32_roundz__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1872 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1875 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

xnn_math_f32_roundz__scalar_addsub:
1883 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1888 xnn_math_f32_roundz__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1891 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1894 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1901 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1906 xnn_math_f32_roundz__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1909 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1912 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1919 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1924 xnn_math_f32_roundz__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1927 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1930 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1937 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1942 xnn_math_f32_roundz__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1945 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1948 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1955 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1957 xnn_math_f32_roundz__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1959 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1962 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1967 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1969 xnn_math_f32_roundz__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1971 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
1974 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1979 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1984 xnn_math_f32_roundz__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1987 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1990 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1997 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2002 xnn_math_f32_roundz__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2005 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2008 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2015 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2020 xnn_math_f32_roundz__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2023 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
2026 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2033 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2038 xnn_math_f32_roundz__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2041 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
2044 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2051 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2056 xnn_math_f32_roundz__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2059 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2062 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2069 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2074 xnn_math_f32_roundz__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2077 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2080 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

xnn_math_f32_roundz__scalar_cvt:
2087 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2092 xnn_math_f32_roundz__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2095 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2098 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2105 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2110 xnn_math_f32_roundz__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2113 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2116 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2123 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2128 xnn_math_f32_roundz__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2131 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2134 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2141 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2146 xnn_math_f32_roundz__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2149 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2152 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2159 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2161 xnn_math_f32_roundz__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2163 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
2166 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
2171 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2173 xnn_math_f32_roundz__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2175 ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))
2178 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
2183 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2188 xnn_math_f32_roundz__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2191 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2194 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2201 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2206 xnn_math_f32_roundz__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2209 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2212 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2219 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2224 xnn_math_f32_roundz__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2227 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
2230 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2237 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2242 xnn_math_f32_roundz__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2245 ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFFFF))
2248 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2255 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2260 xnn_math_f32_roundz__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2263 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2266 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
2273 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
2278 xnn_math_f32_roundz__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
2281 ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
2284 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);