Lines Matching full:outputs
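The hits below appear to come from XNNPACK's f32-roundne (round-to-nearest-even) evaluation tests. Each hit shows the source line number, the matched source line (truncated by the search tool where marked with '…'), and a scope annotation: 'in TEST()' names the enclosing function, and 'local' marks the definition of a local variable. The same set of test cases is repeated for eight implementations: sse_addsub, sse2_cvt, sse41, neon_addsub, neonv8, wasmsimd_addsub, wasmsimd_native, and scalar_addsub.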

30 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
35 xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
38 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
41 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
48 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
53 xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
56 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
59 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
66 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
71 xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
74 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
77 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
84 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
89 xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
92 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
95 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
102 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
104 xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
106 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
109 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
114 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
116 xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
118 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
121 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
126 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
131 xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
134 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
137 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
144 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
149 xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
152 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
155 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
162 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
167 xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
170 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF… in TEST()
173 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
180 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
185 xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
188 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF… in TEST()
191 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
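Note: the two masked asserts above (and their counterparts in every later group) AND both sides with UINT32_C(0xFFBFFFFF) before comparing. That mask clears bit 22 of the binary32 representation, the bit that distinguishes quiet from signaling NaNs, so these are presumably the NaN test cases: an implementation that quiets a signaling NaN still passes, while the sign bit and the rest of the payload must match bit-for-bit.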
198 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
203 xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
206 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
209 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
216 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
221 xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
224 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
227 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
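To make the repeated pattern concrete, here is a sketch of how one such test case plausibly fits together once the matched fragments are reassembled. Only the 'outputs' lines above are verbatim; the suite and case names, the value of kBlockSize, the input-generation loop, the uint32_as_float helper, the use of std::nearbyint as the reference, and the truncated leading parts of the diagnostic message (filled in by analogy with the surviving ", optimized = 0x" tail) are assumptions for illustration. AlignedAllocator, float_as_uint32, and the kernel entry point are taken from the listing and are provided by the codebase under test, with TEST and ASSERT_EQ coming from GoogleTest.

    #include <cmath>    // std::nearbyint (assumed reference implementation)
    #include <cstddef>
    #include <cstdint>
    #include <iomanip>  // std::setw, std::setfill
    #include <vector>

    #include <gtest/gtest.h>

    // kBlockSize, the test name, and the sweep bounds are illustrative
    // assumptions, not taken from the listing; AlignedAllocator,
    // float_as_uint32/uint32_as_float, and xnn_math_f32_roundne__sse_addsub
    // are declared by the code under test.
    constexpr size_t kBlockSize = 1024;

    TEST(ROUNDNE__SSE_ADDSUB, positive_normal) {
      std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
      std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
      // Sweep consecutive positive-normal bit patterns in blocks (assumed loop).
      for (uint32_t n = UINT32_C(0x00800000); n < UINT32_C(0x4B800000); n += kBlockSize) {
        for (uint32_t i = 0; i < kBlockSize; i++) {
          inputs[i] = uint32_as_float(n + i);
        }
        // Matched call: process one block of kBlockSize floats.
        xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
        for (uint32_t i = 0; i < kBlockSize; i++) {
          const uint32_t reference_output = float_as_uint32(std::nearbyint(inputs[i]));
          // Matched assert: compare bit patterns, not float values, so the
          // test is exact and distinguishes -0.0f from +0.0f.
          ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
            << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[i])
            << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0') << reference_output
            << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
        }
      }
    }

Comparing through float_as_uint32 rather than with floating-point equality is the point of these tests: the optimized kernel must be bit-exact against the reference, including the sign of zero, and a failure prints all three values as 8-digit hex bit patterns.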
236 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
241 xnn_math_f32_roundne__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
244 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
247 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
254 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
259 xnn_math_f32_roundne__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
262 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
265 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
272 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
277 xnn_math_f32_roundne__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
280 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
283 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
290 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
295 xnn_math_f32_roundne__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
298 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
301 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
308 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
310 xnn_math_f32_roundne__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
312 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
315 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
320 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
322 xnn_math_f32_roundne__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
324 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
327 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
332 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
337 xnn_math_f32_roundne__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
340 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
343 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
350 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
355 xnn_math_f32_roundne__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
358 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
361 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
368 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
373 xnn_math_f32_roundne__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
376 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF… in TEST()
379 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
386 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
391 xnn_math_f32_roundne__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
394 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF… in TEST()
397 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
404 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
409 xnn_math_f32_roundne__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
412 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
415 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
422 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
427 xnn_math_f32_roundne__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
430 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
433 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
442 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
447 xnn_math_f32_roundne__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
450 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
453 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
460 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
465 xnn_math_f32_roundne__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
468 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
471 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
478 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
483 xnn_math_f32_roundne__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
486 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
489 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
496 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
501 xnn_math_f32_roundne__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
504 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
507 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
514 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
516 xnn_math_f32_roundne__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
518 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
521 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
526 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
528 xnn_math_f32_roundne__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
530 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
533 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
538 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
543 xnn_math_f32_roundne__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
546 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
549 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
556 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
561 xnn_math_f32_roundne__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
564 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
567 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
574 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
579 xnn_math_f32_roundne__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
582 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF… in TEST()
585 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
592 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
597 xnn_math_f32_roundne__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
600 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF… in TEST()
603 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
610 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
615 xnn_math_f32_roundne__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
618 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
621 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
628 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
633 xnn_math_f32_roundne__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
636 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
639 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
648 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
653 xnn_math_f32_roundne__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
656 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
659 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
666 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
671 xnn_math_f32_roundne__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
674 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
677 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
684 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
689 xnn_math_f32_roundne__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
692 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
695 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
702 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
707 xnn_math_f32_roundne__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
710 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
713 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
720 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
722 xnn_math_f32_roundne__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
724 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
727 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
732 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
734 xnn_math_f32_roundne__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
736 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
739 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
744 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
749 xnn_math_f32_roundne__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
752 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
755 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
762 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
767 xnn_math_f32_roundne__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
770 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
773 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
780 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
785 xnn_math_f32_roundne__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
788 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF… in TEST()
791 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
798 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
803 xnn_math_f32_roundne__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
806 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF… in TEST()
809 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
816 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
821 xnn_math_f32_roundne__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
824 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
827 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
834 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
839 xnn_math_f32_roundne__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
842 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
845 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
854 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
859 xnn_math_f32_roundne__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
862 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
865 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
872 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
877 xnn_math_f32_roundne__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
880 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
883 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
890 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
895 xnn_math_f32_roundne__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
898 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
901 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
908 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
913 xnn_math_f32_roundne__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
916 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
919 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
926 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
928 xnn_math_f32_roundne__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
930 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
933 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
938 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
940 xnn_math_f32_roundne__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
942 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
945 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
950 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
955 xnn_math_f32_roundne__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
958 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
961 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
968 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
973 xnn_math_f32_roundne__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
976 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
979 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
986 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
991 xnn_math_f32_roundne__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
994 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF… in TEST()
997 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1004 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1009 xnn_math_f32_roundne__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1012 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF… in TEST()
1015 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1022 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1027 xnn_math_f32_roundne__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1030 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1033 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1040 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1045 xnn_math_f32_roundne__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1048 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1051 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1060 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1065 … xnn_math_f32_roundne__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1068 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1071 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1078 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1083 … xnn_math_f32_roundne__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1086 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1089 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1096 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1101 … xnn_math_f32_roundne__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1104 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1107 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1114 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1119 … xnn_math_f32_roundne__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1122 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1125 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1132 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1134 … xnn_math_f32_roundne__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1136 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1139 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1144 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1146 … xnn_math_f32_roundne__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1148 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1151 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1156 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1161 … xnn_math_f32_roundne__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1164 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1167 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1174 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1179 … xnn_math_f32_roundne__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1182 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1185 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1192 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1197 … xnn_math_f32_roundne__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1200 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF… in TEST()
1203 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1210 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1215 … xnn_math_f32_roundne__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1218 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF… in TEST()
1221 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1228 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1233 … xnn_math_f32_roundne__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1236 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1239 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1246 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1251 … xnn_math_f32_roundne__wasmsimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1254 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1257 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1266 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1271 … xnn_math_f32_roundne__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1274 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1277 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1284 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1289 … xnn_math_f32_roundne__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1292 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1295 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1302 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1307 … xnn_math_f32_roundne__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1310 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1313 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1320 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1325 … xnn_math_f32_roundne__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1328 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1331 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1338 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1340 … xnn_math_f32_roundne__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1342 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1345 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1350 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1352 … xnn_math_f32_roundne__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1354 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1357 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1362 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1367 … xnn_math_f32_roundne__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1370 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1373 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1380 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1385 … xnn_math_f32_roundne__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1388 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1391 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1398 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1403 … xnn_math_f32_roundne__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1406 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF… in TEST()
1409 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1416 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1421 … xnn_math_f32_roundne__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1424 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF… in TEST()
1427 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1434 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1439 … xnn_math_f32_roundne__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1442 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1445 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1452 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1457 … xnn_math_f32_roundne__wasmsimd_native(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1460 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1463 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1471 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1476 xnn_math_f32_roundne__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1479 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1482 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1489 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1494 xnn_math_f32_roundne__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1497 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1500 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1507 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1512 xnn_math_f32_roundne__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1515 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1518 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1525 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1530 xnn_math_f32_roundne__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1533 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1536 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1543 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1545 xnn_math_f32_roundne__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1547 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1550 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1555 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1557 xnn_math_f32_roundne__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1559 ASSERT_EQ(reference_output, float_as_uint32(outputs[0])) in TEST()
1562 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]); in TEST()
1567 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1572 xnn_math_f32_roundne__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1575 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1578 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1585 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1590 xnn_math_f32_roundne__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1593 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1596 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1603 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1608 xnn_math_f32_roundne__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1611 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF… in TEST()
1614 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1621 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1626 xnn_math_f32_roundne__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1629 …ASSERT_EQ(reference_output & UINT32_C(0xFFBFFFFF), float_as_uint32(outputs[i]) & UINT32_C(0xFFBFFF… in TEST()
1632 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1639 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1644 xnn_math_f32_roundne__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1647 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1650 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()
1657 std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize); in TEST() local
1662 xnn_math_f32_roundne__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data()); in TEST()
1665 ASSERT_EQ(reference_output, float_as_uint32(outputs[i])) in TEST()
1668 …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]); in TEST()