Lines Matching full:outputs

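Each hit below is from what appears to be XNNPACK's f32 exp math-function tests: a source line number followed by the matching line of test code, with "…" marking text the search tool truncated away.
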
34   std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
36   xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
38   ASSERT_EQ(reference_output, outputs[0])
41   …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
48   std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
50   xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
52   ASSERT_EQ(reference_output, outputs[0])
55   …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
62   std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
67   xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
70   ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
73   …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
82   std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
87   xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
90   ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
93   …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
102  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
107  xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
109  ASSERT_TRUE(std::isnan(outputs[i]))
111  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
120  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
125  xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
127  ASSERT_TRUE(std::isnan(outputs[i]))
129  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
141  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
143  xnn_math_f32_exp__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
145  ASSERT_EQ(reference_output, outputs[0])
148  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
155  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
157  xnn_math_f32_exp__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
159  ASSERT_EQ(reference_output, outputs[0])
162  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
169  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
174  xnn_math_f32_exp__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
177  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
180  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
189  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
194  xnn_math_f32_exp__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
197  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
200  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
209  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
214  xnn_math_f32_exp__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
216  ASSERT_TRUE(std::isnan(outputs[i]))
218  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
227  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
232  xnn_math_f32_exp__neonfma_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
234  ASSERT_TRUE(std::isnan(outputs[i]))
236  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
248  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
250  xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
252  ASSERT_EQ(reference_output, outputs[0])
255  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
262  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
264  xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
266  ASSERT_EQ(reference_output, outputs[0])
269  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
276  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
281  xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
284  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
287  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
296  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
301  xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
304  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
307  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
316  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
321  xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
323  ASSERT_TRUE(std::isnan(outputs[i]))
325  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
334  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
339  xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
341  ASSERT_TRUE(std::isnan(outputs[i]))
343  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
355  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
357  xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
359  ASSERT_EQ(reference_output, outputs[0])
362  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
369  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
371  xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
373  ASSERT_EQ(reference_output, outputs[0])
376  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
383  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
388  xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
391  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
394  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
403  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
408  xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
411  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
414  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
423  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
428  xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
430  ASSERT_TRUE(std::isnan(outputs[i]))
432  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
441  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
446  xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
448  ASSERT_TRUE(std::isnan(outputs[i]))
450  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
462  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
464  xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
466  ASSERT_EQ(reference_output, outputs[0])
469  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
476  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
478  xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
480  ASSERT_EQ(reference_output, outputs[0])
483  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
490  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
495  xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
498  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
501  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
510  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
515  xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
518  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
521  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
530  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
535  xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
537  ASSERT_TRUE(std::isnan(outputs[i]))
539  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
548  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
553  xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
555  ASSERT_TRUE(std::isnan(outputs[i]))
557  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
569  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
571  xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
573  ASSERT_EQ(reference_output, outputs[0])
576  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
583  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
585  xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
587  ASSERT_EQ(reference_output, outputs[0])
590  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
597  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
602  xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
605  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
608  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
617  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
622  xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
625  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
628  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
637  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
642  xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
644  ASSERT_TRUE(std::isnan(outputs[i]))
646  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
655  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
660  xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
662  ASSERT_TRUE(std::isnan(outputs[i]))
664  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
676  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
678  xnn_math_f32_exp__avx512f_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
680  ASSERT_EQ(reference_output, outputs[0])
683  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
690  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
692  xnn_math_f32_exp__avx512f_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
694  ASSERT_EQ(reference_output, outputs[0])
697  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
704  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
709  xnn_math_f32_exp__avx512f_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
712  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
715  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
724  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
729  xnn_math_f32_exp__avx512f_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
732  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
735  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
744  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
749  xnn_math_f32_exp__avx512f_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
751  ASSERT_TRUE(std::isnan(outputs[i]))
753  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
762  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
767  xnn_math_f32_exp__avx512f_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
769  ASSERT_TRUE(std::isnan(outputs[i]))
771  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
783  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
785  xnn_math_f32_exp__avx512f_rr2_p5_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
787  ASSERT_EQ(reference_output, outputs[0])
790  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
797  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
799  xnn_math_f32_exp__avx512f_rr2_p5_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
801  ASSERT_EQ(reference_output, outputs[0])
804  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
811  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
816  xnn_math_f32_exp__avx512f_rr2_p5_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
819  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
822  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
831  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
836  xnn_math_f32_exp__avx512f_rr2_p5_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
839  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
842  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
851  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
856  xnn_math_f32_exp__avx512f_rr2_p5_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
858  ASSERT_TRUE(std::isnan(outputs[i]))
860  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
869  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
874  xnn_math_f32_exp__avx512f_rr2_p5_scalef(kBlockSize * sizeof(float), inputs.data(), outputs.data());
876  ASSERT_TRUE(std::isnan(outputs[i]))
878  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
890  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
892  xnn_math_f32_exp__avx2_rr2_lut8_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
894  ASSERT_EQ(reference_output, outputs[0])
897  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
904  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
906  xnn_math_f32_exp__avx2_rr2_lut8_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
908  ASSERT_EQ(reference_output, outputs[0])
911  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
918  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
923  xnn_math_f32_exp__avx2_rr2_lut8_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
926  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
929  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
938  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
943  xnn_math_f32_exp__avx2_rr2_lut8_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
946  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
949  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
958  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
963  xnn_math_f32_exp__avx2_rr2_lut8_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
965  ASSERT_TRUE(std::isnan(outputs[i]))
967  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
976  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
981  xnn_math_f32_exp__avx2_rr2_lut8_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
983  ASSERT_TRUE(std::isnan(outputs[i]))
985  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
997   std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
999   xnn_math_f32_exp__avx2_rr2_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1001  ASSERT_EQ(reference_output, outputs[0])
1004  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1011  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1013  xnn_math_f32_exp__avx2_rr2_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1015  ASSERT_EQ(reference_output, outputs[0])
1018  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1025  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1030  xnn_math_f32_exp__avx2_rr2_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1033  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1036  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1045  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1050  xnn_math_f32_exp__avx2_rr2_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1053  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1056  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1065  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1070  xnn_math_f32_exp__avx2_rr2_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1072  ASSERT_TRUE(std::isnan(outputs[i]))
1074  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1083  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1088  xnn_math_f32_exp__avx2_rr2_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1090  ASSERT_TRUE(std::isnan(outputs[i]))
1092  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1104  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1106  xnn_math_f32_exp__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1108  ASSERT_EQ(reference_output, outputs[0])
1111  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1118  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1120  xnn_math_f32_exp__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1122  ASSERT_EQ(reference_output, outputs[0])
1125  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1132  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1137  xnn_math_f32_exp__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1140  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1143  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1152  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1157  xnn_math_f32_exp__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1160  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1163  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1172  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1177  xnn_math_f32_exp__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1179  ASSERT_TRUE(std::isnan(outputs[i]))
1181  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1190  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1195  xnn_math_f32_exp__avx2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1197  ASSERT_TRUE(std::isnan(outputs[i]))
1199  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1211  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1213  xnn_math_f32_exp__avx_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1215  ASSERT_EQ(reference_output, outputs[0])
1218  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1225  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1227  xnn_math_f32_exp__avx_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1229  ASSERT_EQ(reference_output, outputs[0])
1232  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1239  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1244  xnn_math_f32_exp__avx_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1247  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1250  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1259  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1264  xnn_math_f32_exp__avx_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1267  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1270  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1279  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1284  xnn_math_f32_exp__avx_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1286  ASSERT_TRUE(std::isnan(outputs[i]))
1288  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1297  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1302  xnn_math_f32_exp__avx_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1304  ASSERT_TRUE(std::isnan(outputs[i]))
1306  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1316  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1318  xnn_math_f32_exp__sse2_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1320  ASSERT_EQ(reference_output, outputs[0])
1323  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1328  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1330  xnn_math_f32_exp__sse2_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1332  ASSERT_EQ(reference_output, outputs[0])
1335  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1340  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1345  xnn_math_f32_exp__sse2_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1348  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1351  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1358  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1363  xnn_math_f32_exp__sse2_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1366  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1369  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1376  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1381  xnn_math_f32_exp__sse2_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1383  ASSERT_TRUE(std::isnan(outputs[i]))
1385  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1392  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1397  xnn_math_f32_exp__sse2_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1399  ASSERT_TRUE(std::isnan(outputs[i]))
1401  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1411  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1413  xnn_math_f32_exp__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1415  ASSERT_EQ(reference_output, outputs[0])
1418  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1423  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1425  xnn_math_f32_exp__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1427  ASSERT_EQ(reference_output, outputs[0])
1430  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);
1435  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1440  xnn_math_f32_exp__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1443  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1446  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1453  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1458  xnn_math_f32_exp__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1461  ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))
1464  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1471  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1476  xnn_math_f32_exp__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1478  ASSERT_TRUE(std::isnan(outputs[i]))
1480  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);
1487  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1492  xnn_math_f32_exp__sse2_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1494  ASSERT_TRUE(std::isnan(outputs[i]))
1496  …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);