Lines matching full:outputs (each match is shown with its line number in the test source; non-matching lines are omitted)

34  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
36  xnn_math_f32_expm1minus__neon_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
38  ASSERT_EQ(reference_output, outputs[0])
41  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

48  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
53  xnn_math_f32_expm1minus__neon_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
56  ASSERT_EQ(reference_output, outputs[i])
59  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

68  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
73  xnn_math_f32_expm1minus__neon_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
75  ASSERT_TRUE(std::isnan(outputs[i]))
77  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

86  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
91  xnn_math_f32_expm1minus__neon_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
93  ASSERT_TRUE(std::isnan(outputs[i]))
95  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

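All 26 kernel variants in this listing get the same four tests: an exact-value check at a single input (outputs[0]), an exact-value sweep over a block of inputs (outputs[i]), and two NaN-propagation checks via std::isnan. The variant suffixes name the approximation scheme (e.g. rr2 = two-step range reduction, lut16 = 16-entry lookup table, p3 = degree-3 polynomial). Because the search shows only lines containing "outputs", the input setup, the reference computation, and the leading clauses of each failure message fall on the omitted lines; the sketch below reconstructs one plausible exact-value test body. The test name, the input fill, and the std::expm1 reference are illustrative assumptions, while kBlockSize, AlignedAllocator, float_as_uint32, and the kernel signature are taken from the matched lines above.

    #include <algorithm>
    #include <cmath>
    #include <iomanip>
    #include <ios>
    #include <vector>
    #include <gtest/gtest.h>

    TEST(EXPM1MINUS__NEON_RR2_LUT16_P3, exact_value_sketch) {
      std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
      std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
      // Illustrative input; the real tests enumerate specific bit patterns.
      std::fill(inputs.begin(), inputs.end(), -0.0f);
      // The kernel's first argument is a byte count, hence the sizeof(float) scaling.
      xnn_math_f32_expm1minus__neon_rr2_lut16_p3(
          kBlockSize * sizeof(float), inputs.data(), outputs.data());
      const float reference_output = std::expm1(-0.0f);  // illustrative reference
      ASSERT_EQ(reference_output, outputs[0])
          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0')
          << float_as_uint32(inputs[0])
          << ", reference = 0x" << std::hex << std::setw(8) << std::setfill('0')
          << float_as_uint32(reference_output)
          << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0')
          << float_as_uint32(outputs[0]);
    }

ASSERT_EQ compares with operator==, so these checks demand an exactly equal value rather than an error bound; the NaN tests must use ASSERT_TRUE(std::isnan(outputs[i])) instead, because NaN compares unequal to everything, including itself.
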
107  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
109  xnn_math_f32_expm1minus__neon_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
111  ASSERT_EQ(reference_output, outputs[0])
114  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

121  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
126  xnn_math_f32_expm1minus__neon_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
129  ASSERT_EQ(reference_output, outputs[i])
132  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

141  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
146  xnn_math_f32_expm1minus__neon_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
148  ASSERT_TRUE(std::isnan(outputs[i]))
150  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

159  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
164  xnn_math_f32_expm1minus__neon_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
166  ASSERT_TRUE(std::isnan(outputs[i]))
168  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

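Every failure message above formats floats through float_as_uint32, so a mismatch reports the exact IEEE-754 bit pattern instead of a rounded decimal. XNNPACK ships its own helper; a minimal stand-in (an assumption, not the library's definition) is a memcpy bit-cast:

    #include <cstdint>
    #include <cstring>

    // Reinterpret a float's bits as uint32_t. memcpy is the well-defined way
    // to type-pun in C++ (no strict-aliasing violation) and optimizes away.
    static inline uint32_t float_as_uint32(float f) {
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      return bits;
    }
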
180  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
182  xnn_math_f32_expm1minus__neonfma_rr1_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
184  ASSERT_EQ(reference_output, outputs[0])
187  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

194  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
199  xnn_math_f32_expm1minus__neonfma_rr1_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
202  ASSERT_EQ(reference_output, outputs[i])
205  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

214  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
219  xnn_math_f32_expm1minus__neonfma_rr1_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
221  ASSERT_TRUE(std::isnan(outputs[i]))
223  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

232  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
237  xnn_math_f32_expm1minus__neonfma_rr1_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
239  ASSERT_TRUE(std::isnan(outputs[i]))
241  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

253  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
255  xnn_math_f32_expm1minus__neonfma_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
257  ASSERT_EQ(reference_output, outputs[0])
260  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

267  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
272  xnn_math_f32_expm1minus__neonfma_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
275  ASSERT_EQ(reference_output, outputs[i])
278  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

287  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
292  xnn_math_f32_expm1minus__neonfma_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
294  ASSERT_TRUE(std::isnan(outputs[i]))
296  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

305  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
310  xnn_math_f32_expm1minus__neonfma_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
312  ASSERT_TRUE(std::isnan(outputs[i]))
314  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

326  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
328  xnn_math_f32_expm1minus__avx512f_rr1_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
330  ASSERT_EQ(reference_output, outputs[0])
333  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

340  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
345  xnn_math_f32_expm1minus__avx512f_rr1_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
348  ASSERT_EQ(reference_output, outputs[i])
351  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

360  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
365  xnn_math_f32_expm1minus__avx512f_rr1_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
367  ASSERT_TRUE(std::isnan(outputs[i]))
369  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

378  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
383  xnn_math_f32_expm1minus__avx512f_rr1_lut16_p3_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
385  ASSERT_TRUE(std::isnan(outputs[i]))
387  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

399  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
401  xnn_math_f32_expm1minus__avx512f_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
403  ASSERT_EQ(reference_output, outputs[0])
406  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

413  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
418  xnn_math_f32_expm1minus__avx512f_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
421  ASSERT_EQ(reference_output, outputs[i])
424  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

433  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
438  xnn_math_f32_expm1minus__avx512f_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
440  ASSERT_TRUE(std::isnan(outputs[i]))
442  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

451  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
456  xnn_math_f32_expm1minus__avx512f_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
458  ASSERT_TRUE(std::isnan(outputs[i]))
460  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

472  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
474  xnn_math_f32_expm1minus__avx2_rr1_lut4_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
476  ASSERT_EQ(reference_output, outputs[0])
479  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

486  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
491  xnn_math_f32_expm1minus__avx2_rr1_lut4_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
494  ASSERT_EQ(reference_output, outputs[i])
497  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

506  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
511  xnn_math_f32_expm1minus__avx2_rr1_lut4_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
513  ASSERT_TRUE(std::isnan(outputs[i]))
515  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

524  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
529  xnn_math_f32_expm1minus__avx2_rr1_lut4_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
531  ASSERT_TRUE(std::isnan(outputs[i]))
533  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

545  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
547  xnn_math_f32_expm1minus__avx2_rr1_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
549  ASSERT_EQ(reference_output, outputs[0])
552  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

559  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
564  xnn_math_f32_expm1minus__avx2_rr1_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
567  ASSERT_EQ(reference_output, outputs[i])
570  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

579  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
584  xnn_math_f32_expm1minus__avx2_rr1_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
586  ASSERT_TRUE(std::isnan(outputs[i]))
588  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

597  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
602  xnn_math_f32_expm1minus__avx2_rr1_lut8_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
604  ASSERT_TRUE(std::isnan(outputs[i]))
606  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

618  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
620  xnn_math_f32_expm1minus__avx2_rr1_lut16_p3_gather(kBlockSize * sizeof(float), inputs.data(), outputs.data());
622  ASSERT_EQ(reference_output, outputs[0])
625  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

632  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
637  xnn_math_f32_expm1minus__avx2_rr1_lut16_p3_gather(kBlockSize * sizeof(float), inputs.data(), outputs.data());
640  ASSERT_EQ(reference_output, outputs[i])
643  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

652  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
657  xnn_math_f32_expm1minus__avx2_rr1_lut16_p3_gather(kBlockSize * sizeof(float), inputs.data(), outputs.data());
659  ASSERT_TRUE(std::isnan(outputs[i]))
661  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

670  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
675  xnn_math_f32_expm1minus__avx2_rr1_lut16_p3_gather(kBlockSize * sizeof(float), inputs.data(), outputs.data());
677  ASSERT_TRUE(std::isnan(outputs[i]))
679  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

691  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
693  xnn_math_f32_expm1minus__avx2_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
695  ASSERT_EQ(reference_output, outputs[0])
698  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

705  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
710  xnn_math_f32_expm1minus__avx2_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
713  ASSERT_EQ(reference_output, outputs[i])
716  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

725  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
730  xnn_math_f32_expm1minus__avx2_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
732  ASSERT_TRUE(std::isnan(outputs[i]))
734  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

743  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
748  xnn_math_f32_expm1minus__avx2_rr1_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
750  ASSERT_TRUE(std::isnan(outputs[i]))
752  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

764  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
766  xnn_math_f32_expm1minus__avx_rr2_lut4_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
768  ASSERT_EQ(reference_output, outputs[0])
771  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

778  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
783  xnn_math_f32_expm1minus__avx_rr2_lut4_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
786  ASSERT_EQ(reference_output, outputs[i])
789  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

798  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
803  xnn_math_f32_expm1minus__avx_rr2_lut4_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
805  ASSERT_TRUE(std::isnan(outputs[i]))
807  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

816  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
821  xnn_math_f32_expm1minus__avx_rr2_lut4_p4_perm(kBlockSize * sizeof(float), inputs.data(), outputs.data());
823  ASSERT_TRUE(std::isnan(outputs[i]))
825  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

837  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
839  xnn_math_f32_expm1minus__avx_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
841  ASSERT_EQ(reference_output, outputs[0])
844  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

851  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
856  xnn_math_f32_expm1minus__avx_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
859  ASSERT_EQ(reference_output, outputs[i])
862  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

871  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
876  xnn_math_f32_expm1minus__avx_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
878  ASSERT_TRUE(std::isnan(outputs[i]))
880  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

889  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
894  xnn_math_f32_expm1minus__avx_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
896  ASSERT_TRUE(std::isnan(outputs[i]))
898  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

910  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
912  xnn_math_f32_expm1minus__avx_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
914  ASSERT_EQ(reference_output, outputs[0])
917  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

924  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
929  xnn_math_f32_expm1minus__avx_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
932  ASSERT_EQ(reference_output, outputs[i])
935  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

944  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
949  xnn_math_f32_expm1minus__avx_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
951  ASSERT_TRUE(std::isnan(outputs[i]))
953  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

962  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
967  xnn_math_f32_expm1minus__avx_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
969  ASSERT_TRUE(std::isnan(outputs[i]))
971  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

981  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
983  xnn_math_f32_expm1minus__sse2_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
985  ASSERT_EQ(reference_output, outputs[0])
988  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

993  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
998  xnn_math_f32_expm1minus__sse2_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1001  ASSERT_EQ(reference_output, outputs[i])
1004  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1011  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1016  xnn_math_f32_expm1minus__sse2_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1018  ASSERT_TRUE(std::isnan(outputs[i]))
1020  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1027  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1032  xnn_math_f32_expm1minus__sse2_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1034  ASSERT_TRUE(std::isnan(outputs[i]))
1036  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1046  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1048  xnn_math_f32_expm1minus__sse2_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1050  ASSERT_EQ(reference_output, outputs[0])
1053  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

1058  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1063  xnn_math_f32_expm1minus__sse2_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1066  ASSERT_EQ(reference_output, outputs[i])
1069  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1076  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1081  xnn_math_f32_expm1minus__sse2_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1083  ASSERT_TRUE(std::isnan(outputs[i]))
1085  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1092  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1097  xnn_math_f32_expm1minus__sse2_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1099  ASSERT_TRUE(std::isnan(outputs[i]))
1101  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1111  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1113  xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_andnot(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1115  ASSERT_EQ(reference_output, outputs[0])
1118  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

1123  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1128  xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_andnot(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1131  ASSERT_EQ(reference_output, outputs[i])
1134  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1141  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1146  xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_andnot(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1148  ASSERT_TRUE(std::isnan(outputs[i]))
1150  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1157  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1162  xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_andnot(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1164  ASSERT_TRUE(std::isnan(outputs[i]))
1166  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1176  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1178  xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_max(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1180  ASSERT_EQ(reference_output, outputs[0])
1183  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

1188  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1193  xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_max(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1196  ASSERT_EQ(reference_output, outputs[i])
1199  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1206  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1211  xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_max(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1213  ASSERT_TRUE(std::isnan(outputs[i]))
1215  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1222  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1227  xnn_math_f32_expm1minus__wasmsimd_rr2_lut16_p3_max(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1229  ASSERT_TRUE(std::isnan(outputs[i]))
1231  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1241  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1243  xnn_math_f32_expm1minus__wasmsimd_rr2_p6_andnot(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1245  ASSERT_EQ(reference_output, outputs[0])
1248  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

1253  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1258  xnn_math_f32_expm1minus__wasmsimd_rr2_p6_andnot(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1261  ASSERT_EQ(reference_output, outputs[i])
1264  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1271  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1276  xnn_math_f32_expm1minus__wasmsimd_rr2_p6_andnot(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1278  ASSERT_TRUE(std::isnan(outputs[i]))
1280  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1287  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1292  xnn_math_f32_expm1minus__wasmsimd_rr2_p6_andnot(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1294  ASSERT_TRUE(std::isnan(outputs[i]))
1296  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1306  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1308  xnn_math_f32_expm1minus__wasmsimd_rr2_p6_max(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1310  ASSERT_EQ(reference_output, outputs[0])
1313  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

1318  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1323  xnn_math_f32_expm1minus__wasmsimd_rr2_p6_max(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1326  ASSERT_EQ(reference_output, outputs[i])
1329  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1336  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1341  xnn_math_f32_expm1minus__wasmsimd_rr2_p6_max(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1343  ASSERT_TRUE(std::isnan(outputs[i]))
1345  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1352  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1357  xnn_math_f32_expm1minus__wasmsimd_rr2_p6_max(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1359  ASSERT_TRUE(std::isnan(outputs[i]))
1361  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1370  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1372  xnn_math_f32_expm1minus__scalar_rr2_lut4_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1374  ASSERT_EQ(reference_output, outputs[0])
1377  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

1382  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1387  xnn_math_f32_expm1minus__scalar_rr2_lut4_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1390  ASSERT_EQ(reference_output, outputs[i])
1393  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1400  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1405  xnn_math_f32_expm1minus__scalar_rr2_lut4_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1407  ASSERT_TRUE(std::isnan(outputs[i]))
1409  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1416  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1421  xnn_math_f32_expm1minus__scalar_rr2_lut4_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1423  ASSERT_TRUE(std::isnan(outputs[i]))
1425  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1433  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1435  xnn_math_f32_expm1minus__scalar_rr2_lut8_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1437  ASSERT_EQ(reference_output, outputs[0])
1440  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

1445  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1450  xnn_math_f32_expm1minus__scalar_rr2_lut8_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1453  ASSERT_EQ(reference_output, outputs[i])
1456  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1463  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1468  xnn_math_f32_expm1minus__scalar_rr2_lut8_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1470  ASSERT_TRUE(std::isnan(outputs[i]))
1472  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1479  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1484  xnn_math_f32_expm1minus__scalar_rr2_lut8_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1486  ASSERT_TRUE(std::isnan(outputs[i]))
1488  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1496  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1498  xnn_math_f32_expm1minus__scalar_rr2_lut8_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1500  ASSERT_EQ(reference_output, outputs[0])
1503  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

1508  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1513  xnn_math_f32_expm1minus__scalar_rr2_lut8_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1516  ASSERT_EQ(reference_output, outputs[i])
1519  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1526  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1531  xnn_math_f32_expm1minus__scalar_rr2_lut8_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1533  ASSERT_TRUE(std::isnan(outputs[i]))
1535  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1542  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1547  xnn_math_f32_expm1minus__scalar_rr2_lut8_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1549  ASSERT_TRUE(std::isnan(outputs[i]))
1551  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1559  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1561  xnn_math_f32_expm1minus__scalar_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1563  ASSERT_EQ(reference_output, outputs[0])
1566  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

1571  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1576  xnn_math_f32_expm1minus__scalar_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1579  ASSERT_EQ(reference_output, outputs[i])
1582  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1589  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1594  xnn_math_f32_expm1minus__scalar_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1596  ASSERT_TRUE(std::isnan(outputs[i]))
1598  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1605  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1610  xnn_math_f32_expm1minus__scalar_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1612  ASSERT_TRUE(std::isnan(outputs[i]))
1614  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1622  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1624  xnn_math_f32_expm1minus__scalar_rr2_lut16_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1626  ASSERT_EQ(reference_output, outputs[0])
1629  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

1634  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1639  xnn_math_f32_expm1minus__scalar_rr2_lut16_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1642  ASSERT_EQ(reference_output, outputs[i])
1645  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1652  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1657  xnn_math_f32_expm1minus__scalar_rr2_lut16_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1659  ASSERT_TRUE(std::isnan(outputs[i]))
1661  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1668  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1673  xnn_math_f32_expm1minus__scalar_rr2_lut16_p4(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1675  ASSERT_TRUE(std::isnan(outputs[i]))
1677  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1685  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1687  xnn_math_f32_expm1minus__scalar_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1689  ASSERT_EQ(reference_output, outputs[0])
1692  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

1697  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1702  xnn_math_f32_expm1minus__scalar_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1705  ASSERT_EQ(reference_output, outputs[i])
1708  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1715  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1720  xnn_math_f32_expm1minus__scalar_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1722  ASSERT_TRUE(std::isnan(outputs[i]))
1724  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1731  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1736  xnn_math_f32_expm1minus__scalar_rr2_p5(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1738  ASSERT_TRUE(std::isnan(outputs[i]))
1740  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1748  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1750  xnn_math_f32_expm1minus__scalar_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1752  ASSERT_EQ(reference_output, outputs[0])
1755  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);

1760  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1765  xnn_math_f32_expm1minus__scalar_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1768  ASSERT_EQ(reference_output, outputs[i])
1771  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1778  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1783  xnn_math_f32_expm1minus__scalar_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1785  ASSERT_TRUE(std::isnan(outputs[i]))
1787  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);

1794  std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
1799  xnn_math_f32_expm1minus__scalar_rr2_p6(kBlockSize * sizeof(float), inputs.data(), outputs.data());
1801  ASSERT_TRUE(std::isnan(outputs[i]))
1803  << ", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);