/aosp_15_r20/art/compiler/utils/riscv64/
assembler_riscv64.h
  734  void VLoxei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm = VM::kUnmasked);
  735  void VLoxei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm = VM::kUnmasked);
  736  void VLoxei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm = VM::kUnmasked);
  737  void VLoxei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm = VM::kUnmasked);
  739  void VLuxei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm = VM::kUnmasked);
  740  void VLuxei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm = VM::kUnmasked);
  741  void VLuxei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm = VM::kUnmasked);
  742  void VLuxei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm = VM::kUnmasked);
  744  void VSoxei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm = VM::kUnmasked);
  745  void VSoxei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm = VM::kUnmasked);
  [all …]
assembler_riscv64.cc
  1939  void Riscv64Assembler::VLoxei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {    in VLoxei8() argument
  1943  EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);    in VLoxei8()
  1946  void Riscv64Assembler::VLoxei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {    in VLoxei16() argument
  1950  EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);    in VLoxei16()
  1953  void Riscv64Assembler::VLoxei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {    in VLoxei32() argument
  1957  EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);    in VLoxei32()
  1960  void Riscv64Assembler::VLoxei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {    in VLoxei64() argument
  1964  EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);    in VLoxei64()
  1967  void Riscv64Assembler::VLuxei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {    in VLuxei8() argument
  1971  EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);    in VLuxei8()
  [all …]
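All of the VLoxei*/VLuxei* hits above funnel into EmitR() with vs2 in the second operand slot, the element width in the funct3 slot, and 0x7 as the major opcode. As rough orientation, a hedged sketch of that field packing (PackVectorIndexedLoad is an illustrative name, not the ART helper; the real packing lives in EmitR() and its funct7 argument bundles the nf/mew/mop/vm bits), consistent with the `Inst{24-20} = vs2` placement in the RISCVInstrFormatsV.td entry further down:

    #include <cstdint>

    // Standard RISC-V R-type slot layout, as used by the RVV indexed loads above.
    static uint32_t PackVectorIndexedLoad(uint32_t funct7, uint32_t vs2, uint32_t rs1,
                                          uint32_t width, uint32_t vd, uint32_t opcode) {
      return (funct7 << 25) |  // bits 31-25: nf/mew/mop/vm group for vector loads
             (vs2 << 20) |     // bits 24-20: index vector register (the scalar rs2 slot)
             (rs1 << 15) |     // bits 19-15: base address register
             (width << 12) |   // bits 14-12: element width (VectorWidth::k8/k16/k32/k64)
             (vd << 7) |       // bits 11-7:  destination vector register
             opcode;           // bits 6-0:   0x7, the LOAD-FP major opcode shared by RVV loads
    }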
/aosp_15_r20/prebuilts/go/linux-x86/src/cmd/asm/internal/asm/testdata/ |
ppc64_p10.s
  55  PMXVBF16GER2 VS1, VS2, $1, $2, $3, A1      // 0790c012ec811198
  56  PMXVBF16GER2NN VS1, VS2, $1, $2, $3, A1    // 0790c012ec811790
  57  PMXVBF16GER2NP VS1, VS2, $1, $2, $3, A1    // 0790c012ec811390
  58  PMXVBF16GER2PN VS1, VS2, $1, $2, $3, A1    // 0790c012ec811590
  59  PMXVBF16GER2PP VS1, VS2, $1, $2, $3, A1    // 0790c012ec811190
  60  PMXVF16GER2 VS1, VS2, $1, $2, $3, A1       // 0790c012ec811098
  61  PMXVF16GER2NN VS1, VS2, $1, $2, $3, A1     // 0790c012ec811690
  62  PMXVF16GER2NP VS1, VS2, $1, $2, $3, A1     // 0790c012ec811290
  63  PMXVF16GER2PN VS1, VS2, $1, $2, $3, A1     // 0790c012ec811490
  64  PMXVF16GER2PP VS1, VS2, $1, $2, $3, A1     // 0790c012ec811090
  [all …]
ppc64.s
  1023  XXBRW VS1, VS2           // f04f0f6c
  1024  XXBRH VS2, VS3           // f067176c
  1025  XXLAND VS1, VS2, VS3     // f0611410
  1028  XXLANDC VS1, VS2, VS3    // f0611450
  1029  XXLEQV VS0, VS1, VS2     // f0400dd0
  1030  XXLNAND VS0, VS1, VS2    // f0400d90
  1032  XXLOR VS1, VS2, VS3      // f0611490
  1033  XXLORC VS1, VS2, VS3     // f0611550
  1034  XXLORQ VS1, VS2, VS3     // f0611490
  1035  XXLXOR VS1, VS2, VS3     // f06114d0
  [all …]
/aosp_15_r20/external/swiftshader/third_party/llvm-16.0/llvm/lib/Target/RISCV/ |
RISCVInstrInfoV.td
  227  // indexed load vd, (rs1), vs2, vm
  231  (ins GPRMem:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
  232  "$vd, (${rs1}), $vs2$vm">;
  253  // indexed segment load vd, (rs1), vs2, vm
  258  (ins GPRMem:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
  259  "$vd, (${rs1}), $vs2$vm">;
  291  // indexed store vd, vs3, (rs1), vs2, vm
  294  (ins VR:$vs3, GPRMem:$rs1, VR:$vs2, VMaskOp:$vm),
  295  opcodestr, "$vs3, (${rs1}), $vs2$vm">;
  309  // segment store vd, vs3, (rs1), vs2, vm
  [all …]
RISCVInstrFormatsV.td
  110  bits<5> vs2;
  117  let Inst{24-20} = vs2;
  130  bits<5> vs2;
  137  let Inst{24-20} = vs2;
  147  class RVInstV2<bits<6> funct6, bits<5> vs2, RISCVVFormat opv, dag outs, dag ins,
  156  let Inst{24-20} = vs2;
  169  bits<5> vs2;
  176  let Inst{24-20} = vs2;
  189  bits<5> vs2;
  195  let Inst{24-20} = vs2;
  [all …]
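Read the other way around, the repeated `let Inst{24-20} = vs2;` assignments above mean a decoder recovers vs2 with a plain shift-and-mask. A minimal sketch (illustrative only, not LLVM code):

    #include <cstdint>

    // Extract the 5-bit vs2 register index from an encoded RVV instruction word.
    static inline uint32_t DecodeVs2(uint32_t insn) {
      return (insn >> 20) & 0x1Fu;  // bits 24-20, the scalar rs2 slot
    }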
RISCVInstrInfoXTHead.td
  33  // op vd, vs1, vs2, vm (reverse the order of vs1 and vs2)
  36  (ins VR:$vs1, VR:$vs2, VMaskOp:$vm),
  37  opcodestr, "$vd, $vs1, $vs2$vm">;
  39  // op vd, rs1, vs2, vm (reverse the order of rs1 and vs2)
  42  (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm),
  43  opcodestr, "$vd, $rs1, $vs2$vm">;
/aosp_15_r20/external/rust/android-crates-io/crates/libz-sys/src/zlib-ng/arch/x86/ |
adler32_avx512_vnni.c
  52   __m512i vs1, vs2;    in adler32_avx512_vnni() local
  56   vs2 = _mm512_zextsi128_si512(_mm_cvtsi32_si128(adler1));    in adler32_avx512_vnni()
  76   vs2 = _mm512_dpbusd_epi32(vs2, vbuf1, dot2v);    in adler32_avx512_vnni()
  84   vs2 = sum2 + 64 vs1 + sum( (64-i+1) c[i] )    in adler32_avx512_vnni()
  96   vs2 = _mm512_dpbusd_epi32(vs2, vbuf0, dot2v);    in adler32_avx512_vnni()
  106  vs2 = _mm512_add_epi32(vs2, vs3);    in adler32_avx512_vnni()
  107  vs2 = _mm512_add_epi32(vs2, vs2_1);    in adler32_avx512_vnni()
  110  adler1 = _mm512_reduce_add_epu32(vs2) % BASE;    in adler32_avx512_vnni()
  149  __m256i vs1, vs2;    in adler32_fold_copy_avx512_vnni() local
  153  vs2 = _mm256_zextsi128_si256(_mm_cvtsi32_si128(adler1));    in adler32_fold_copy_avx512_vnni()
  [all …]
adler32_sse42.c
  28   __m128i vs1_0, vs3, vs1, vs2, vs2_0, v_sad_sum1, v_short_sum2, v_short_sum2_0,    in adler32_fold_copy_sse42() local
  43   vs2 = _mm_cvtsi32_si128(adler1);    in adler32_fold_copy_sse42()
  52   vs2 = sum2 + 16 vs1 + sum( (16-i+1) c[i] )    in adler32_fold_copy_sse42()
  74   vs2 = _mm_add_epi32(vsum2, vs2);    in adler32_fold_copy_sse42()
  79   vs2 = _mm_add_epi32(vs2_0, vs2);    in adler32_fold_copy_sse42()
  81   vs2 = _mm_add_epi32(vs3, vs2);    in adler32_fold_copy_sse42()
  87   vs2 = sum2 + 16 vs1 + sum( (16-i+1) c[i] )    in adler32_fold_copy_sse42()
  99   vs2 = _mm_add_epi32(vsum2, vs2);    in adler32_fold_copy_sse42()
  107  vs2 = _mm_add_epi32(vs2, vs3);    in adler32_fold_copy_sse42()
  110  adler1 = hsum(vs2) % BASE;    in adler32_fold_copy_sse42()
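The recurring comment `vs2 = sum2 + 16 vs1 + sum( (16-i+1) c[i] )` in these kernels is the per-block Adler-32 identity that the SAD and dot-product instructions evaluate (block size 16 here, 32 for AVX2, 64 for AVX-512). A scalar sketch of that identity, with illustrative names rather than the zlib-ng API; note that the comment's weight (16-i+1) for 1-based i equals (n-i) for 0-based i:

    #include <cstddef>
    #include <cstdint>

    static const uint32_t BASE = 65521;  // largest prime below 2^16

    // Fold one n-byte block into the running Adler-32 state (s1, s2).
    static void adler32_block(uint32_t& s1, uint32_t& s2, const uint8_t* c, size_t n) {
      uint32_t sum = 0;       // sum of bytes            -> feeds s1
      uint32_t weighted = 0;  // sum of (n - i) * c[i]   -> the "(16-i+1) c[i]" term
      for (size_t i = 0; i < n; ++i) {
        sum += c[i];
        weighted += static_cast<uint32_t>(n - i) * c[i];
      }
      s2 = (s2 + static_cast<uint32_t>(n) * s1 + weighted) % BASE;  // s2' = s2 + n*s1 + weighted
      s1 = (s1 + sum) % BASE;                                       // s1' = s1 + sum
    }

The SIMD variants keep the two sums in vector accumulators (vs1, vs2) and defer the modulo, which is why the matched lines are mostly vector adds folding partial sums together.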
adler32_ssse3.c
  41   __m128i vbuf, vs1_0, vs3, vs1, vs2, vs2_0, v_sad_sum1, v_short_sum2, v_short_sum2_0,    in adler32_ssse3() local
  63   vs2 = _mm_cvtsi32_si128(sum2);    in adler32_ssse3()
  83   vs2 = _mm_cvtsi32_si128(sum2);    in adler32_ssse3()
  95   vs2 = sum2 + 16 vs1 + sum( (16-i+1) c[i] )    in adler32_ssse3()
  111  vs2 = _mm_add_epi32(vsum2, vs2);    in adler32_ssse3()
  117  vs2 = _mm_add_epi32(vs2_0, vs2);    in adler32_ssse3()
  119  vs2 = _mm_add_epi32(vs3, vs2);    in adler32_ssse3()
  125  vs2 = sum2 + 16 vs1 + sum( (16-i+1) c[i] )    in adler32_ssse3()
  137  vs2 = _mm_add_epi32(vsum2, vs2);    in adler32_ssse3()
  142  vs2 = _mm_add_epi32(vs2, vs3);    in adler32_ssse3()
  [all …]
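The `adler1 = hsum(vs2) % BASE` lines in the SSE variants reduce the four per-lane partial sums to one scalar before the modulo. The helper is defined elsewhere in zlib-ng; one plausible shape of it (illustrative, not the actual source):

    #include <immintrin.h>
    #include <cstdint>

    // Horizontal sum of the four 32-bit lanes of an __m128i.
    static inline uint32_t hsum128(__m128i v) {
      v = _mm_add_epi32(v, _mm_shuffle_epi32(v, _MM_SHUFFLE(1, 0, 3, 2)));  // fold upper half onto lower
      v = _mm_add_epi32(v, _mm_shuffle_epi32(v, _MM_SHUFFLE(2, 3, 0, 1)));  // fold the remaining pair
      return static_cast<uint32_t>(_mm_cvtsi128_si32(v));                   // lane 0 now holds the total
    }

The AVX-512 variants skip this dance and use _mm512_reduce_add_epu32 directly, as seen in the hits above and below.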
adler32_avx2_tpl.h
  50   __m256i vs1, vs2;    local
  59   vs2 = _mm256_zextsi128_si256(_mm_cvtsi32_si128(adler1));
  70   vs2 = sum2 + 32 vs1 + sum( (32-i+1) c[i] )
  86   vs2 = _mm256_add_epi32(vsum2, vs2);
  92   vs2 = _mm256_add_epi32(vs2, vs3);
  130  adler1 = hsum256(vs2) % BASE;
adler32_avx512_tpl.h
  59  __m512i vs2 = _mm512_zextsi128_si512(_mm_cvtsi32_si128(adler1));    local
  70  vs2 = sum2 + 64 vs1 + sum( (64-i+1) c[i] )
  85  vs2 = _mm512_add_epi32(vsum2, vs2);
  90  vs2 = _mm512_add_epi32(vs2, vs3);
  93  adler1 = _mm512_reduce_add_epu32(vs2) % BASE;
/aosp_15_r20/external/rust/android-crates-io/crates/libz-sys/src/zlib-ng/arch/power/ |
adler32_power8.c
  78   vector unsigned int vs2 = { 0 };    in adler32_power8() local
  85   vs2[0] = s2;    in adler32_power8()
  100  vs2 = vec_add(vsum2, vs2);    in adler32_power8()
  107  vs2 = vec_add(vs1_save, vs2);    in adler32_power8()
  108  vs2 = vec_sumsu(vs2, vsum2);    in adler32_power8()
  112  /* vs2[0] = s2_i + 16*s1_save +    in adler32_power8()
  114  vs2[0] = vs2[0] % BASE;    in adler32_power8()
  117  vs2 = vec_and(vs2, vmask);    in adler32_power8()
  135  vs2 = vec_add(vsum2, vs2);    in adler32_power8()
  142  vs2 = vec_add(vs1_save, vs2);    in adler32_power8()
  [all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/variables/ |
partitioned_variables_test.py
  350  vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
  354  var2_name = vs2[0]._save_slice_info.full_name
  359  self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
  360  self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
  370  vs2 = partitioned_variables.create_partitioned_variables(
  374  var2_name = vs2[0]._save_slice_info.full_name
  379  self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
  380  self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
  387  vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
  391  var2_name = vs2[0]._save_slice_info.full_name
  [all …]
/aosp_15_r20/external/go-cmp/cmp/internal/value/ |
sort.go
  25  vs2 := vs[:1]
  27  if isLess(vs2[len(vs2)-1], v) {
  28  vs2 = append(vs2, v)
  31  return vs2
/aosp_15_r20/external/XNNPACK/src/f32-velu/gen/ |
velu-scalar-rr2-p6-x3.c
  59   float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);    in xnn_f32_velu_ukernel__scalar_rr2_p6_x3() local
  79   vs2 = 0.0f;    in xnn_f32_velu_ukernel__scalar_rr2_p6_x3()
  107  vt2 *= vs2;    in xnn_f32_velu_ukernel__scalar_rr2_p6_x3()
  108  vs2 -= vone;    in xnn_f32_velu_ukernel__scalar_rr2_p6_x3()
  118  const float ve2 = (vp2 + vs2) * valpha;    in xnn_f32_velu_ukernel__scalar_rr2_p6_x3()
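Every scalar velu kernel in this directory shows the same four hits: build vs = 2^n by shifting the rounded exponent into the float's exponent field, multiply the reduced argument by it, subtract one, and scale by alpha (the `vs2 = 0.0f` hits are the saturation clamp, which collapses the result to -alpha for very negative inputs). Pulled out for a single element, a sketch of that negative-branch reconstruction with placeholder polynomial coefficients, not the generated XNNPACK code:

    // Given s = 2^n and the reduced argument t (so e^z ~= s * e^t), rebuild
    // ELU(z) = alpha * (e^z - 1) the way the matched lines do.
    static float elu_negative_branch(float s, float t, float alpha,
                                     float c2, float c3, float c4, float c5, float c6) {
      float p = ((((c6 * t + c5) * t + c4) * t + c3) * t + c2) * t;  // t * poly(t)
      t *= s;                  // "vt2 *= vs2"
      s -= 1.0f;               // "vs2 -= vone"
      p = p * t + t;           // p == s * (t0 + t0^2 * poly(t0)) for the original t0
      return (p + s) * alpha;  // "(vp2 + vs2) * valpha" ~= alpha * (s * e^t0 - 1) = alpha * (e^z - 1)
    }

The AVX2/AVX-512 variants further down fold alpha into vs up front with an fmsub (`vs2 = fmsub(vs2, valpha, valpha)`) instead of subtracting one and multiplying at the end; the arithmetic is equivalent.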
velu-scalar-rr2-lut16-p3-x3.c
  70   float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx2] + ven2);    in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3() local
  84   vs2 = 0.0f;    in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3()
  100  vt2 *= vs2;    in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3()
  101  vs2 -= vone;    in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3()
  111  const float ve2 = (vp2 + vs2) * valpha;    in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3()
velu-scalar-rr2-lut16-p3-x4.c
  76   float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx2] + ven2);    in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4() local
  92   vs2 = 0.0f;    in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4()
  115  vt2 *= vs2;    in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4()
  116  vs2 -= vone;    in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4()
  129  const float ve2 = (vp2 + vs2) * valpha;    in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4()
velu-scalar-rr2-p6-x4.c
  62   float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);    in xnn_f32_velu_ukernel__scalar_rr2_p6_x4() local
  86   vs2 = 0.0f;    in xnn_f32_velu_ukernel__scalar_rr2_p6_x4()
  123  vt2 *= vs2;    in xnn_f32_velu_ukernel__scalar_rr2_p6_x4()
  124  vs2 -= vone;    in xnn_f32_velu_ukernel__scalar_rr2_p6_x4()
  137  const float ve2 = (vp2 + vs2) * valpha;    in xnn_f32_velu_ukernel__scalar_rr2_p6_x4()
velu-scalar-rr2-lut16-p3-x5.c
  82   float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx2] + ven2);    in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5() local
  100  vs2 = 0.0f;    in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5()
  130  vt2 *= vs2;    in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5()
  131  vs2 -= vone;    in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5()
  147  const float ve2 = (vp2 + vs2) * valpha;    in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5()
velu-scalar-rr2-p6-x5.c
  65   float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);    in xnn_f32_velu_ukernel__scalar_rr2_p6_x5() local
  93   vs2 = 0.0f;    in xnn_f32_velu_ukernel__scalar_rr2_p6_x5()
  139  vt2 *= vs2;    in xnn_f32_velu_ukernel__scalar_rr2_p6_x5()
  140  vs2 -= vone;    in xnn_f32_velu_ukernel__scalar_rr2_p6_x5()
  156  const float ve2 = (vp2 + vs2) * valpha;    in xnn_f32_velu_ukernel__scalar_rr2_p6_x5()
velu-avx512f-rr1-p6-x48.c
  59   __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));    in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48() local
  87   vt2 = _mm512_mul_ps(vt2, vs2);    in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48()
  91   vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);    in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48()
  102  __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);    in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48()
velu-avx512f-rr1-lut16-p3-perm-x48.c
  64   __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));    in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48() local
  80   vt2 = _mm512_mul_ps(vt2, vs2);    in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48()
  84   vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);    in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48()
  95   __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);    in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48()
velu-avx512f-rr1-lut16-p3-perm-x64.c
  69   __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));    in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64() local
  89   vt2 = _mm512_mul_ps(vt2, vs2);    in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64()
  95   vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);    in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64()
  108  __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);    in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64()
velu-avx2-rr1-lut8-p4-perm-x24.c
  66   __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));    in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24() local
  82   vt2 = _mm256_mul_ps(vt2, vs2);    in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24()
  88   vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);    in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24()
  95   const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);    in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24()