1//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8// 9// This file defines all of the RISCV-specific intrinsics. 10// 11//===----------------------------------------------------------------------===// 12 13//===----------------------------------------------------------------------===// 14// Atomics 15 16// Atomic Intrinsics have multiple versions for different access widths, which 17// all follow one of the following signatures (depending on how many arguments 18// they require). We carefully instantiate only specific versions of these for 19// specific integer widths, rather than using `llvm_anyint_ty`. 20// 21// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the 22// canonical names, and the intrinsics used in the code will have a name 23// suffixed with the pointer type they are specialised for (denoted `<p>` in the 24// names below), in order to avoid type conflicts. 25 26let TargetPrefix = "riscv" in { 27 28 // T @llvm.<name>.T.<p>(any*, T, T, T imm); 29 class MaskedAtomicRMWFourArg<LLVMType itype> 30 : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype], 31 [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>; 32 // T @llvm.<name>.T.<p>(any*, T, T, T, T imm); 33 class MaskedAtomicRMWFiveArg<LLVMType itype> 34 : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype], 35 [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>; 36 37 // We define 32-bit and 64-bit variants of the above, where T stands for i32 38 // or i64 respectively: 39 multiclass MaskedAtomicRMWFourArgIntrinsics { 40 // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm); 41 def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>; 42 // i64 @llvm.<name>.i32.<p>(any*, i64, i64, i64 imm); 43 def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>; 44 } 45 46 multiclass MaskedAtomicRMWFiveArgIntrinsics { 47 // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm); 48 def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>; 49 // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm); 50 def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>; 51 } 52 53 // These intrinsics are intended only for internal compiler use (i.e. as 54 // part of AtomicExpandpass via the emitMaskedAtomic*Intrinsic hooks). Their 55 // names and semantics could change in the future. 56 57 // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>( 58 // ptr addr, ixlen oparg, ixlen mask, ixlenimm ordering) 59 defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics; 60 defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics; 61 defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics; 62 defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics; 63 defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics; 64 defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics; 65 // Signed min and max need an extra operand to do sign extension with. 66 // @llvm.riscv.masked.atomicrmw.{max,min}.{i32,i64}.<p>( 67 // ptr addr, ixlen oparg, ixlen mask, ixlen shamt, ixlenimm ordering) 68 defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics; 69 defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics; 70 71 // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>( 72 // ptr addr, ixlen cmpval, ixlen newval, ixlen mask, ixlenimm ordering) 73 defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics; 74 75} // TargetPrefix = "riscv" 76 77//===----------------------------------------------------------------------===// 78// Bitmanip (Bit Manipulation) Extension 79 80let TargetPrefix = "riscv" in { 81 82 class BitManipGPRIntrinsics 83 : DefaultAttrsIntrinsic<[llvm_any_ty], 84 [LLVMMatchType<0>], 85 [IntrNoMem, IntrSpeculatable]>; 86 class BitManipGPRGPRIntrinsics 87 : DefaultAttrsIntrinsic<[llvm_any_ty], 88 [LLVMMatchType<0>, LLVMMatchType<0>], 89 [IntrNoMem, IntrSpeculatable]>; 90 91 // Zbb 92 def int_riscv_orc_b : BitManipGPRIntrinsics; 93 94 // Zbc or Zbkc 95 def int_riscv_clmul : BitManipGPRGPRIntrinsics; 96 def int_riscv_clmulh : BitManipGPRGPRIntrinsics; 97 98 // Zbc 99 def int_riscv_clmulr : BitManipGPRGPRIntrinsics; 100 101 // Zbkb 102 def int_riscv_brev8 : BitManipGPRIntrinsics; 103 def int_riscv_zip : BitManipGPRIntrinsics; 104 def int_riscv_unzip : BitManipGPRIntrinsics; 105 106 // Zbkx 107 def int_riscv_xperm4 : BitManipGPRGPRIntrinsics; 108 def int_riscv_xperm8 : BitManipGPRGPRIntrinsics; 109} // TargetPrefix = "riscv" 110 111//===----------------------------------------------------------------------===// 112// May-Be-Operations 113 114let TargetPrefix = "riscv" in { 115 116 // Zimop 117 def int_riscv_mopr 118 : DefaultAttrsIntrinsic<[llvm_any_ty], 119 [LLVMMatchType<0>, LLVMMatchType<0>], 120 [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<1>>]>; 121 def int_riscv_moprr 122 : DefaultAttrsIntrinsic<[llvm_any_ty], 123 [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], 124 [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]>; 125} // TargetPrefix = "riscv" 126 127//===----------------------------------------------------------------------===// 128// Vectors 129 130// The intrinsic does not have any operand that must be extended. 131defvar NoScalarOperand = 0xF; 132 133// The intrinsic does not have a VL operand. 134// (e.g., riscv_vmv_x_s and riscv_vfmv_f_s) 135defvar NoVLOperand = 0x1F; 136 137class RISCVVIntrinsic { 138 // These intrinsics may accept illegal integer values in their llvm_any_ty 139 // operand, so they have to be extended. 140 Intrinsic IntrinsicID = !cast<Intrinsic>(NAME); 141 bits<4> ScalarOperand = NoScalarOperand; 142 bits<5> VLOperand = NoVLOperand; 143} 144 145let TargetPrefix = "riscv" in { 146 // We use anyint here but we only support XLen. 147 def int_riscv_vsetvli : Intrinsic<[llvm_anyint_ty], 148 /* AVL */ [LLVMMatchType<0>, 149 /* VSEW */ LLVMMatchType<0>, 150 /* VLMUL */ LLVMMatchType<0>], 151 [IntrNoMem, 152 ImmArg<ArgIndex<1>>, 153 ImmArg<ArgIndex<2>>]>; 154 def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty], 155 /* VSEW */ [LLVMMatchType<0>, 156 /* VLMUL */ LLVMMatchType<0>], 157 [IntrNoMem, 158 ImmArg<ArgIndex<0>>, 159 ImmArg<ArgIndex<1>>]>; 160 161 // For unit stride mask load 162 // Input: (pointer, vl) 163 class RISCVUSMLoad 164 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 165 [llvm_ptr_ty, llvm_anyint_ty], 166 [NoCapture<ArgIndex<0>>, IntrReadMem, IntrArgMemOnly]>, 167 RISCVVIntrinsic { 168 let VLOperand = 1; 169 } 170 // For unit stride load 171 // Input: (passthru, pointer, vl) 172 class RISCVUSLoad 173 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 174 [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty], 175 [NoCapture<ArgIndex<1>>, IntrReadMem, IntrArgMemOnly]>, 176 RISCVVIntrinsic { 177 let VLOperand = 2; 178 } 179 // For unit stride fault-only-first load 180 // Input: (passthru, pointer, vl) 181 // Output: (data, vl) 182 // NOTE: We model this with default memory properties since we model writing 183 // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work. 184 class RISCVUSLoadFF 185 : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty], 186 [LLVMMatchType<0>, llvm_ptr_ty, LLVMMatchType<1>], 187 [NoCapture<ArgIndex<1>>]>, 188 RISCVVIntrinsic { 189 let VLOperand = 2; 190 } 191 // For unit stride load with mask 192 // Input: (maskedoff, pointer, mask, vl, policy) 193 class RISCVUSLoadMasked 194 : DefaultAttrsIntrinsic<[llvm_anyvector_ty ], 195 [LLVMMatchType<0>, llvm_ptr_ty, 196 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 197 llvm_anyint_ty, LLVMMatchType<1>], 198 [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem, 199 IntrArgMemOnly]>, 200 RISCVVIntrinsic { 201 let VLOperand = 3; 202 } 203 // For unit stride fault-only-first load with mask 204 // Input: (maskedoff, pointer, mask, vl, policy) 205 // Output: (data, vl) 206 // NOTE: We model this with default memory properties since we model writing 207 // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work. 208 class RISCVUSLoadFFMasked 209 : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty], 210 [LLVMMatchType<0>, llvm_ptr_ty, 211 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 212 LLVMMatchType<1>, LLVMMatchType<1>], 213 [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic { 214 let VLOperand = 3; 215 } 216 // For strided load with passthru operand 217 // Input: (passthru, pointer, stride, vl) 218 class RISCVSLoad 219 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 220 [LLVMMatchType<0>, llvm_ptr_ty, 221 llvm_anyint_ty, LLVMMatchType<1>], 222 [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic { 223 let VLOperand = 3; 224 } 225 // For strided load with mask 226 // Input: (maskedoff, pointer, stride, mask, vl, policy) 227 class RISCVSLoadMasked 228 : DefaultAttrsIntrinsic<[llvm_anyvector_ty ], 229 [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty, 230 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>, 231 LLVMMatchType<1>], 232 [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>, 233 RISCVVIntrinsic { 234 let VLOperand = 4; 235 } 236 // For indexed load with passthru operand 237 // Input: (passthru, pointer, index, vl) 238 class RISCVILoad 239 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 240 [LLVMMatchType<0>, llvm_ptr_ty, 241 llvm_anyvector_ty, llvm_anyint_ty], 242 [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic { 243 let VLOperand = 3; 244 } 245 // For indexed load with mask 246 // Input: (maskedoff, pointer, index, mask, vl, policy) 247 class RISCVILoadMasked 248 : DefaultAttrsIntrinsic<[llvm_anyvector_ty ], 249 [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty, 250 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 251 LLVMMatchType<2>], 252 [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>, 253 RISCVVIntrinsic { 254 let VLOperand = 4; 255 } 256 // For unit stride store 257 // Input: (vector_in, pointer, vl) 258 class RISCVUSStore 259 : DefaultAttrsIntrinsic<[], 260 [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty], 261 [NoCapture<ArgIndex<1>>, IntrWriteMem, IntrArgMemOnly]>, 262 RISCVVIntrinsic { 263 let VLOperand = 2; 264 } 265 // For unit stride store with mask 266 // Input: (vector_in, pointer, mask, vl) 267 class RISCVUSStoreMasked 268 : DefaultAttrsIntrinsic<[], 269 [llvm_anyvector_ty, llvm_ptr_ty, 270 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 271 llvm_anyint_ty], 272 [NoCapture<ArgIndex<1>>, IntrWriteMem, IntrArgMemOnly]>, 273 RISCVVIntrinsic { 274 let VLOperand = 3; 275 } 276 // For strided store 277 // Input: (vector_in, pointer, stride, vl) 278 class RISCVSStore 279 : DefaultAttrsIntrinsic<[], 280 [llvm_anyvector_ty, llvm_ptr_ty, 281 llvm_anyint_ty, LLVMMatchType<1>], 282 [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic { 283 let VLOperand = 3; 284 } 285 // For stride store with mask 286 // Input: (vector_in, pointer, stirde, mask, vl) 287 class RISCVSStoreMasked 288 : DefaultAttrsIntrinsic<[], 289 [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty, 290 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>], 291 [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic { 292 let VLOperand = 4; 293 } 294 // For indexed store 295 // Input: (vector_in, pointer, index, vl) 296 class RISCVIStore 297 : DefaultAttrsIntrinsic<[], 298 [llvm_anyvector_ty, llvm_ptr_ty, 299 llvm_anyint_ty, llvm_anyint_ty], 300 [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic { 301 let VLOperand = 3; 302 } 303 // For indexed store with mask 304 // Input: (vector_in, pointer, index, mask, vl) 305 class RISCVIStoreMasked 306 : DefaultAttrsIntrinsic<[], 307 [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyvector_ty, 308 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], 309 [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic { 310 let VLOperand = 4; 311 } 312 // For destination vector type is the same as source vector. 313 // Input: (passthru, vector_in, vl) 314 class RISCVUnaryAAUnMasked 315 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 316 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty], 317 [IntrNoMem]>, RISCVVIntrinsic { 318 let VLOperand = 2; 319 } 320 // For destination vector type is the same as the source vector type 321 // Input: (passthru, vector_in, vl, policy) 322 class RISCVUnaryAAUnMaskedZvk<bit IsVS> 323 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 324 [LLVMMatchType<0>, !if(IsVS, llvm_anyvector_ty, LLVMMatchType<0>), 325 llvm_anyint_ty, !if(IsVS, LLVMMatchType<2>, LLVMMatchType<1>)], 326 [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic { 327 let VLOperand = 2; 328 } 329 330 multiclass RISCVUnaryAAUnMaskedZvk<bit HasVV = 1, bit HasVS = 1> { 331 if HasVV then 332 def "int_riscv_" # NAME # "_vv" : RISCVUnaryAAUnMaskedZvk<IsVS=0>; 333 334 if HasVS then 335 def "int_riscv_" # NAME # "_vs" : RISCVUnaryAAUnMaskedZvk<IsVS=1>; 336 } 337 // For destination vector type is the same as first source vector (with mask). 338 // Input: (vector_in, vector_in, mask, vl, policy) 339 class RISCVUnaryAAMasked 340 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 341 [LLVMMatchType<0>, LLVMMatchType<0>, 342 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 343 LLVMMatchType<1>], 344 [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic { 345 let VLOperand = 3; 346 } 347 // For destination vector type is the same as source vector. 348 // Input: (passthru, vector_in, frm, vl) 349 class RISCVUnaryAAUnMaskedRoundingMode 350 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 351 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>], 352 [ImmArg<ArgIndex<2>>, IntrNoMem]>, RISCVVIntrinsic { 353 let VLOperand = 3; 354 } 355 // For destination vector type is the same as first source vector (with mask). 356 // Input: (vector_in, vector_in, mask, frm, vl, policy) 357 class RISCVUnaryAAMaskedRoundingMode 358 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 359 [LLVMMatchType<0>, LLVMMatchType<0>, 360 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 361 LLVMMatchType<1>, LLVMMatchType<1>], 362 [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic { 363 let VLOperand = 4; 364 } 365 // Input: (passthru, vector_in, vector_in, mask, vl) 366 class RISCVCompress 367 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 368 [LLVMMatchType<0>, LLVMMatchType<0>, 369 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], 370 [IntrNoMem]>, RISCVVIntrinsic { 371 let VLOperand = 3; 372 } 373 // For destination vector type is the same as first and second source vector. 374 // Input: (vector_in, vector_in, vl) 375 class RISCVBinaryAAAUnMasked 376 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 377 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty], 378 [IntrNoMem]>, RISCVVIntrinsic { 379 let VLOperand = 2; 380 } 381 // For destination vector type is the same as first and second source vector. 382 // Input: (passthru, vector_in, int_vector_in, vl) 383 class RISCVRGatherVVUnMasked 384 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 385 [LLVMMatchType<0>, LLVMMatchType<0>, 386 LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty], 387 [IntrNoMem]>, RISCVVIntrinsic { 388 let VLOperand = 3; 389 } 390 // For destination vector type is the same as first and second source vector. 391 // Input: (vector_in, vector_in, int_vector_in, vl, policy) 392 class RISCVRGatherVVMasked 393 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 394 [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, 395 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 396 LLVMMatchType<1>], 397 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic { 398 let VLOperand = 4; 399 } 400 // Input: (passthru, vector_in, int16_vector_in, vl) 401 class RISCVRGatherEI16VVUnMasked 402 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 403 [LLVMMatchType<0>, LLVMMatchType<0>, 404 LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>, 405 llvm_anyint_ty], 406 [IntrNoMem]>, RISCVVIntrinsic { 407 let VLOperand = 3; 408 } 409 // For destination vector type is the same as first and second source vector. 410 // Input: (vector_in, vector_in, int16_vector_in, vl, policy) 411 class RISCVRGatherEI16VVMasked 412 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 413 [LLVMMatchType<0>, LLVMMatchType<0>, 414 LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>, 415 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 416 LLVMMatchType<1>], 417 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic { 418 let VLOperand = 4; 419 } 420 // For destination vector type is the same as first source vector, and the 421 // second operand is XLen. 422 // Input: (passthru, vector_in, xlen_in, vl) 423 class RISCVGatherVXUnMasked 424 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 425 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, 426 LLVMMatchType<1>], 427 [IntrNoMem]>, RISCVVIntrinsic { 428 let VLOperand = 3; 429 } 430 // For destination vector type is the same as first source vector (with mask). 431 // Second operand is XLen. 432 // Input: (maskedoff, vector_in, xlen_in, mask, vl, policy) 433 class RISCVGatherVXMasked 434 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 435 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, 436 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>, 437 LLVMMatchType<1>], 438 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic { 439 let VLOperand = 4; 440 } 441 // For destination vector type is the same as first source vector. 442 // Input: (passthru, vector_in, vector_in/scalar_in, vl) 443 class RISCVBinaryAAXUnMasked<bit IsVI = 0> 444 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 445 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, 446 llvm_anyint_ty], 447 !listconcat([IntrNoMem], 448 !if(IsVI, [ImmArg<ArgIndex<2>>], []))>, 449 RISCVVIntrinsic { 450 let ScalarOperand = 2; 451 let VLOperand = 3; 452 } 453 // For destination vector type is the same as the source vector type. 454 // Input: (passthru, vector_in, vector_in/scalar_in, vl, policy) 455 class RISCVBinaryAAXUnMaskedZvk<bit IsVI = 0> 456 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 457 [LLVMMatchType<0>, LLVMMatchType<0>, 458 llvm_any_ty, llvm_anyint_ty, LLVMMatchType<2>], 459 !listconcat([ImmArg<ArgIndex<4>>, IntrNoMem], 460 !if(IsVI, [ImmArg<ArgIndex<2>>], []))>, 461 RISCVVIntrinsic { 462 let ScalarOperand = 2; 463 let VLOperand = 3; 464 } 465 // For destination vector type is the same as first source vector (with mask). 466 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) 467 class RISCVBinaryAAXMasked 468 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 469 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, 470 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 471 LLVMMatchType<2>], 472 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic { 473 let ScalarOperand = 2; 474 let VLOperand = 4; 475 } 476 // For destination vector type is the same as first source vector. 477 // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl) 478 class RISCVBinaryAAXUnMaskedRoundingMode 479 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 480 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, 481 llvm_anyint_ty, LLVMMatchType<2>], 482 [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic { 483 let ScalarOperand = 2; 484 let VLOperand = 4; 485 } 486 // For destination vector type is the same as first source vector (with mask). 487 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy) 488 class RISCVBinaryAAXMaskedRoundingMode 489 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 490 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, 491 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 492 LLVMMatchType<2>, LLVMMatchType<2>], 493 [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic { 494 let ScalarOperand = 2; 495 let VLOperand = 5; 496 } 497 // For destination vector type is the same as first source vector. The 498 // second source operand must match the destination type or be an XLen scalar. 499 // Input: (passthru, vector_in, vector_in/scalar_in, vl) 500 class RISCVBinaryAAShiftUnMasked 501 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 502 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, 503 llvm_anyint_ty], 504 [IntrNoMem]>, RISCVVIntrinsic { 505 let VLOperand = 3; 506 } 507 // For destination vector type is the same as first source vector (with mask). 508 // The second source operand must match the destination type or be an XLen scalar. 509 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) 510 class RISCVBinaryAAShiftMasked 511 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 512 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, 513 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 514 LLVMMatchType<2>], 515 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic { 516 let VLOperand = 4; 517 } 518 // For destination vector type is NOT the same as first source vector. 519 // Input: (passthru, vector_in, vector_in/scalar_in, vl) 520 class RISCVBinaryABXUnMasked 521 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 522 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, 523 llvm_anyint_ty], 524 [IntrNoMem]>, RISCVVIntrinsic { 525 let ScalarOperand = 2; 526 let VLOperand = 3; 527 } 528 // For destination vector type is NOT the same as first source vector (with mask). 529 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) 530 class RISCVBinaryABXMasked 531 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 532 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, 533 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 534 LLVMMatchType<3>], 535 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic { 536 let ScalarOperand = 2; 537 let VLOperand = 4; 538 } 539 // For destination vector type is NOT the same as first source vector. 540 // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl) 541 class RISCVBinaryABXUnMaskedRoundingMode 542 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 543 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, 544 llvm_anyint_ty, LLVMMatchType<3>], 545 [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic { 546 let ScalarOperand = 2; 547 let VLOperand = 4; 548 } 549 // For destination vector type is NOT the same as first source vector (with mask). 550 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy) 551 class RISCVBinaryABXMaskedRoundingMode 552 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 553 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, 554 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 555 LLVMMatchType<3>, LLVMMatchType<3>], 556 [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic { 557 let ScalarOperand = 2; 558 let VLOperand = 5; 559 } 560 // For destination vector type is NOT the same as first source vector. The 561 // second source operand must match the destination type or be an XLen scalar. 562 // Input: (passthru, vector_in, vector_in/scalar_in, vl) 563 class RISCVBinaryABShiftUnMasked 564 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 565 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, 566 llvm_anyint_ty], 567 [IntrNoMem]>, RISCVVIntrinsic { 568 let VLOperand = 3; 569 } 570 // For destination vector type is NOT the same as first source vector (with mask). 571 // The second source operand must match the destination type or be an XLen scalar. 572 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) 573 class RISCVBinaryABShiftMasked 574 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 575 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, 576 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 577 LLVMMatchType<3>], 578 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic { 579 let VLOperand = 4; 580 } 581 // For binary operations with V0 as input. 582 // Input: (passthru, vector_in, vector_in/scalar_in, V0, vl) 583 class RISCVBinaryWithV0 584 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 585 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, 586 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 587 llvm_anyint_ty], 588 [IntrNoMem]>, RISCVVIntrinsic { 589 let ScalarOperand = 2; 590 let VLOperand = 4; 591 } 592 // For binary operations with mask type output and V0 as input. 593 // Output: (mask type output) 594 // Input: (vector_in, vector_in/scalar_in, V0, vl) 595 class RISCVBinaryMOutWithV0 596 :DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], 597 [llvm_anyvector_ty, llvm_any_ty, 598 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 599 llvm_anyint_ty], 600 [IntrNoMem]>, RISCVVIntrinsic { 601 let ScalarOperand = 1; 602 let VLOperand = 3; 603 } 604 // For binary operations with mask type output. 605 // Output: (mask type output) 606 // Input: (vector_in, vector_in/scalar_in, vl) 607 class RISCVBinaryMOut 608 : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], 609 [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], 610 [IntrNoMem]>, RISCVVIntrinsic { 611 let ScalarOperand = 1; 612 let VLOperand = 2; 613 } 614 // For binary operations with mask type output without mask. 615 // Output: (mask type output) 616 // Input: (vector_in, vector_in/scalar_in, vl) 617 class RISCVCompareUnMasked 618 : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], 619 [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], 620 [IntrNoMem]>, RISCVVIntrinsic { 621 let ScalarOperand = 1; 622 let VLOperand = 2; 623 } 624 // For binary operations with mask type output with mask. 625 // Output: (mask type output) 626 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl) 627 class RISCVCompareMasked 628 : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], 629 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 630 llvm_anyvector_ty, llvm_any_ty, 631 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], 632 [IntrNoMem]>, RISCVVIntrinsic { 633 let ScalarOperand = 2; 634 let VLOperand = 4; 635 } 636 // For FP classify operations. 637 // Output: (bit mask type output) 638 // Input: (passthru, vector_in, vl) 639 class RISCVClassifyUnMasked 640 : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>], 641 [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty, 642 llvm_anyint_ty], 643 [IntrNoMem]>, RISCVVIntrinsic { 644 let VLOperand = 1; 645 } 646 // For FP classify operations with mask. 647 // Output: (bit mask type output) 648 // Input: (maskedoff, vector_in, mask, vl, policy) 649 class RISCVClassifyMasked 650 : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>], 651 [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty, 652 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 653 llvm_anyint_ty, LLVMMatchType<1>], 654 [IntrNoMem, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic { 655 let VLOperand = 3; 656 } 657 // For Saturating binary operations. 658 // The destination vector type is the same as first source vector. 659 // Input: (passthru, vector_in, vector_in/scalar_in, vl) 660 class RISCVSaturatingBinaryAAXUnMasked 661 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 662 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, 663 llvm_anyint_ty], 664 [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { 665 let ScalarOperand = 2; 666 let VLOperand = 3; 667 } 668 // For Saturating binary operations with rounding-mode operand 669 // The destination vector type is the same as first source vector. 670 // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl) 671 class RISCVSaturatingBinaryAAXUnMaskedRoundingMode 672 : Intrinsic<[llvm_anyvector_ty], 673 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, 674 llvm_anyint_ty, LLVMMatchType<2>], 675 [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic { 676 let ScalarOperand = 2; 677 let VLOperand = 4; 678 } 679 // For Saturating binary operations with mask. 680 // The destination vector type is the same as first source vector. 681 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) 682 class RISCVSaturatingBinaryAAXMasked 683 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 684 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, 685 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 686 LLVMMatchType<2>], 687 [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { 688 let ScalarOperand = 2; 689 let VLOperand = 4; 690 } 691 // For Saturating binary operations with mask and rounding-mode operand 692 // The destination vector type is the same as first source vector. 693 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy) 694 class RISCVSaturatingBinaryAAXMaskedRoundingMode 695 : Intrinsic<[llvm_anyvector_ty], 696 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, 697 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 698 LLVMMatchType<2>, LLVMMatchType<2>], 699 [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic { 700 let ScalarOperand = 2; 701 let VLOperand = 5; 702 } 703 // For Saturating binary operations. 704 // The destination vector type is the same as first source vector. 705 // The second source operand matches the destination type or is an XLen scalar. 706 // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl) 707 class RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode 708 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 709 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, 710 llvm_anyint_ty, LLVMMatchType<2>], 711 [ImmArg<ArgIndex<3>>, IntrNoMem, IntrHasSideEffects]>, 712 RISCVVIntrinsic { 713 let VLOperand = 4; 714 } 715 // For Saturating binary operations with mask. 716 // The destination vector type is the same as first source vector. 717 // The second source operand matches the destination type or is an XLen scalar. 718 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy) 719 class RISCVSaturatingBinaryAAShiftMaskedRoundingMode 720 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 721 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, 722 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 723 LLVMMatchType<2>, LLVMMatchType<2>], 724 [ImmArg<ArgIndex<4>>,ImmArg<ArgIndex<6>>, IntrNoMem, IntrHasSideEffects]>, 725 RISCVVIntrinsic { 726 let VLOperand = 5; 727 } 728 // For Saturating binary operations. 729 // The destination vector type is NOT the same as first source vector. 730 // The second source operand matches the destination type or is an XLen scalar. 731 // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl) 732 class RISCVSaturatingBinaryABShiftUnMaskedRoundingMode 733 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 734 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, 735 llvm_anyint_ty, LLVMMatchType<3>], 736 [ImmArg<ArgIndex<3>>, IntrNoMem, IntrHasSideEffects]>, 737 RISCVVIntrinsic { 738 let VLOperand = 4; 739 } 740 // For Saturating binary operations with mask. 741 // The destination vector type is NOT the same as first source vector (with mask). 742 // The second source operand matches the destination type or is an XLen scalar. 743 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy) 744 class RISCVSaturatingBinaryABShiftMaskedRoundingMode 745 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 746 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, 747 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 748 LLVMMatchType<3>, LLVMMatchType<3>], 749 [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem, 750 IntrHasSideEffects]>, RISCVVIntrinsic { 751 let VLOperand = 5; 752 } 753 // Input: (vector_in, vector_in, scalar_in, vl, policy) 754 class RVVSlideUnMasked 755 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 756 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, 757 LLVMMatchType<1>, LLVMMatchType<1>], 758 [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic { 759 let VLOperand = 3; 760 } 761 // Input: (vector_in, vector_in, vector_in/scalar_in, mask, vl, policy) 762 class RVVSlideMasked 763 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 764 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, 765 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 766 LLVMMatchType<1>, LLVMMatchType<1>], 767 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic { 768 let VLOperand = 4; 769 } 770 // UnMasked Vector Multiply-Add operations, its first operand can not be undef. 771 // Input: (vector_in, vector_in/scalar, vector_in, vl, policy) 772 class RISCVTernaryAAXAUnMasked 773 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 774 [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>, 775 llvm_anyint_ty, LLVMMatchType<2>], 776 [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic { 777 let ScalarOperand = 1; 778 let VLOperand = 3; 779 } 780 // Masked Vector Multiply-Add operations, its first operand can not be undef. 781 // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy 782 class RISCVTernaryAAXAMasked 783 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 784 [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>, 785 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 786 llvm_anyint_ty, LLVMMatchType<2>], 787 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic { 788 let ScalarOperand = 1; 789 let VLOperand = 4; 790 } 791 // UnMasked Vector Multiply-Add operations, its first operand can not be undef. 792 // Input: (vector_in, vector_in/scalar, vector_in, frm, vl, policy) 793 class RISCVTernaryAAXAUnMaskedRoundingMode 794 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 795 [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>, 796 llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>], 797 [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>, 798 RISCVVIntrinsic { 799 let ScalarOperand = 1; 800 let VLOperand = 4; 801 } 802 // Masked Vector Multiply-Add operations, its first operand can not be undef. 803 // Input: (vector_in, vector_in/scalar, vector_in, mask, frm, vl, policy 804 class RISCVTernaryAAXAMaskedRoundingMode 805 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 806 [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>, 807 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 808 llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>], 809 [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, 810 RISCVVIntrinsic { 811 let ScalarOperand = 1; 812 let VLOperand = 5; 813 } 814 // UnMasked Widening Vector Multiply-Add operations, its first operand can not be undef. 815 // Input: (vector_in, vector_in/scalar, vector_in, vl, policy) 816 class RISCVTernaryWideUnMasked 817 : DefaultAttrsIntrinsic< [llvm_anyvector_ty], 818 [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty, 819 llvm_anyint_ty, LLVMMatchType<3>], 820 [ImmArg<ArgIndex<4>>, IntrNoMem] >, RISCVVIntrinsic { 821 let ScalarOperand = 1; 822 let VLOperand = 3; 823 } 824 // Masked Widening Vector Multiply-Add operations, its first operand can not be undef. 825 // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy 826 class RISCVTernaryWideMasked 827 : DefaultAttrsIntrinsic< [llvm_anyvector_ty], 828 [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty, 829 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 830 llvm_anyint_ty, LLVMMatchType<3>], 831 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic { 832 let ScalarOperand = 1; 833 let VLOperand = 4; 834 } 835 // UnMasked Widening Vector Multiply-Add operations, its first operand can not be undef. 836 // Input: (vector_in, vector_in/scalar, vector_in, frm, vl, policy) 837 class RISCVTernaryWideUnMaskedRoundingMode 838 : DefaultAttrsIntrinsic< [llvm_anyvector_ty], 839 [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty, 840 llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>], 841 [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem] >, 842 RISCVVIntrinsic { 843 let ScalarOperand = 1; 844 let VLOperand = 4; 845 } 846 // Masked Widening Vector Multiply-Add operations, its first operand can not be undef. 847 // Input: (vector_in, vector_in/scalar, vector_in, mask, frm, vl, policy 848 class RISCVTernaryWideMaskedRoundingMode 849 : DefaultAttrsIntrinsic< [llvm_anyvector_ty], 850 [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty, 851 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 852 llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>], 853 [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, 854 RISCVVIntrinsic { 855 let ScalarOperand = 1; 856 let VLOperand = 5; 857 } 858 // For Reduction ternary operations. 859 // For destination vector type is the same as first and third source vector. 860 // Input: (vector_in, vector_in, vector_in, vl) 861 class RISCVReductionUnMasked 862 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 863 [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>, 864 llvm_anyint_ty], 865 [IntrNoMem]>, RISCVVIntrinsic { 866 let VLOperand = 3; 867 } 868 // For Reduction ternary operations with mask. 869 // For destination vector type is the same as first and third source vector. 870 // The mask type come from second source vector. 871 // Input: (maskedoff, vector_in, vector_in, vector_in, mask, vl) 872 class RISCVReductionMasked 873 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 874 [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>, 875 LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty], 876 [IntrNoMem]>, RISCVVIntrinsic { 877 let VLOperand = 4; 878 } 879 // For Reduction ternary operations. 880 // For destination vector type is the same as first and third source vector. 881 // Input: (vector_in, vector_in, vector_in, frm, vl) 882 class RISCVReductionUnMaskedRoundingMode 883 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 884 [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>, 885 llvm_anyint_ty, LLVMMatchType<2>], 886 [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic { 887 let VLOperand = 4; 888 } 889 // For Reduction ternary operations with mask. 890 // For destination vector type is the same as first and third source vector. 891 // The mask type come from second source vector. 892 // Input: (vector_in, vector_in, vector_in, mask, frm, vl) 893 class RISCVReductionMaskedRoundingMode 894 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 895 [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>, 896 LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty, 897 LLVMMatchType<2>], 898 [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic { 899 let VLOperand = 5; 900 } 901 // For unary operations with scalar type output without mask 902 // Output: (scalar type) 903 // Input: (vector_in, vl) 904 class RISCVMaskedUnarySOutUnMasked 905 : DefaultAttrsIntrinsic<[LLVMMatchType<1>], 906 [llvm_anyvector_ty, llvm_anyint_ty], 907 [IntrNoMem]>, RISCVVIntrinsic { 908 let VLOperand = 1; 909 } 910 // For unary operations with scalar type output with mask 911 // Output: (scalar type) 912 // Input: (vector_in, mask, vl) 913 class RISCVMaskedUnarySOutMasked 914 : DefaultAttrsIntrinsic<[LLVMMatchType<1>], 915 [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty], 916 [IntrNoMem]>, RISCVVIntrinsic { 917 let VLOperand = 2; 918 } 919 // For destination vector type is NOT the same as source vector. 920 // Input: (passthru, vector_in, vl) 921 class RISCVUnaryABUnMasked 922 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 923 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty], 924 [IntrNoMem]>, RISCVVIntrinsic { 925 let VLOperand = 2; 926 } 927 // For destination vector type is NOT the same as source vector (with mask). 928 // Input: (maskedoff, vector_in, mask, vl, policy) 929 class RISCVUnaryABMasked 930 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 931 [LLVMMatchType<0>, llvm_anyvector_ty, 932 LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, 933 llvm_anyint_ty, LLVMMatchType<2>], 934 [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic { 935 let VLOperand = 3; 936 } 937 // For unary operations with the same vector type in/out without mask 938 // Output: (vector) 939 // Input: (vector_in, vl) 940 class RISCVUnaryUnMasked 941 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 942 [LLVMMatchType<0>, llvm_anyint_ty], 943 [IntrNoMem]>, RISCVVIntrinsic { 944 let VLOperand = 1; 945 } 946 // For mask unary operations with mask type in/out with mask 947 // Output: (mask type output) 948 // Input: (mask type maskedoff, mask type vector_in, mask, vl) 949 class RISCVMaskedUnaryMOutMasked 950 : DefaultAttrsIntrinsic<[llvm_anyint_ty], 951 [LLVMMatchType<0>, LLVMMatchType<0>, 952 LLVMMatchType<0>, llvm_anyint_ty], 953 [IntrNoMem]>, RISCVVIntrinsic { 954 let VLOperand = 3; 955 } 956 // Output: (vector) 957 // Input: (vl) 958 class RISCVNullaryIntrinsic 959 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 960 [llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { 961 let VLOperand = 1; 962 } 963 // Output: (vector) 964 // Input: (passthru, vl) 965 class RISCVID 966 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 967 [LLVMMatchType<0>, llvm_anyint_ty], 968 [IntrNoMem]>, RISCVVIntrinsic { 969 let VLOperand = 1; 970 } 971 // For Conversion unary operations. 972 // Input: (passthru, vector_in, vl) 973 class RISCVConversionUnMasked 974 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 975 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty], 976 [IntrNoMem]>, RISCVVIntrinsic { 977 let VLOperand = 2; 978 } 979 // For Conversion unary operations with mask. 980 // Input: (maskedoff, vector_in, mask, vl, policy) 981 class RISCVConversionMasked 982 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 983 [LLVMMatchType<0>, llvm_anyvector_ty, 984 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 985 LLVMMatchType<2>], 986 [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic { 987 let VLOperand = 3; 988 } 989 // For Conversion unary operations. 990 // Input: (passthru, vector_in, frm, vl) 991 class RISCVConversionUnMaskedRoundingMode 992 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 993 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty, 994 LLVMMatchType<2>], 995 [ImmArg<ArgIndex<2>>, IntrNoMem]>, RISCVVIntrinsic { 996 let VLOperand = 3; 997 } 998 // For Conversion unary operations with mask. 999 // Input: (maskedoff, vector_in, mask, frm, vl, policy) 1000 class RISCVConversionMaskedRoundingMode 1001 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 1002 [LLVMMatchType<0>, llvm_anyvector_ty, 1003 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, 1004 LLVMMatchType<2>, LLVMMatchType<2>], 1005 [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic { 1006 let VLOperand = 4; 1007 } 1008 1009 // For unit stride segment load 1010 // Input: (passthru, pointer, vl) 1011 class RISCVUSSegLoad<int nf> 1012 : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>, 1013 !add(nf, -1))), 1014 !listconcat(!listsplat(LLVMMatchType<0>, nf), 1015 [llvm_ptr_ty, llvm_anyint_ty]), 1016 [NoCapture<ArgIndex<nf>>, IntrReadMem, IntrArgMemOnly]>, 1017 RISCVVIntrinsic { 1018 let VLOperand = !add(nf, 1); 1019 } 1020 // For unit stride segment load with mask 1021 // Input: (maskedoff, pointer, mask, vl, policy) 1022 class RISCVUSSegLoadMasked<int nf> 1023 : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>, 1024 !add(nf, -1))), 1025 !listconcat(!listsplat(LLVMMatchType<0>, nf), 1026 [llvm_ptr_ty, 1027 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 1028 llvm_anyint_ty, LLVMMatchType<1>]), 1029 [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>, 1030 IntrReadMem, IntrArgMemOnly]>, 1031 RISCVVIntrinsic { 1032 let VLOperand = !add(nf, 2); 1033 } 1034 1035 // For unit stride fault-only-first segment load 1036 // Input: (passthru, pointer, vl) 1037 // Output: (data, vl) 1038 // NOTE: We model this with default memory properties since we model writing 1039 // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work. 1040 class RISCVUSSegLoadFF<int nf> 1041 : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>, 1042 !add(nf, -1)), [llvm_anyint_ty]), 1043 !listconcat(!listsplat(LLVMMatchType<0>, nf), 1044 [llvm_ptr_ty, LLVMMatchType<1>]), 1045 [NoCapture<ArgIndex<nf>>]>, RISCVVIntrinsic { 1046 let VLOperand = !add(nf, 1); 1047 } 1048 // For unit stride fault-only-first segment load with mask 1049 // Input: (maskedoff, pointer, mask, vl, policy) 1050 // Output: (data, vl) 1051 // NOTE: We model this with default memory properties since we model writing 1052 // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work. 1053 class RISCVUSSegLoadFFMasked<int nf> 1054 : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>, 1055 !add(nf, -1)), [llvm_anyint_ty]), 1056 !listconcat(!listsplat(LLVMMatchType<0>, nf), 1057 [llvm_ptr_ty, 1058 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 1059 LLVMMatchType<1>, LLVMMatchType<1>]), 1060 [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>]>, 1061 RISCVVIntrinsic { 1062 let VLOperand = !add(nf, 2); 1063 } 1064 1065 // For stride segment load 1066 // Input: (passthru, pointer, offset, vl) 1067 class RISCVSSegLoad<int nf> 1068 : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>, 1069 !add(nf, -1))), 1070 !listconcat(!listsplat(LLVMMatchType<0>, nf), 1071 [llvm_ptr_ty, llvm_anyint_ty, LLVMMatchType<1>]), 1072 [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic { 1073 let VLOperand = !add(nf, 2); 1074 } 1075 // For stride segment load with mask 1076 // Input: (maskedoff, pointer, offset, mask, vl, policy) 1077 class RISCVSSegLoadMasked<int nf> 1078 : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>, 1079 !add(nf, -1))), 1080 !listconcat(!listsplat(LLVMMatchType<0>, nf), 1081 [llvm_ptr_ty, 1082 llvm_anyint_ty, 1083 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 1084 LLVMMatchType<1>, LLVMMatchType<1>]), 1085 [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>, 1086 RISCVVIntrinsic { 1087 let VLOperand = !add(nf, 3); 1088 } 1089 1090 // For indexed segment load 1091 // Input: (passthru, pointer, index, vl) 1092 class RISCVISegLoad<int nf> 1093 : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>, 1094 !add(nf, -1))), 1095 !listconcat(!listsplat(LLVMMatchType<0>, nf), 1096 [llvm_ptr_ty, llvm_anyvector_ty, llvm_anyint_ty]), 1097 [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic { 1098 let VLOperand = !add(nf, 2); 1099 } 1100 // For indexed segment load with mask 1101 // Input: (maskedoff, pointer, index, mask, vl, policy) 1102 class RISCVISegLoadMasked<int nf> 1103 : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>, 1104 !add(nf, -1))), 1105 !listconcat(!listsplat(LLVMMatchType<0>, nf), 1106 [llvm_ptr_ty, 1107 llvm_anyvector_ty, 1108 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 1109 llvm_anyint_ty, LLVMMatchType<2>]), 1110 [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>, 1111 RISCVVIntrinsic { 1112 let VLOperand = !add(nf, 3); 1113 } 1114 1115 // For unit stride segment store 1116 // Input: (value, pointer, vl) 1117 class RISCVUSSegStore<int nf> 1118 : DefaultAttrsIntrinsic<[], 1119 !listconcat([llvm_anyvector_ty], 1120 !listsplat(LLVMMatchType<0>, !add(nf, -1)), 1121 [llvm_ptr_ty, llvm_anyint_ty]), 1122 [NoCapture<ArgIndex<nf>>, IntrWriteMem, IntrArgMemOnly]>, 1123 RISCVVIntrinsic { 1124 let VLOperand = !add(nf, 1); 1125 } 1126 // For unit stride segment store with mask 1127 // Input: (value, pointer, mask, vl) 1128 class RISCVUSSegStoreMasked<int nf> 1129 : DefaultAttrsIntrinsic<[], 1130 !listconcat([llvm_anyvector_ty], 1131 !listsplat(LLVMMatchType<0>, !add(nf, -1)), 1132 [llvm_ptr_ty, 1133 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 1134 llvm_anyint_ty]), 1135 [NoCapture<ArgIndex<nf>>, IntrWriteMem, IntrArgMemOnly]>, 1136 RISCVVIntrinsic { 1137 let VLOperand = !add(nf, 2); 1138 } 1139 1140 // For stride segment store 1141 // Input: (value, pointer, offset, vl) 1142 class RISCVSSegStore<int nf> 1143 : DefaultAttrsIntrinsic<[], 1144 !listconcat([llvm_anyvector_ty], 1145 !listsplat(LLVMMatchType<0>, !add(nf, -1)), 1146 [llvm_ptr_ty, llvm_anyint_ty, 1147 LLVMMatchType<1>]), 1148 [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic { 1149 let VLOperand = !add(nf, 2); 1150 } 1151 // For stride segment store with mask 1152 // Input: (value, pointer, offset, mask, vl) 1153 class RISCVSSegStoreMasked<int nf> 1154 : DefaultAttrsIntrinsic<[], 1155 !listconcat([llvm_anyvector_ty], 1156 !listsplat(LLVMMatchType<0>, !add(nf, -1)), 1157 [llvm_ptr_ty, llvm_anyint_ty, 1158 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 1159 LLVMMatchType<1>]), 1160 [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic { 1161 let VLOperand = !add(nf, 3); 1162 } 1163 1164 // For indexed segment store 1165 // Input: (value, pointer, offset, vl) 1166 class RISCVISegStore<int nf> 1167 : DefaultAttrsIntrinsic<[], 1168 !listconcat([llvm_anyvector_ty], 1169 !listsplat(LLVMMatchType<0>, !add(nf, -1)), 1170 [llvm_ptr_ty, llvm_anyvector_ty, 1171 llvm_anyint_ty]), 1172 [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic { 1173 let VLOperand = !add(nf, 2); 1174 } 1175 // For indexed segment store with mask 1176 // Input: (value, pointer, offset, mask, vl) 1177 class RISCVISegStoreMasked<int nf> 1178 : DefaultAttrsIntrinsic<[], 1179 !listconcat([llvm_anyvector_ty], 1180 !listsplat(LLVMMatchType<0>, !add(nf, -1)), 1181 [llvm_ptr_ty, llvm_anyvector_ty, 1182 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 1183 llvm_anyint_ty]), 1184 [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic { 1185 let VLOperand = !add(nf, 3); 1186 } 1187 1188 multiclass RISCVUSLoad { 1189 def "int_riscv_" # NAME : RISCVUSLoad; 1190 def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMasked; 1191 } 1192 multiclass RISCVUSLoadFF { 1193 def "int_riscv_" # NAME : RISCVUSLoadFF; 1194 def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMasked; 1195 } 1196 multiclass RISCVSLoad { 1197 def "int_riscv_" # NAME : RISCVSLoad; 1198 def "int_riscv_" # NAME # "_mask" : RISCVSLoadMasked; 1199 } 1200 multiclass RISCVILoad { 1201 def "int_riscv_" # NAME : RISCVILoad; 1202 def "int_riscv_" # NAME # "_mask" : RISCVILoadMasked; 1203 } 1204 multiclass RISCVUSStore { 1205 def "int_riscv_" # NAME : RISCVUSStore; 1206 def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMasked; 1207 } 1208 multiclass RISCVSStore { 1209 def "int_riscv_" # NAME : RISCVSStore; 1210 def "int_riscv_" # NAME # "_mask" : RISCVSStoreMasked; 1211 } 1212 1213 multiclass RISCVIStore { 1214 def "int_riscv_" # NAME : RISCVIStore; 1215 def "int_riscv_" # NAME # "_mask" : RISCVIStoreMasked; 1216 } 1217 multiclass RISCVUnaryAA { 1218 def "int_riscv_" # NAME : RISCVUnaryAAUnMasked; 1219 def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMasked; 1220 } 1221 multiclass RISCVUnaryAARoundingMode { 1222 def "int_riscv_" # NAME : RISCVUnaryAAUnMaskedRoundingMode; 1223 def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMaskedRoundingMode; 1224 } 1225 multiclass RISCVUnaryAB { 1226 def "int_riscv_" # NAME : RISCVUnaryABUnMasked; 1227 def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMasked; 1228 } 1229 // AAX means the destination type(A) is the same as the first source 1230 // type(A). X means any type for the second source operand. 1231 multiclass RISCVBinaryAAX { 1232 def "int_riscv_" # NAME : RISCVBinaryAAXUnMasked; 1233 def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMasked; 1234 } 1235 multiclass RISCVBinaryAAXRoundingMode { 1236 def "int_riscv_" # NAME : RISCVBinaryAAXUnMaskedRoundingMode; 1237 def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMaskedRoundingMode; 1238 } 1239 // Like RISCVBinaryAAX, but the second operand is used a shift amount so it 1240 // must be a vector or an XLen scalar. 1241 multiclass RISCVBinaryAAShift { 1242 def "int_riscv_" # NAME : RISCVBinaryAAShiftUnMasked; 1243 def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMasked; 1244 } 1245 multiclass RISCVRGatherVV { 1246 def "int_riscv_" # NAME : RISCVRGatherVVUnMasked; 1247 def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMasked; 1248 } 1249 multiclass RISCVRGatherVX { 1250 def "int_riscv_" # NAME : RISCVGatherVXUnMasked; 1251 def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMasked; 1252 } 1253 multiclass RISCVRGatherEI16VV { 1254 def "int_riscv_" # NAME : RISCVRGatherEI16VVUnMasked; 1255 def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMasked; 1256 } 1257 // ABX means the destination type(A) is different from the first source 1258 // type(B). X means any type for the second source operand. 1259 multiclass RISCVBinaryABX { 1260 def "int_riscv_" # NAME : RISCVBinaryABXUnMasked; 1261 def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMasked; 1262 } 1263 multiclass RISCVBinaryABXRoundingMode { 1264 def "int_riscv_" # NAME : RISCVBinaryABXUnMaskedRoundingMode; 1265 def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMaskedRoundingMode; 1266 } 1267 // Like RISCVBinaryABX, but the second operand is used a shift amount so it 1268 // must be a vector or an XLen scalar. 1269 multiclass RISCVBinaryABShift { 1270 def "int_riscv_" # NAME : RISCVBinaryABShiftUnMasked; 1271 def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMasked; 1272 } 1273 multiclass RISCVBinaryWithV0 { 1274 def "int_riscv_" # NAME : RISCVBinaryWithV0; 1275 } 1276 multiclass RISCVBinaryMaskOutWithV0 { 1277 def "int_riscv_" # NAME : RISCVBinaryMOutWithV0; 1278 } 1279 multiclass RISCVBinaryMaskOut { 1280 def "int_riscv_" # NAME : RISCVBinaryMOut; 1281 } 1282 multiclass RISCVSaturatingBinaryAAX { 1283 def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMasked; 1284 def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMasked; 1285 } 1286 multiclass RISCVSaturatingBinaryAAXRoundingMode { 1287 def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMaskedRoundingMode; 1288 def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMaskedRoundingMode; 1289 } 1290 multiclass RISCVSaturatingBinaryAAShiftRoundingMode { 1291 def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode; 1292 def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMaskedRoundingMode; 1293 } 1294 multiclass RISCVSaturatingBinaryABShiftRoundingMode { 1295 def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMaskedRoundingMode; 1296 def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMaskedRoundingMode; 1297 } 1298 multiclass RVVSlide { 1299 def "int_riscv_" # NAME : RVVSlideUnMasked; 1300 def "int_riscv_" # NAME # "_mask" : RVVSlideMasked; 1301 } 1302 multiclass RISCVTernaryAAXA { 1303 def "int_riscv_" # NAME : RISCVTernaryAAXAUnMasked; 1304 def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMasked; 1305 } 1306 multiclass RISCVTernaryAAXARoundingMode { 1307 def "int_riscv_" # NAME : RISCVTernaryAAXAUnMaskedRoundingMode; 1308 def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMaskedRoundingMode; 1309 } 1310 multiclass RISCVCompare { 1311 def "int_riscv_" # NAME : RISCVCompareUnMasked; 1312 def "int_riscv_" # NAME # "_mask" : RISCVCompareMasked; 1313 } 1314 multiclass RISCVClassify { 1315 def "int_riscv_" # NAME : RISCVClassifyUnMasked; 1316 def "int_riscv_" # NAME # "_mask" : RISCVClassifyMasked; 1317 } 1318 multiclass RISCVTernaryWide { 1319 def "int_riscv_" # NAME : RISCVTernaryWideUnMasked; 1320 def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMasked; 1321 } 1322 multiclass RISCVTernaryWideRoundingMode { 1323 def "int_riscv_" # NAME : RISCVTernaryWideUnMaskedRoundingMode; 1324 def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMaskedRoundingMode; 1325 } 1326 multiclass RISCVReduction { 1327 def "int_riscv_" # NAME : RISCVReductionUnMasked; 1328 def "int_riscv_" # NAME # "_mask" : RISCVReductionMasked; 1329 } 1330 multiclass RISCVReductionRoundingMode { 1331 def "int_riscv_" # NAME : RISCVReductionUnMaskedRoundingMode; 1332 def "int_riscv_" # NAME # "_mask" : RISCVReductionMaskedRoundingMode; 1333 } 1334 multiclass RISCVMaskedUnarySOut { 1335 def "int_riscv_" # NAME : RISCVMaskedUnarySOutUnMasked; 1336 def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnarySOutMasked; 1337 } 1338 multiclass RISCVMaskedUnaryMOut { 1339 def "int_riscv_" # NAME : RISCVUnaryUnMasked; 1340 def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnaryMOutMasked; 1341 } 1342 multiclass RISCVConversion { 1343 def "int_riscv_" #NAME :RISCVConversionUnMasked; 1344 def "int_riscv_" # NAME # "_mask" : RISCVConversionMasked; 1345 } 1346 multiclass RISCVConversionRoundingMode { 1347 def "int_riscv_" #NAME :RISCVConversionUnMaskedRoundingMode; 1348 def "int_riscv_" # NAME # "_mask" : RISCVConversionMaskedRoundingMode; 1349 } 1350 multiclass RISCVUSSegLoad<int nf> { 1351 def "int_riscv_" # NAME : RISCVUSSegLoad<nf>; 1352 def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMasked<nf>; 1353 } 1354 multiclass RISCVUSSegLoadFF<int nf> { 1355 def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>; 1356 def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMasked<nf>; 1357 } 1358 multiclass RISCVSSegLoad<int nf> { 1359 def "int_riscv_" # NAME : RISCVSSegLoad<nf>; 1360 def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMasked<nf>; 1361 } 1362 multiclass RISCVISegLoad<int nf> { 1363 def "int_riscv_" # NAME : RISCVISegLoad<nf>; 1364 def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMasked<nf>; 1365 } 1366 multiclass RISCVUSSegStore<int nf> { 1367 def "int_riscv_" # NAME : RISCVUSSegStore<nf>; 1368 def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMasked<nf>; 1369 } 1370 multiclass RISCVSSegStore<int nf> { 1371 def "int_riscv_" # NAME : RISCVSSegStore<nf>; 1372 def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMasked<nf>; 1373 } 1374 multiclass RISCVISegStore<int nf> { 1375 def "int_riscv_" # NAME : RISCVISegStore<nf>; 1376 def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMasked<nf>; 1377 } 1378 1379 defm vle : RISCVUSLoad; 1380 defm vleff : RISCVUSLoadFF; 1381 defm vse : RISCVUSStore; 1382 defm vlse: RISCVSLoad; 1383 defm vsse: RISCVSStore; 1384 defm vluxei : RISCVILoad; 1385 defm vloxei : RISCVILoad; 1386 defm vsoxei : RISCVIStore; 1387 defm vsuxei : RISCVIStore; 1388 1389 def int_riscv_vlm : RISCVUSMLoad; 1390 def int_riscv_vsm : RISCVUSStore; 1391 1392 defm vadd : RISCVBinaryAAX; 1393 defm vsub : RISCVBinaryAAX; 1394 defm vrsub : RISCVBinaryAAX; 1395 1396 defm vwaddu : RISCVBinaryABX; 1397 defm vwadd : RISCVBinaryABX; 1398 defm vwaddu_w : RISCVBinaryAAX; 1399 defm vwadd_w : RISCVBinaryAAX; 1400 defm vwsubu : RISCVBinaryABX; 1401 defm vwsub : RISCVBinaryABX; 1402 defm vwsubu_w : RISCVBinaryAAX; 1403 defm vwsub_w : RISCVBinaryAAX; 1404 1405 defm vzext : RISCVUnaryAB; 1406 defm vsext : RISCVUnaryAB; 1407 1408 defm vadc : RISCVBinaryWithV0; 1409 defm vmadc_carry_in : RISCVBinaryMaskOutWithV0; 1410 defm vmadc : RISCVBinaryMaskOut; 1411 1412 defm vsbc : RISCVBinaryWithV0; 1413 defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0; 1414 defm vmsbc : RISCVBinaryMaskOut; 1415 1416 defm vand : RISCVBinaryAAX; 1417 defm vor : RISCVBinaryAAX; 1418 defm vxor : RISCVBinaryAAX; 1419 1420 defm vsll : RISCVBinaryAAShift; 1421 defm vsrl : RISCVBinaryAAShift; 1422 defm vsra : RISCVBinaryAAShift; 1423 1424 defm vnsrl : RISCVBinaryABShift; 1425 defm vnsra : RISCVBinaryABShift; 1426 1427 defm vmseq : RISCVCompare; 1428 defm vmsne : RISCVCompare; 1429 defm vmsltu : RISCVCompare; 1430 defm vmslt : RISCVCompare; 1431 defm vmsleu : RISCVCompare; 1432 defm vmsle : RISCVCompare; 1433 defm vmsgtu : RISCVCompare; 1434 defm vmsgt : RISCVCompare; 1435 defm vmsgeu : RISCVCompare; 1436 defm vmsge : RISCVCompare; 1437 1438 defm vminu : RISCVBinaryAAX; 1439 defm vmin : RISCVBinaryAAX; 1440 defm vmaxu : RISCVBinaryAAX; 1441 defm vmax : RISCVBinaryAAX; 1442 1443 defm vmul : RISCVBinaryAAX; 1444 defm vmulh : RISCVBinaryAAX; 1445 defm vmulhu : RISCVBinaryAAX; 1446 defm vmulhsu : RISCVBinaryAAX; 1447 1448 defm vdivu : RISCVBinaryAAX; 1449 defm vdiv : RISCVBinaryAAX; 1450 defm vremu : RISCVBinaryAAX; 1451 defm vrem : RISCVBinaryAAX; 1452 1453 defm vwmul : RISCVBinaryABX; 1454 defm vwmulu : RISCVBinaryABX; 1455 defm vwmulsu : RISCVBinaryABX; 1456 1457 defm vmacc : RISCVTernaryAAXA; 1458 defm vnmsac : RISCVTernaryAAXA; 1459 defm vmadd : RISCVTernaryAAXA; 1460 defm vnmsub : RISCVTernaryAAXA; 1461 1462 defm vwmaccu : RISCVTernaryWide; 1463 defm vwmacc : RISCVTernaryWide; 1464 defm vwmaccus : RISCVTernaryWide; 1465 defm vwmaccsu : RISCVTernaryWide; 1466 1467 defm vfadd : RISCVBinaryAAXRoundingMode; 1468 defm vfsub : RISCVBinaryAAXRoundingMode; 1469 defm vfrsub : RISCVBinaryAAXRoundingMode; 1470 1471 defm vfwadd : RISCVBinaryABXRoundingMode; 1472 defm vfwsub : RISCVBinaryABXRoundingMode; 1473 defm vfwadd_w : RISCVBinaryAAXRoundingMode; 1474 defm vfwsub_w : RISCVBinaryAAXRoundingMode; 1475 1476 defm vsaddu : RISCVSaturatingBinaryAAX; 1477 defm vsadd : RISCVSaturatingBinaryAAX; 1478 defm vssubu : RISCVSaturatingBinaryAAX; 1479 defm vssub : RISCVSaturatingBinaryAAX; 1480 1481 defm vmerge : RISCVBinaryWithV0; 1482 1483 // Output: (vector) 1484 // Input: (passthru, vector_in, vl) 1485 def int_riscv_vmv_v_v : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 1486 [LLVMMatchType<0>, 1487 LLVMMatchType<0>, 1488 llvm_anyint_ty], 1489 [IntrNoMem]>, RISCVVIntrinsic { 1490 let VLOperand = 2; 1491 } 1492 // Output: (vector) 1493 // Input: (passthru, scalar, vl) 1494 def int_riscv_vmv_v_x : DefaultAttrsIntrinsic<[llvm_anyint_ty], 1495 [LLVMMatchType<0>, 1496 LLVMVectorElementType<0>, 1497 llvm_anyint_ty], 1498 [IntrNoMem]>, RISCVVIntrinsic { 1499 let VLOperand = 2; 1500 } 1501 // Output: (vector) 1502 // Input: (passthru, scalar, vl) 1503 def int_riscv_vfmv_v_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], 1504 [LLVMMatchType<0>, 1505 LLVMVectorElementType<0>, 1506 llvm_anyint_ty], 1507 [IntrNoMem]>, RISCVVIntrinsic { 1508 let VLOperand = 2; 1509 } 1510 1511 def int_riscv_vmv_x_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>], 1512 [llvm_anyint_ty], 1513 [IntrNoMem]>, RISCVVIntrinsic; 1514 def int_riscv_vmv_s_x : DefaultAttrsIntrinsic<[llvm_anyint_ty], 1515 [LLVMMatchType<0>, 1516 LLVMVectorElementType<0>, 1517 llvm_anyint_ty], 1518 [IntrNoMem]>, RISCVVIntrinsic { 1519 let VLOperand = 2; 1520 } 1521 1522 def int_riscv_vfmv_f_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>], 1523 [llvm_anyfloat_ty], 1524 [IntrNoMem]>, RISCVVIntrinsic; 1525 def int_riscv_vfmv_s_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], 1526 [LLVMMatchType<0>, 1527 LLVMVectorElementType<0>, 1528 llvm_anyint_ty], 1529 [IntrNoMem]>, RISCVVIntrinsic { 1530 let VLOperand = 2; 1531 } 1532 1533 defm vfmul : RISCVBinaryAAXRoundingMode; 1534 defm vfdiv : RISCVBinaryAAXRoundingMode; 1535 defm vfrdiv : RISCVBinaryAAXRoundingMode; 1536 1537 defm vfwmul : RISCVBinaryABXRoundingMode; 1538 1539 defm vfmacc : RISCVTernaryAAXARoundingMode; 1540 defm vfnmacc : RISCVTernaryAAXARoundingMode; 1541 defm vfmsac : RISCVTernaryAAXARoundingMode; 1542 defm vfnmsac : RISCVTernaryAAXARoundingMode; 1543 defm vfmadd : RISCVTernaryAAXARoundingMode; 1544 defm vfnmadd : RISCVTernaryAAXARoundingMode; 1545 defm vfmsub : RISCVTernaryAAXARoundingMode; 1546 defm vfnmsub : RISCVTernaryAAXARoundingMode; 1547 1548 defm vfwmacc : RISCVTernaryWideRoundingMode; 1549 defm vfwmaccbf16 : RISCVTernaryWideRoundingMode; 1550 defm vfwnmacc : RISCVTernaryWideRoundingMode; 1551 defm vfwmsac : RISCVTernaryWideRoundingMode; 1552 defm vfwnmsac : RISCVTernaryWideRoundingMode; 1553 1554 defm vfsqrt : RISCVUnaryAARoundingMode; 1555 defm vfrsqrt7 : RISCVUnaryAA; 1556 defm vfrec7 : RISCVUnaryAARoundingMode; 1557 1558 defm vfmin : RISCVBinaryAAX; 1559 defm vfmax : RISCVBinaryAAX; 1560 1561 defm vfsgnj : RISCVBinaryAAX; 1562 defm vfsgnjn : RISCVBinaryAAX; 1563 defm vfsgnjx : RISCVBinaryAAX; 1564 1565 defm vfclass : RISCVClassify; 1566 1567 defm vfmerge : RISCVBinaryWithV0; 1568 1569 defm vslideup : RVVSlide; 1570 defm vslidedown : RVVSlide; 1571 1572 defm vslide1up : RISCVBinaryAAX; 1573 defm vslide1down : RISCVBinaryAAX; 1574 defm vfslide1up : RISCVBinaryAAX; 1575 defm vfslide1down : RISCVBinaryAAX; 1576 1577 defm vrgather_vv : RISCVRGatherVV; 1578 defm vrgather_vx : RISCVRGatherVX; 1579 defm vrgatherei16_vv : RISCVRGatherEI16VV; 1580 1581 def "int_riscv_vcompress" : RISCVCompress; 1582 1583 defm vaaddu : RISCVSaturatingBinaryAAXRoundingMode; 1584 defm vaadd : RISCVSaturatingBinaryAAXRoundingMode; 1585 defm vasubu : RISCVSaturatingBinaryAAXRoundingMode; 1586 defm vasub : RISCVSaturatingBinaryAAXRoundingMode; 1587 1588 defm vsmul : RISCVSaturatingBinaryAAXRoundingMode; 1589 1590 defm vssrl : RISCVSaturatingBinaryAAShiftRoundingMode; 1591 defm vssra : RISCVSaturatingBinaryAAShiftRoundingMode; 1592 1593 defm vnclipu : RISCVSaturatingBinaryABShiftRoundingMode; 1594 defm vnclip : RISCVSaturatingBinaryABShiftRoundingMode; 1595 1596 defm vmfeq : RISCVCompare; 1597 defm vmfne : RISCVCompare; 1598 defm vmflt : RISCVCompare; 1599 defm vmfle : RISCVCompare; 1600 defm vmfgt : RISCVCompare; 1601 defm vmfge : RISCVCompare; 1602 1603 defm vredsum : RISCVReduction; 1604 defm vredand : RISCVReduction; 1605 defm vredor : RISCVReduction; 1606 defm vredxor : RISCVReduction; 1607 defm vredminu : RISCVReduction; 1608 defm vredmin : RISCVReduction; 1609 defm vredmaxu : RISCVReduction; 1610 defm vredmax : RISCVReduction; 1611 1612 defm vwredsumu : RISCVReduction; 1613 defm vwredsum : RISCVReduction; 1614 1615 defm vfredosum : RISCVReductionRoundingMode; 1616 defm vfredusum : RISCVReductionRoundingMode; 1617 defm vfredmin : RISCVReduction; 1618 defm vfredmax : RISCVReduction; 1619 1620 defm vfwredusum : RISCVReductionRoundingMode; 1621 defm vfwredosum : RISCVReductionRoundingMode; 1622 1623 def int_riscv_vmand: RISCVBinaryAAAUnMasked; 1624 def int_riscv_vmnand: RISCVBinaryAAAUnMasked; 1625 def int_riscv_vmandn: RISCVBinaryAAAUnMasked; 1626 def int_riscv_vmxor: RISCVBinaryAAAUnMasked; 1627 def int_riscv_vmor: RISCVBinaryAAAUnMasked; 1628 def int_riscv_vmnor: RISCVBinaryAAAUnMasked; 1629 def int_riscv_vmorn: RISCVBinaryAAAUnMasked; 1630 def int_riscv_vmxnor: RISCVBinaryAAAUnMasked; 1631 def int_riscv_vmclr : RISCVNullaryIntrinsic; 1632 def int_riscv_vmset : RISCVNullaryIntrinsic; 1633 1634 defm vcpop : RISCVMaskedUnarySOut; 1635 defm vfirst : RISCVMaskedUnarySOut; 1636 defm vmsbf : RISCVMaskedUnaryMOut; 1637 defm vmsof : RISCVMaskedUnaryMOut; 1638 defm vmsif : RISCVMaskedUnaryMOut; 1639 1640 defm vfcvt_xu_f_v : RISCVConversionRoundingMode; 1641 defm vfcvt_x_f_v : RISCVConversionRoundingMode; 1642 defm vfcvt_rtz_xu_f_v : RISCVConversion; 1643 defm vfcvt_rtz_x_f_v : RISCVConversion; 1644 defm vfcvt_f_xu_v : RISCVConversionRoundingMode; 1645 defm vfcvt_f_x_v : RISCVConversionRoundingMode; 1646 1647 defm vfwcvt_f_xu_v : RISCVConversion; 1648 defm vfwcvt_f_x_v : RISCVConversion; 1649 defm vfwcvt_xu_f_v : RISCVConversionRoundingMode; 1650 defm vfwcvt_x_f_v : RISCVConversionRoundingMode; 1651 defm vfwcvt_rtz_xu_f_v : RISCVConversion; 1652 defm vfwcvt_rtz_x_f_v : RISCVConversion; 1653 defm vfwcvt_f_f_v : RISCVConversion; 1654 defm vfwcvtbf16_f_f_v : RISCVConversion; 1655 1656 defm vfncvt_f_xu_w : RISCVConversionRoundingMode; 1657 defm vfncvt_f_x_w : RISCVConversionRoundingMode; 1658 defm vfncvt_xu_f_w : RISCVConversionRoundingMode; 1659 defm vfncvt_x_f_w : RISCVConversionRoundingMode; 1660 defm vfncvt_rtz_xu_f_w : RISCVConversion; 1661 defm vfncvt_rtz_x_f_w : RISCVConversion; 1662 defm vfncvt_f_f_w : RISCVConversionRoundingMode; 1663 defm vfncvtbf16_f_f_w : RISCVConversionRoundingMode; 1664 defm vfncvt_rod_f_f_w : RISCVConversion; 1665 1666 // Output: (vector) 1667 // Input: (passthru, mask type input, vl) 1668 def int_riscv_viota 1669 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 1670 [LLVMMatchType<0>, 1671 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 1672 llvm_anyint_ty], 1673 [IntrNoMem]>, RISCVVIntrinsic { 1674 let VLOperand = 2; 1675 } 1676 // Output: (vector) 1677 // Input: (maskedoff, mask type vector_in, mask, vl, policy) 1678 def int_riscv_viota_mask 1679 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 1680 [LLVMMatchType<0>, 1681 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 1682 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 1683 llvm_anyint_ty, LLVMMatchType<1>], 1684 [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic { 1685 let VLOperand = 3; 1686 } 1687 // Output: (vector) 1688 // Input: (passthru, vl) 1689 def int_riscv_vid : RISCVID; 1690 1691 // Output: (vector) 1692 // Input: (maskedoff, mask, vl, policy) 1693 def int_riscv_vid_mask 1694 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 1695 [LLVMMatchType<0>, 1696 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 1697 llvm_anyint_ty, LLVMMatchType<1>], 1698 [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic { 1699 let VLOperand = 2; 1700 } 1701 1702 foreach nf = [2, 3, 4, 5, 6, 7, 8] in { 1703 defm vlseg # nf : RISCVUSSegLoad<nf>; 1704 defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>; 1705 defm vlsseg # nf : RISCVSSegLoad<nf>; 1706 defm vloxseg # nf : RISCVISegLoad<nf>; 1707 defm vluxseg # nf : RISCVISegLoad<nf>; 1708 defm vsseg # nf : RISCVUSSegStore<nf>; 1709 defm vssseg # nf : RISCVSSegStore<nf>; 1710 defm vsoxseg # nf : RISCVISegStore<nf>; 1711 defm vsuxseg # nf : RISCVISegStore<nf>; 1712 } 1713 1714 // Strided loads/stores for fixed vectors. 1715 def int_riscv_masked_strided_load 1716 : DefaultAttrsIntrinsic<[llvm_anyvector_ty], 1717 [LLVMMatchType<0>, llvm_anyptr_ty, 1718 llvm_anyint_ty, 1719 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], 1720 [NoCapture<ArgIndex<1>>, IntrReadMem]>; 1721 def int_riscv_masked_strided_store 1722 : DefaultAttrsIntrinsic<[], 1723 [llvm_anyvector_ty, llvm_anyptr_ty, 1724 llvm_anyint_ty, 1725 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], 1726 [NoCapture<ArgIndex<1>>, IntrWriteMem]>; 1727 1728 // Segment loads/stores for fixed vectors. 1729 foreach nf = [2, 3, 4, 5, 6, 7, 8] in { 1730 def int_riscv_seg # nf # _load 1731 : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], 1732 !listsplat(LLVMMatchType<0>, 1733 !add(nf, -1))), 1734 [llvm_anyptr_ty, llvm_anyint_ty], 1735 [NoCapture<ArgIndex<0>>, IntrReadMem]>; 1736 def int_riscv_seg # nf # _store 1737 : DefaultAttrsIntrinsic<[], 1738 !listconcat([llvm_anyvector_ty], 1739 !listsplat(LLVMMatchType<0>, 1740 !add(nf, -1)), 1741 [llvm_anyptr_ty, llvm_anyint_ty]), 1742 [NoCapture<ArgIndex<nf>>, IntrWriteMem]>; 1743 } 1744 1745} // TargetPrefix = "riscv" 1746 1747//===----------------------------------------------------------------------===// 1748// Scalar Cryptography 1749// 1750// These intrinsics will lower directly into the corresponding instructions 1751// added by the scalar cyptography extension, if the extension is present. 1752 1753let TargetPrefix = "riscv" in { 1754 1755class ScalarCryptoByteSelect32 1756 : DefaultAttrsIntrinsic<[llvm_i32_ty], 1757 [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], 1758 [IntrNoMem, IntrSpeculatable, 1759 ImmArg<ArgIndex<2>>]>; 1760 1761class ScalarCryptoGprGprIntrinsic32 1762 : DefaultAttrsIntrinsic<[llvm_i32_ty], 1763 [llvm_i32_ty, llvm_i32_ty], 1764 [IntrNoMem, IntrSpeculatable]>; 1765 1766class ScalarCryptoGprGprIntrinsic64 1767 : DefaultAttrsIntrinsic<[llvm_i64_ty], 1768 [llvm_i64_ty, llvm_i64_ty], 1769 [IntrNoMem, IntrSpeculatable]>; 1770 1771class ScalarCryptoGprIntrinsic32 1772 : DefaultAttrsIntrinsic<[llvm_i32_ty], 1773 [llvm_i32_ty], 1774 [IntrNoMem, IntrSpeculatable]>; 1775 1776class ScalarCryptoGprIntrinsic64 1777 : DefaultAttrsIntrinsic<[llvm_i64_ty], 1778 [llvm_i64_ty], 1779 [IntrNoMem, IntrSpeculatable]>; 1780 1781// Zknd 1782def int_riscv_aes32dsi : ScalarCryptoByteSelect32, 1783 ClangBuiltin<"__builtin_riscv_aes32dsi">; 1784def int_riscv_aes32dsmi : ScalarCryptoByteSelect32, 1785 ClangBuiltin<"__builtin_riscv_aes32dsmi">; 1786 1787def int_riscv_aes64ds : ScalarCryptoGprGprIntrinsic64, 1788 ClangBuiltin<"__builtin_riscv_aes64ds">; 1789def int_riscv_aes64dsm : ScalarCryptoGprGprIntrinsic64, 1790 ClangBuiltin<"__builtin_riscv_aes64dsm">; 1791 1792def int_riscv_aes64im : ScalarCryptoGprIntrinsic64, 1793 ClangBuiltin<"__builtin_riscv_aes64im">; 1794 1795// Zkne 1796def int_riscv_aes32esi : ScalarCryptoByteSelect32, 1797 ClangBuiltin<"__builtin_riscv_aes32esi">; 1798def int_riscv_aes32esmi : ScalarCryptoByteSelect32, 1799 ClangBuiltin<"__builtin_riscv_aes32esmi">; 1800 1801def int_riscv_aes64es : ScalarCryptoGprGprIntrinsic64, 1802 ClangBuiltin<"__builtin_riscv_aes64es">; 1803def int_riscv_aes64esm : ScalarCryptoGprGprIntrinsic64, 1804 ClangBuiltin<"__builtin_riscv_aes64esm">; 1805 1806// Zknd & Zkne 1807def int_riscv_aes64ks2 : ScalarCryptoGprGprIntrinsic64, 1808 ClangBuiltin<"__builtin_riscv_aes64ks2">; 1809def int_riscv_aes64ks1i : DefaultAttrsIntrinsic<[llvm_i64_ty], 1810 [llvm_i64_ty, llvm_i32_ty], 1811 [IntrNoMem, IntrSpeculatable, 1812 ImmArg<ArgIndex<1>>]>, 1813 ClangBuiltin<"__builtin_riscv_aes64ks1i">; 1814 1815// Zknh 1816def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsic32; 1817def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsic32; 1818def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsic32; 1819def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsic32; 1820 1821def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32, 1822 ClangBuiltin<"__builtin_riscv_sha512sig0l">; 1823def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32, 1824 ClangBuiltin<"__builtin_riscv_sha512sig0h">; 1825def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32, 1826 ClangBuiltin<"__builtin_riscv_sha512sig1l">; 1827def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32, 1828 ClangBuiltin<"__builtin_riscv_sha512sig1h">; 1829def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32, 1830 ClangBuiltin<"__builtin_riscv_sha512sum0r">; 1831def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32, 1832 ClangBuiltin<"__builtin_riscv_sha512sum1r">; 1833 1834def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64, 1835 ClangBuiltin<"__builtin_riscv_sha512sig0">; 1836def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64, 1837 ClangBuiltin<"__builtin_riscv_sha512sig1">; 1838def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64, 1839 ClangBuiltin<"__builtin_riscv_sha512sum0">; 1840def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64, 1841 ClangBuiltin<"__builtin_riscv_sha512sum1">; 1842 1843// Zksed 1844def int_riscv_sm4ks : ScalarCryptoByteSelect32; 1845def int_riscv_sm4ed : ScalarCryptoByteSelect32; 1846 1847// Zksh 1848def int_riscv_sm3p0 : ScalarCryptoGprIntrinsic32; 1849def int_riscv_sm3p1 : ScalarCryptoGprIntrinsic32; 1850} // TargetPrefix = "riscv" 1851 1852//===----------------------------------------------------------------------===// 1853// Vector Cryptography 1854// 1855// These intrinsics will lower directly into the corresponding instructions 1856// added by the vector cyptography extension, if the extension is present. 1857let TargetPrefix = "riscv" in { 1858 // Zvkb 1859 defm vandn : RISCVBinaryAAX; 1860 defm vbrev8 : RISCVUnaryAA; 1861 defm vrev8 : RISCVUnaryAA; 1862 defm vrol : RISCVBinaryAAX; 1863 defm vror : RISCVBinaryAAX; 1864 1865 // Zvbb 1866 defm vbrev : RISCVUnaryAA; 1867 defm vclz : RISCVUnaryAA; 1868 defm vctz : RISCVUnaryAA; 1869 defm vcpopv : RISCVUnaryAA; 1870 defm vwsll : RISCVBinaryABX; 1871 1872 // Zvbc 1873 defm vclmul : RISCVBinaryAAX; 1874 defm vclmulh : RISCVBinaryAAX; 1875 1876 // Zvkg 1877 def int_riscv_vghsh : RISCVBinaryAAXUnMaskedZvk; 1878 def int_riscv_vgmul_vv : RISCVUnaryAAUnMaskedZvk<IsVS=0>; 1879 1880 // Zvkned 1881 defm vaesdf : RISCVUnaryAAUnMaskedZvk; 1882 defm vaesdm : RISCVUnaryAAUnMaskedZvk; 1883 defm vaesef : RISCVUnaryAAUnMaskedZvk; 1884 defm vaesem : RISCVUnaryAAUnMaskedZvk; 1885 def int_riscv_vaeskf1 : RISCVBinaryAAXUnMasked<IsVI=1>; 1886 def int_riscv_vaeskf2 : RISCVBinaryAAXUnMaskedZvk<IsVI=1>; 1887 defm vaesz : RISCVUnaryAAUnMaskedZvk<HasVV=0>; 1888 1889 // Zvknha or Zvknhb 1890 def int_riscv_vsha2ch : RISCVBinaryAAXUnMaskedZvk; 1891 def int_riscv_vsha2cl : RISCVBinaryAAXUnMaskedZvk; 1892 def int_riscv_vsha2ms : RISCVBinaryAAXUnMaskedZvk; 1893 1894 // Zvksed 1895 def int_riscv_vsm4k : RISCVBinaryAAXUnMasked<IsVI=1>; 1896 defm vsm4r : RISCVUnaryAAUnMaskedZvk; 1897 1898 // Zvksh 1899 def int_riscv_vsm3c : RISCVBinaryAAXUnMaskedZvk<IsVI=1>; 1900 def int_riscv_vsm3me : RISCVBinaryAAXUnMasked; 1901} // TargetPrefix = "riscv" 1902 1903// Vendor extensions 1904//===----------------------------------------------------------------------===// 1905include "llvm/IR/IntrinsicsRISCVXTHead.td" 1906include "llvm/IR/IntrinsicsRISCVXsf.td" 1907include "llvm/IR/IntrinsicsRISCVXCV.td" 1908