//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the RISCV-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Atomics

// Atomic Intrinsics have multiple versions for different access widths, which
// all follow one of the following signatures (depending on how many arguments
// they require). We carefully instantiate only specific versions of these for
// specific integer widths, rather than using `llvm_anyint_ty`.
//
// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
// canonical names, and the intrinsics used in the code will have a name
// suffixed with the pointer type they are specialised for (denoted `<p>` in the
// names below), in order to avoid type conflicts.

let TargetPrefix = "riscv" in {

  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
  class MaskedAtomicRMWFourArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
  // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
  class MaskedAtomicRMWFiveArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;

  // We define 32-bit and 64-bit variants of the above, where T stands for i32
  // or i64 respectively:
  multiclass MaskedAtomicRMWFourArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
  }

  multiclass MaskedAtomicRMWFiveArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
  }

  // These intrinsics are intended only for internal compiler use (i.e. as
  // part of AtomicExpandPass via the emitMaskedAtomic*Intrinsic hooks). Their
  // names and semantics could change in the future.

  // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(
  //   ptr addr, ixlen oparg, ixlen mask, ixlenimm ordering)
  defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;
  // Signed min and max need an extra shamt operand with which to do sign
  // extension.
  // @llvm.riscv.masked.atomicrmw.{max,min}.{i32,i64}.<p>(
  //   ptr addr, ixlen oparg, ixlen mask, ixlen shamt, ixlenimm ordering)
  defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
  defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;

  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(
  //   ptr addr, ixlen cmpval, ixlen newval, ixlen mask, ixlenimm ordering)
  defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;
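
  // As a sketch of the naming scheme above (operand values illustrative; the
  // ordering immediate carries an llvm::AtomicOrdering value, e.g. 7 for
  // seq_cst), a masked 32-bit atomic exchange through a default-address-space
  // pointer would be called as:
  //
  //   %res = call i32 @llvm.riscv.masked.atomicrmw.xchg.i32.p0(
  //              ptr %addr, i32 %oparg, i32 %mask, i32 7)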

} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Bitmanip (Bit Manipulation) Extension

let TargetPrefix = "riscv" in {

  class BitManipGPRIntrinsics
      : DefaultAttrsIntrinsic<[llvm_any_ty],
                              [LLVMMatchType<0>],
                              [IntrNoMem, IntrSpeculatable]>;
  class BitManipGPRGPRIntrinsics
      : DefaultAttrsIntrinsic<[llvm_any_ty],
                              [LLVMMatchType<0>, LLVMMatchType<0>],
                              [IntrNoMem, IntrSpeculatable]>;

  // Zbb
  def int_riscv_orc_b : BitManipGPRIntrinsics;

  // Zbc or Zbkc
  def int_riscv_clmul  : BitManipGPRGPRIntrinsics;
  def int_riscv_clmulh : BitManipGPRGPRIntrinsics;

  // Zbc
  def int_riscv_clmulr : BitManipGPRGPRIntrinsics;

  // Zbkb
  def int_riscv_brev8 : BitManipGPRIntrinsics;
  def int_riscv_zip   : BitManipGPRIntrinsics;
  def int_riscv_unzip : BitManipGPRIntrinsics;

  // Zbkx
  def int_riscv_xperm4  : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm8  : BitManipGPRGPRIntrinsics;
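
  // Because these classes use llvm_any_ty, the emitted intrinsic names carry
  // a type suffix. A sketch of calls on RV64 (illustrative only):
  //
  //   %r = call i64 @llvm.riscv.orc.b.i64(i64 %x)
  //   %c = call i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)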
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vectors

// The intrinsic does not have any operand that must be extended.
defvar NoScalarOperand = 0xF;

// The intrinsic does not have a VL operand.
// (e.g., riscv_vmv_x_s and riscv_vfmv_f_s)
defvar NoVLOperand = 0x1F;

class RISCVVIntrinsic {
  // These intrinsics may accept illegal integer values in their llvm_any_ty
  // operand, so they have to be extended.
  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
  bits<4> ScalarOperand = NoScalarOperand;
  bits<5> VLOperand = NoVLOperand;
}

let TargetPrefix = "riscv" in {
  // We use anyint here but we only support XLen.
  def int_riscv_vsetvli   : Intrinsic<[llvm_anyint_ty],
                           /* AVL */  [LLVMMatchType<0>,
                           /* VSEW */  LLVMMatchType<0>,
                           /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem,
                                       ImmArg<ArgIndex<1>>,
                                       ImmArg<ArgIndex<2>>]>;
  def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
                            /* VSEW */ [LLVMMatchType<0>,
                            /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem,
                                       ImmArg<ArgIndex<0>>,
                                       ImmArg<ArgIndex<1>>]>;
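
  // A sketch of a vsetvli call on RV64. The VSEW and VLMUL operands are
  // immediates using the vtype field encodings from the V specification
  // (here VSEW=2 i.e. SEW=32 and VLMUL=1 i.e. LMUL=2; the concrete values
  // are assumptions for illustration):
  //
  //   %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 1)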

  // For unit stride mask load
  // Input: (pointer, vl)
  class RISCVUSMLoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [llvm_ptr_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unit stride load
  // Input: (passthru, pointer, vl)
  class RISCVUSLoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
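  // For example, a unit stride load instantiated at <vscale x 2 x i32> with
  // XLen=64 would be called roughly as follows (the vle name is assumed; the
  // concrete defms appear further below):
  //
  //   %v = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(
  //            <vscale x 2 x i32> %passthru, ptr %p, i64 %vl)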
  // For unit stride fault-only-first load
  // Input: (passthru, pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. Using IntrReadMem together with IntrHasSideEffects
  // does not work.
  class RISCVUSLoadFF
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>]>,
                    RISCVVIntrinsic {
    let VLOperand = 2;
  }
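  // The fault-only-first form also returns the new VL, e.g. (the vleff name
  // is assumed, matching the vle sketch above):
  //
  //   %pair = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32.i64(
  //               <vscale x 2 x i32> %passthru, ptr %p, i64 %vl)
  //   %data = extractvalue { <vscale x 2 x i32>, i64 } %pair, 0
  //   %newvl = extractvalue { <vscale x 2 x i32>, i64 } %pair, 1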
  // For unit stride load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  class RISCVUSLoadMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For unit stride fault-only-first load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. Using IntrReadMem together with IntrHasSideEffects
  // does not work.
  class RISCVUSLoadFFMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided load with passthru operand
  // Input: (passthru, pointer, stride, vl)
  class RISCVSLoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided load with mask
  // Input: (maskedoff, pointer, stride, mask, vl, policy)
  class RISCVSLoadMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
                     LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed load with passthru operand
  // Input: (passthru, pointer, index, vl)
  class RISCVILoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty,
                     llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed load with mask
  // Input: (maskedoff, pointer, index, mask, vl, policy)
  class RISCVILoadMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For unit stride store
  // Input: (vector_in, pointer, vl)
  class RISCVUSStore
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride store with mask
  // Input: (vector_in, pointer, mask, vl)
  class RISCVUSStoreMasked
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided store
  // Input: (vector_in, pointer, stride, vl)
  class RISCVSStore
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided store with mask
  // Input: (vector_in, pointer, stride, mask, vl)
  class RISCVSStoreMasked
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed store
  // Input: (vector_in, pointer, index, vl)
  class RISCVIStore
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty,
                     llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed store with mask
  // Input: (vector_in, pointer, index, mask, vl)
  class RISCVIStoreMasked
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is the same as the source vector type.
  // Input: (passthru, vector_in, vl)
  class RISCVUnaryAAUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is the same as the source vector type.
  // Input: (passthru, vector_in, vl, policy)
  class RISCVUnaryAAUnMaskedZvk<bit IsVS>
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, !if(IsVS, llvm_anyvector_ty, LLVMMatchType<0>),
                     llvm_anyint_ty, !if(IsVS, LLVMMatchType<2>, LLVMMatchType<1>)],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  multiclass RISCVUnaryAAUnMaskedZvk<bit HasVV = 1, bit HasVS = 1> {
    if HasVV then
      def "int_riscv_" # NAME # "_vv" : RISCVUnaryAAUnMaskedZvk<IsVS=0>;

    if HasVS then
      def "int_riscv_" # NAME # "_vs" : RISCVUnaryAAUnMaskedZvk<IsVS=1>;
  }
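  // For example, assuming a vector-crypto instruction named foo used this
  // multiclass, `defm foo : RISCVUnaryAAUnMaskedZvk;` would stamp out both
  // int_riscv_foo_vv and int_riscv_foo_vs.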
  // The destination vector type is the same as the first source vector
  // (with mask).
  // Input: (vector_in, vector_in, mask, vl, policy)
  class RISCVUnaryAAMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the source vector type.
  // Input: (passthru, vector_in, frm, vl)
  class RISCVUnaryAAUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<2>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first source vector
  // (with mask).
  // Input: (vector_in, vector_in, mask, frm, vl, policy)
  class RISCVUnaryAAMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Input: (passthru, vector_in, vector_in, mask, vl)
  class RISCVCompress
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first and second source
  // vectors.
  // Input: (vector_in, vector_in, vl)
  class RISCVBinaryAAAUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is the same as the first and second source
  // vectors.
  // Input: (passthru, vector_in, int_vector_in, vl)
  class RISCVRGatherVVUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first and second source
  // vectors.
  // Input: (vector_in, vector_in, int_vector_in, vl, policy)
  class RISCVRGatherVVMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Input: (passthru, vector_in, int16_vector_in, vl)
  class RISCVRGatherEI16VVUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first and second source
  // vectors.
  // Input: (vector_in, vector_in, int16_vector_in, vl, policy)
  class RISCVRGatherEI16VVMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector, and
  // the second operand is an XLen scalar.
  // Input: (passthru, vector_in, xlen_in, vl)
  class RISCVGatherVXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first source vector
  // (with mask). The second operand is an XLen scalar.
  // Input: (maskedoff, vector_in, xlen_in, mask, vl, policy)
  class RISCVGatherVXMasked
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
                    LLVMMatchType<1>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAXUnMasked<bit IsVI = 0>
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    !listconcat([IntrNoMem],
                                !if(IsVI, [ImmArg<ArgIndex<2>>], []))>,
                    RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
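  // A sketch of a binary AAX intrinsic in its .vv form (the vadd name is
  // assumed here; the concrete defms appear later in this file):
  //
  //   %r = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(
  //            <vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a,
  //            <vscale x 4 x i32> %b, i64 %vl)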
  // The destination vector type is the same as the source vector type.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl, policy)
  class RISCVBinaryAAXUnMaskedZvk<bit IsVI = 0>
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>, LLVMMatchType<0>,
                                 llvm_any_ty, llvm_anyint_ty, LLVMMatchType<2>],
                                !listconcat([ImmArg<ArgIndex<4>>, IntrNoMem],
                                            !if(IsVI, [ImmArg<ArgIndex<2>>], []))>,
                                RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first source vector
  // (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryAAXMasked
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                    LLVMMatchType<2>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl)
  class RISCVBinaryAAXUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector
  // (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
  class RISCVBinaryAAXMaskedRoundingMode
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                    LLVMMatchType<2>, LLVMMatchType<2>],
                   [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 5;
  }
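  // The frm operand in the RoundingMode classes above is an immediate holding
  // a floating-point rounding mode encoded as in the RISC-V F extension
  // (0=rne, 1=rtz, 2=rdn, 3=rup, 4=rmm, 7=dyn).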
  // The destination vector type is the same as the first source vector. The
  // second source operand must match the destination type or be an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAShiftUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first source vector (with
  // mask). The second source operand must match the destination type or be an
  // XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryAAShiftMasked
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                    LLVMMatchType<2>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is NOT the same as the first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // The destination vector type is NOT the same as the first source vector
  // (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryABXMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // The destination vector type is NOT the same as the first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl)
  class RISCVBinaryABXUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // The destination vector type is NOT the same as the first source vector
  // (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
  class RISCVBinaryABXMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 5;
  }
  // The destination vector type is NOT the same as the first source vector.
  // The second source operand must match the destination type or be an XLen
  // scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABShiftUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is NOT the same as the first source vector
  // (with mask). The second source operand must match the destination type or
  // be an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryABShiftMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For binary operations with V0 as input.
  // Input: (passthru, vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryWithV0
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For binary operations with mask type output and V0 as input.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryMOutWithV0
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                   [llvm_anyvector_ty, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                    llvm_anyint_ty],
                   [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // For binary operations with mask type output.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryMOut
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 2;
  }
  // For binary operations with mask type output without mask.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVCompareUnMasked
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 2;
  }
  // For binary operations with mask type output with mask.
  // Output: (mask type output)
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVCompareMasked
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
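  // A sketch of an unmasked compare producing a mask vector (the vmseq name
  // is assumed; the concrete defms appear later):
  //
  //   %m = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.nxv4i32.i64(
  //            <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i64 %vl)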
  // For FP classify operations.
  // Output: (bit mask type output)
  // Input: (passthru, vector_in, vl)
  class RISCVClassifyUnMasked
        : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For FP classify operations with mask.
  // Output: (bit mask type output)
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVClassifyMasked
        : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [IntrNoMem, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For saturating binary operations.
  // The destination vector type is the same as the first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // For saturating binary operations with a rounding-mode operand.
  // The destination vector type is the same as the first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  class RISCVSaturatingBinaryAAXUnMaskedRoundingMode
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For saturating binary operations with mask.
  // The destination vector type is the same as the first source vector.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVSaturatingBinaryAAXMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For saturating binary operations with mask and a rounding-mode operand.
  // The destination vector type is the same as the first source vector.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  class RISCVSaturatingBinaryAAXMaskedRoundingMode
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 5;
  }
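  // The vxrm operand in the saturating classes is an immediate holding a
  // fixed-point rounding mode encoded as in the V specification (0=rnu,
  // 1=rne, 2=rdn, 3=rod).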
  // For saturating binary operations.
  // The destination vector type is the same as the first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  class RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem, IntrHasSideEffects]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For saturating binary operations with mask.
  // The destination vector type is the same as the first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  class RISCVSaturatingBinaryAAShiftMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem,
                     IntrHasSideEffects]>,
                    RISCVVIntrinsic {
    let VLOperand = 5;
  }
  // For saturating binary operations.
  // The destination vector type is NOT the same as the first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  class RISCVSaturatingBinaryABShiftUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem, IntrHasSideEffects]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For saturating binary operations with mask.
  // The destination vector type is NOT the same as the first source vector
  // (with mask).
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  class RISCVSaturatingBinaryABShiftMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem,
                     IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 5;
  }
  // Input: (vector_in, vector_in, scalar_in, vl, policy)
  class RVVSlideUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Input: (vector_in, vector_in, scalar_in, mask, vl, policy)
  class RVVSlideMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Unmasked vector multiply-add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
  class RISCVTernaryAAXAUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // Masked vector multiply-add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
  class RISCVTernaryAAXAMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Unmasked vector multiply-add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, frm, vl, policy)
  class RISCVTernaryAAXAUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>,
                    RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Masked vector multiply-add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, frm, vl, policy)
  class RISCVTernaryAAXAMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
                    RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 5;
  }
  // Unmasked widening vector multiply-add operations; the first operand
  // cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
  class RISCVTernaryWideUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      llvm_anyint_ty, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // Masked widening vector multiply-add operations; the first operand
  // cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
  class RISCVTernaryWideMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      llvm_anyint_ty, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Unmasked widening vector multiply-add operations; the first operand
  // cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, frm, vl, policy)
  class RISCVTernaryWideUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>,
                     RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Masked widening vector multiply-add operations; the first operand
  // cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, frm, vl, policy)
  class RISCVTernaryWideMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
                     RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 5;
  }
  // For reduction ternary operations.
  // The destination vector type is the same as the first and third source
  // vectors.
  // Input: (vector_in, vector_in, vector_in, vl)
  class RISCVReductionUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source
  // vectors. The mask type comes from the second source vector.
  // Input: (maskedoff, vector_in, vector_in, vector_in, mask, vl)
  class RISCVReductionMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For reduction ternary operations.
  // The destination vector type is the same as the first and third source
  // vectors.
  // Input: (vector_in, vector_in, vector_in, frm, vl)
  class RISCVReductionUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source
  // vectors. The mask type comes from the second source vector.
  // Input: (vector_in, vector_in, vector_in, mask, frm, vl)
  class RISCVReductionMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 5;
  }
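  // Reductions take the accumulator in element 0 of the third operand and
  // produce the result in element 0 of the destination, e.g. (the vredsum
  // name is assumed; the concrete defms appear later):
  //
  //   %red = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(
  //              <vscale x 2 x i32> %passthru, <vscale x 8 x i32> %vec,
  //              <vscale x 2 x i32> %scalar, i64 %vl)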
  // For unary operations with scalar type output without mask
  // Output: (scalar type)
  // Input: (vector_in, vl)
  class RISCVMaskedUnarySOutUnMasked
        : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unary operations with scalar type output with mask
  // Output: (scalar type)
  // Input: (vector_in, mask, vl)
  class RISCVMaskedUnarySOutMasked
        : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is NOT the same as the source vector.
  // Input: (passthru, vector_in, vl)
  class RISCVUnaryABUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is NOT the same as the source vector
  // (with mask).
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVUnaryABMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For unary operations with the same vector type in/out without mask
  // Output: (vector)
  // Input: (vector_in, vl)
  class RISCVUnaryUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For mask unary operations with mask type in/out with mask
  // Output: (mask type output)
  // Input: (mask type maskedoff, mask type vector_in, mask, vl)
  class RISCVMaskedUnaryMOutMasked
        : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Output: (vector)
  // Input: (vl)
  class RISCVNullaryIntrinsic
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 0;
  }
  // Output: (vector)
  // Input: (passthru, vl)
  class RISCVID
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For conversion unary operations.
  // Input: (passthru, vector_in, vl)
  class RISCVConversionUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVConversionMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For conversion unary operations.
  // Input: (passthru, vector_in, frm, vl)
  class RISCVConversionUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<2>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, frm, vl, policy)
  class RISCVConversionMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }

  // For unit stride segment load
  // Input: (passthru, pointer, vl)
  class RISCVUSSegLoad<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride segment load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  class RISCVUSSegLoadMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
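
  // With nf=2, a unit stride segment load returns two vectors, e.g. (the
  // vlseg2 name is assumed; the concrete defs are generated further below):
  //
  //   %pair = call { <vscale x 2 x i32>, <vscale x 2 x i32> }
  //               @llvm.riscv.vlseg2.nxv2i32.i64(
  //                   <vscale x 2 x i32> %pt0, <vscale x 2 x i32> %pt1,
  //                   ptr %p, i64 %vl)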

  // For unit stride fault-only-first segment load
  // Input: (passthru, pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. Using IntrReadMem together with IntrHasSideEffects
  // does not work.
  class RISCVUSSegLoadFF<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                    [llvm_ptr_ty, LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride fault-only-first segment load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. Using IntrReadMem together with IntrHasSideEffects
  // does not work.
  class RISCVUSSegLoadFFMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                     [llvm_ptr_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      LLVMMatchType<1>, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }

  // For strided segment load
  // Input: (passthru, pointer, stride, vl)
  class RISCVSSegLoad<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                    [llvm_ptr_ty, llvm_anyint_ty, LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For strided segment load with mask
  // Input: (maskedoff, pointer, stride, mask, vl, policy)
  class RISCVSSegLoadMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  // For indexed segment load
  // Input: (passthru, pointer, index, vl)
  class RISCVISegLoad<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                    [llvm_ptr_ty, llvm_anyvector_ty, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For indexed segment load with mask
  // Input: (maskedoff, pointer, index, mask, vl, policy)
  class RISCVISegLoadMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty,
                                 llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<2>]),
                    [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }
1091
1092  // For unit stride segment store
1093  // Input: (value, pointer, vl)
1094  class RISCVUSSegStore<int nf>
1095        : DefaultAttrsIntrinsic<[],
1096                    !listconcat([llvm_anyvector_ty],
1097                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
1098                                [llvm_ptr_ty, llvm_anyint_ty]),
1099                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
1100    let VLOperand = !add(nf, 1);
1101  }
1102  // For unit stride segment store with mask
1103  // Input: (value, pointer, mask, vl)
1104  class RISCVUSSegStoreMasked<int nf>
1105        : DefaultAttrsIntrinsic<[],
1106                    !listconcat([llvm_anyvector_ty],
1107                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
1108                                [llvm_ptr_ty,
1109                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1110                                 llvm_anyint_ty]),
1111                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
1112    let VLOperand = !add(nf, 2);
1113  }
1114
1115  // For stride segment store
1116  // Input: (value, pointer, offset, vl)
1117  class RISCVSSegStore<int nf>
1118        : DefaultAttrsIntrinsic<[],
1119                    !listconcat([llvm_anyvector_ty],
1120                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
1121                                [llvm_ptr_ty, llvm_anyint_ty,
1122                                 LLVMMatchType<1>]),
1123                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
1124    let VLOperand = !add(nf, 2);
1125  }
1126  // For stride segment store with mask
1127  // Input: (value, pointer, offset, mask, vl)
1128  class RISCVSSegStoreMasked<int nf>
1129        : DefaultAttrsIntrinsic<[],
1130                    !listconcat([llvm_anyvector_ty],
1131                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
1132                                [llvm_ptr_ty, llvm_anyint_ty,
1133                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1134                                 LLVMMatchType<1>]),
1135                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
1136    let VLOperand = !add(nf, 3);
1137  }
1138
  // For indexed segment store
  // Input: (value, pointer, index, vl)
  class RISCVISegStore<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyvector_ty,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For indexed segment store with mask
  // Input: (value, pointer, index, mask, vl)
  class RISCVISegStoreMasked<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

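  // Each multiclass below defines an unmasked intrinsic, "int_riscv_<NAME>",
  // together with its masked counterpart, "int_riscv_<NAME>_mask".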
  multiclass RISCVUSLoad {
    def "int_riscv_" # NAME : RISCVUSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMasked;
  }
  multiclass RISCVUSLoadFF {
    def "int_riscv_" # NAME : RISCVUSLoadFF;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMasked;
  }
  multiclass RISCVSLoad {
    def "int_riscv_" # NAME : RISCVSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMasked;
  }
  multiclass RISCVILoad {
    def "int_riscv_" # NAME : RISCVILoad;
    def "int_riscv_" # NAME # "_mask" : RISCVILoadMasked;
  }
  multiclass RISCVUSStore {
    def "int_riscv_" # NAME : RISCVUSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMasked;
  }
  multiclass RISCVSStore {
    def "int_riscv_" # NAME : RISCVSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMasked;
  }

  multiclass RISCVIStore {
    def "int_riscv_" # NAME : RISCVIStore;
    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMasked;
  }
  multiclass RISCVUnaryAA {
    def "int_riscv_" # NAME : RISCVUnaryAAUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMasked;
  }
  multiclass RISCVUnaryAARoundingMode {
    def "int_riscv_" # NAME : RISCVUnaryAAUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMaskedRoundingMode;
  }
  multiclass RISCVUnaryAB {
    def "int_riscv_" # NAME : RISCVUnaryABUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMasked;
  }
  // AAX means the destination type (A) is the same as the first source
  // type (A). X means any type for the second source operand.
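  // As an illustrative sketch (overloaded type suffixes omitted; the
  // RISCVBinaryAAX*Masked classes above hold the authoritative operand
  // lists), an intrinsic such as int_riscv_vadd defined below has the shape:
  //   T @llvm.riscv.vadd(T passthru, T op1, X op2, ixlen vl)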
  multiclass RISCVBinaryAAX {
    def "int_riscv_" # NAME : RISCVBinaryAAXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMasked;
  }
  multiclass RISCVBinaryAAXRoundingMode {
    def "int_riscv_" # NAME : RISCVBinaryAAXUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMaskedRoundingMode;
  }
  // Like RISCVBinaryAAX, but the second operand is used as a shift amount, so
  // it must be a vector or an XLen scalar.
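  // Illustrative shape (type suffixes omitted), e.g. for int_riscv_vsll:
  //   T @llvm.riscv.vsll(T passthru, T op1, [T|ixlen] shift, ixlen vl)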
  multiclass RISCVBinaryAAShift {
    def "int_riscv_" # NAME : RISCVBinaryAAShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMasked;
  }
  multiclass RISCVRGatherVV {
    def "int_riscv_" # NAME : RISCVRGatherVVUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMasked;
  }
  multiclass RISCVRGatherVX {
    def "int_riscv_" # NAME : RISCVGatherVXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMasked;
  }
  multiclass RISCVRGatherEI16VV {
    def "int_riscv_" # NAME : RISCVRGatherEI16VVUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMasked;
  }
  // ABX means the destination type (A) is different from the first source
  // type (B). X means any type for the second source operand.
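  // Illustrative shape (type suffixes omitted), e.g. for the widening
  // int_riscv_vwadd defined below:
  //   A @llvm.riscv.vwadd(A passthru, B op1, X op2, ixlen vl)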
  multiclass RISCVBinaryABX {
    def "int_riscv_" # NAME : RISCVBinaryABXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMasked;
  }
  multiclass RISCVBinaryABXRoundingMode {
    def "int_riscv_" # NAME : RISCVBinaryABXUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMaskedRoundingMode;
  }
  // Like RISCVBinaryABX, but the second operand is used as a shift amount, so
  // it must be a vector or an XLen scalar.
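  // Illustrative shape (type suffixes omitted), e.g. for the narrowing
  // int_riscv_vnsrl:
  //   A @llvm.riscv.vnsrl(A passthru, B op1, [vector|ixlen] shift, ixlen vl)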
  multiclass RISCVBinaryABShift {
    def "int_riscv_" # NAME : RISCVBinaryABShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMasked;
  }
  multiclass RISCVBinaryWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryWithV0;
  }
  multiclass RISCVBinaryMaskOutWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
  }
  multiclass RISCVBinaryMaskOut {
    def "int_riscv_" # NAME : RISCVBinaryMOut;
  }
  multiclass RISCVSaturatingBinaryAAX {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMasked;
  }
  multiclass RISCVSaturatingBinaryAAXRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMaskedRoundingMode;
  }
  multiclass RISCVSaturatingBinaryAAShiftRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMaskedRoundingMode;
  }
  multiclass RISCVSaturatingBinaryABShiftRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMaskedRoundingMode;
  }
  multiclass RVVSlide {
    def "int_riscv_" # NAME : RVVSlideUnMasked;
    def "int_riscv_" # NAME # "_mask" : RVVSlideMasked;
  }
  multiclass RISCVTernaryAAXA {
    def "int_riscv_" # NAME : RISCVTernaryAAXAUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMasked;
  }
  multiclass RISCVTernaryAAXARoundingMode {
    def "int_riscv_" # NAME : RISCVTernaryAAXAUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMaskedRoundingMode;
  }
  multiclass RISCVCompare {
    def "int_riscv_" # NAME : RISCVCompareUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVCompareMasked;
  }
  multiclass RISCVClassify {
    def "int_riscv_" # NAME : RISCVClassifyUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMasked;
  }
  multiclass RISCVTernaryWide {
    def "int_riscv_" # NAME : RISCVTernaryWideUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMasked;
  }
  multiclass RISCVTernaryWideRoundingMode {
    def "int_riscv_" # NAME : RISCVTernaryWideUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMaskedRoundingMode;
  }
  multiclass RISCVReduction {
    def "int_riscv_" # NAME : RISCVReductionUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMasked;
  }
  multiclass RISCVReductionRoundingMode {
    def "int_riscv_" # NAME : RISCVReductionUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMaskedRoundingMode;
  }
  multiclass RISCVMaskedUnarySOut {
    def "int_riscv_" # NAME : RISCVMaskedUnarySOutUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnarySOutMasked;
  }
  multiclass RISCVMaskedUnaryMOut {
    def "int_riscv_" # NAME : RISCVUnaryUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnaryMOutMasked;
  }
  multiclass RISCVConversion {
    def "int_riscv_" # NAME : RISCVConversionUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVConversionMasked;
  }
  multiclass RISCVConversionRoundingMode {
    def "int_riscv_" # NAME : RISCVConversionUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVConversionMaskedRoundingMode;
  }
  multiclass RISCVUSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMasked<nf>;
  }
  multiclass RISCVUSSegLoadFF<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMasked<nf>;
  }
  multiclass RISCVSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMasked<nf>;
  }
  multiclass RISCVISegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVISegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMasked<nf>;
  }
  multiclass RISCVUSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMasked<nf>;
  }
  multiclass RISCVSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMasked<nf>;
  }
  multiclass RISCVISegStore<int nf> {
    def "int_riscv_" # NAME : RISCVISegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMasked<nf>;
  }

  defm vle : RISCVUSLoad;
  defm vleff : RISCVUSLoadFF;
  defm vse : RISCVUSStore;
  defm vlse : RISCVSLoad;
  defm vsse : RISCVSStore;
  defm vluxei : RISCVILoad;
  defm vloxei : RISCVILoad;
  defm vsoxei : RISCVIStore;
  defm vsuxei : RISCVIStore;

  def int_riscv_vlm : RISCVUSMLoad;
  def int_riscv_vsm : RISCVUSStore;

  defm vadd : RISCVBinaryAAX;
  defm vsub : RISCVBinaryAAX;
  defm vrsub : RISCVBinaryAAX;

  defm vwaddu : RISCVBinaryABX;
  defm vwadd : RISCVBinaryABX;
  defm vwaddu_w : RISCVBinaryAAX;
  defm vwadd_w : RISCVBinaryAAX;
  defm vwsubu : RISCVBinaryABX;
  defm vwsub : RISCVBinaryABX;
  defm vwsubu_w : RISCVBinaryAAX;
  defm vwsub_w : RISCVBinaryAAX;

  defm vzext : RISCVUnaryAB;
  defm vsext : RISCVUnaryAB;

  defm vadc : RISCVBinaryWithV0;
  defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
  defm vmadc : RISCVBinaryMaskOut;

  defm vsbc : RISCVBinaryWithV0;
  defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
  defm vmsbc : RISCVBinaryMaskOut;

  defm vand : RISCVBinaryAAX;
  defm vor : RISCVBinaryAAX;
  defm vxor : RISCVBinaryAAX;

  defm vsll : RISCVBinaryAAShift;
  defm vsrl : RISCVBinaryAAShift;
  defm vsra : RISCVBinaryAAShift;

  defm vnsrl : RISCVBinaryABShift;
  defm vnsra : RISCVBinaryABShift;

  defm vmseq : RISCVCompare;
  defm vmsne : RISCVCompare;
  defm vmsltu : RISCVCompare;
  defm vmslt : RISCVCompare;
  defm vmsleu : RISCVCompare;
  defm vmsle : RISCVCompare;
  defm vmsgtu : RISCVCompare;
  defm vmsgt : RISCVCompare;
  defm vmsgeu : RISCVCompare;
  defm vmsge : RISCVCompare;

  defm vminu : RISCVBinaryAAX;
  defm vmin : RISCVBinaryAAX;
  defm vmaxu : RISCVBinaryAAX;
  defm vmax : RISCVBinaryAAX;

  defm vmul : RISCVBinaryAAX;
  defm vmulh : RISCVBinaryAAX;
  defm vmulhu : RISCVBinaryAAX;
  defm vmulhsu : RISCVBinaryAAX;

  defm vdivu : RISCVBinaryAAX;
  defm vdiv : RISCVBinaryAAX;
  defm vremu : RISCVBinaryAAX;
  defm vrem : RISCVBinaryAAX;

  defm vwmul : RISCVBinaryABX;
  defm vwmulu : RISCVBinaryABX;
  defm vwmulsu : RISCVBinaryABX;

  defm vmacc : RISCVTernaryAAXA;
  defm vnmsac : RISCVTernaryAAXA;
  defm vmadd : RISCVTernaryAAXA;
  defm vnmsub : RISCVTernaryAAXA;

  defm vwmaccu  : RISCVTernaryWide;
  defm vwmacc   : RISCVTernaryWide;
  defm vwmaccus : RISCVTernaryWide;
  defm vwmaccsu : RISCVTernaryWide;

  defm vfadd : RISCVBinaryAAXRoundingMode;
  defm vfsub : RISCVBinaryAAXRoundingMode;
  defm vfrsub : RISCVBinaryAAXRoundingMode;

  defm vfwadd : RISCVBinaryABXRoundingMode;
  defm vfwsub : RISCVBinaryABXRoundingMode;
  defm vfwadd_w : RISCVBinaryAAXRoundingMode;
  defm vfwsub_w : RISCVBinaryAAXRoundingMode;

  defm vsaddu : RISCVSaturatingBinaryAAX;
  defm vsadd : RISCVSaturatingBinaryAAX;
  defm vssubu : RISCVSaturatingBinaryAAX;
  defm vssub : RISCVSaturatingBinaryAAX;

  defm vmerge : RISCVBinaryWithV0;

  // Output: (vector)
  // Input: (passthru, vector_in, vl)
  def int_riscv_vmv_v_v : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMMatchType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vmv_v_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMVectorElementType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vfmv_v_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  def int_riscv_vmv_x_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                                [llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_s_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMVectorElementType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  def int_riscv_vfmv_f_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                                 [llvm_anyfloat_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vfmv_s_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  defm vfmul : RISCVBinaryAAXRoundingMode;
  defm vfdiv : RISCVBinaryAAXRoundingMode;
  defm vfrdiv : RISCVBinaryAAXRoundingMode;

  defm vfwmul : RISCVBinaryABXRoundingMode;

  defm vfmacc : RISCVTernaryAAXARoundingMode;
  defm vfnmacc : RISCVTernaryAAXARoundingMode;
  defm vfmsac : RISCVTernaryAAXARoundingMode;
  defm vfnmsac : RISCVTernaryAAXARoundingMode;
  defm vfmadd : RISCVTernaryAAXARoundingMode;
  defm vfnmadd : RISCVTernaryAAXARoundingMode;
  defm vfmsub : RISCVTernaryAAXARoundingMode;
  defm vfnmsub : RISCVTernaryAAXARoundingMode;

  defm vfwmacc : RISCVTernaryWideRoundingMode;
  defm vfwmaccbf16 : RISCVTernaryWideRoundingMode;
  defm vfwnmacc : RISCVTernaryWideRoundingMode;
  defm vfwmsac : RISCVTernaryWideRoundingMode;
  defm vfwnmsac : RISCVTernaryWideRoundingMode;

  defm vfsqrt : RISCVUnaryAARoundingMode;
  defm vfrsqrt7 : RISCVUnaryAA;
  defm vfrec7 : RISCVUnaryAARoundingMode;

  defm vfmin : RISCVBinaryAAX;
  defm vfmax : RISCVBinaryAAX;

  defm vfsgnj : RISCVBinaryAAX;
  defm vfsgnjn : RISCVBinaryAAX;
  defm vfsgnjx : RISCVBinaryAAX;

  defm vfclass : RISCVClassify;

  defm vfmerge : RISCVBinaryWithV0;

  defm vslideup : RVVSlide;
  defm vslidedown : RVVSlide;

  defm vslide1up : RISCVBinaryAAX;
  defm vslide1down : RISCVBinaryAAX;
  defm vfslide1up : RISCVBinaryAAX;
  defm vfslide1down : RISCVBinaryAAX;

  defm vrgather_vv : RISCVRGatherVV;
  defm vrgather_vx : RISCVRGatherVX;
  defm vrgatherei16_vv : RISCVRGatherEI16VV;

  def int_riscv_vcompress : RISCVCompress;

  defm vaaddu : RISCVSaturatingBinaryAAXRoundingMode;
  defm vaadd : RISCVSaturatingBinaryAAXRoundingMode;
  defm vasubu : RISCVSaturatingBinaryAAXRoundingMode;
  defm vasub : RISCVSaturatingBinaryAAXRoundingMode;

  defm vsmul : RISCVSaturatingBinaryAAXRoundingMode;

  defm vssrl : RISCVSaturatingBinaryAAShiftRoundingMode;
  defm vssra : RISCVSaturatingBinaryAAShiftRoundingMode;

  defm vnclipu : RISCVSaturatingBinaryABShiftRoundingMode;
  defm vnclip : RISCVSaturatingBinaryABShiftRoundingMode;

  defm vmfeq : RISCVCompare;
  defm vmfne : RISCVCompare;
  defm vmflt : RISCVCompare;
  defm vmfle : RISCVCompare;
  defm vmfgt : RISCVCompare;
  defm vmfge : RISCVCompare;

  defm vredsum : RISCVReduction;
  defm vredand : RISCVReduction;
  defm vredor : RISCVReduction;
  defm vredxor : RISCVReduction;
  defm vredminu : RISCVReduction;
  defm vredmin : RISCVReduction;
  defm vredmaxu : RISCVReduction;
  defm vredmax : RISCVReduction;

  defm vwredsumu : RISCVReduction;
  defm vwredsum : RISCVReduction;

  defm vfredosum : RISCVReductionRoundingMode;
  defm vfredusum : RISCVReductionRoundingMode;
  defm vfredmin : RISCVReduction;
  defm vfredmax : RISCVReduction;

  defm vfwredusum : RISCVReductionRoundingMode;
  defm vfwredosum : RISCVReductionRoundingMode;

  def int_riscv_vmand : RISCVBinaryAAAUnMasked;
  def int_riscv_vmnand : RISCVBinaryAAAUnMasked;
  def int_riscv_vmandn : RISCVBinaryAAAUnMasked;
  def int_riscv_vmxor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmnor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmorn : RISCVBinaryAAAUnMasked;
  def int_riscv_vmxnor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmclr : RISCVNullaryIntrinsic;
  def int_riscv_vmset : RISCVNullaryIntrinsic;

  defm vcpop : RISCVMaskedUnarySOut;
  defm vfirst : RISCVMaskedUnarySOut;
  defm vmsbf : RISCVMaskedUnaryMOut;
  defm vmsof : RISCVMaskedUnaryMOut;
  defm vmsif : RISCVMaskedUnaryMOut;

  defm vfcvt_xu_f_v : RISCVConversionRoundingMode;
  defm vfcvt_x_f_v : RISCVConversionRoundingMode;
  defm vfcvt_rtz_xu_f_v : RISCVConversion;
  defm vfcvt_rtz_x_f_v : RISCVConversion;
  defm vfcvt_f_xu_v : RISCVConversionRoundingMode;
  defm vfcvt_f_x_v : RISCVConversionRoundingMode;

  defm vfwcvt_f_xu_v : RISCVConversion;
  defm vfwcvt_f_x_v : RISCVConversion;
  defm vfwcvt_xu_f_v : RISCVConversionRoundingMode;
  defm vfwcvt_x_f_v : RISCVConversionRoundingMode;
  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
  defm vfwcvt_rtz_x_f_v : RISCVConversion;
  defm vfwcvt_f_f_v : RISCVConversion;
  defm vfwcvtbf16_f_f_v : RISCVConversion;

  defm vfncvt_f_xu_w : RISCVConversionRoundingMode;
  defm vfncvt_f_x_w : RISCVConversionRoundingMode;
  defm vfncvt_xu_f_w : RISCVConversionRoundingMode;
  defm vfncvt_x_f_w : RISCVConversionRoundingMode;
  defm vfncvt_rtz_xu_f_w : RISCVConversion;
  defm vfncvt_rtz_x_f_w : RISCVConversion;
  defm vfncvt_f_f_w : RISCVConversionRoundingMode;
  defm vfncvtbf16_f_f_w : RISCVConversionRoundingMode;
  defm vfncvt_rod_f_f_w : RISCVConversion;

  // Output: (vector)
  // Input: (passthru, mask type input, vl)
  def int_riscv_viota
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty],
                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Output: (vector)
  // Input: (maskedoff, mask type vector_in, mask, vl, policy)
  def int_riscv_viota_mask
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>],
                                [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Output: (vector)
  // Input: (passthru, vl)
  def int_riscv_vid : RISCVID;

  // Output: (vector)
  // Input: (maskedoff, mask, vl, policy)
  def int_riscv_vid_mask
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>],
                                [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

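  // The loop below instantiates the segment access intrinsics for every
  // supported field count, e.g. int_riscv_vlseg2 / int_riscv_vlseg2_mask and
  // int_riscv_vlseg2ff, up through nf = 8.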
  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    defm vlseg # nf : RISCVUSSegLoad<nf>;
    defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
    defm vlsseg # nf : RISCVSSegLoad<nf>;
    defm vloxseg # nf : RISCVISegLoad<nf>;
    defm vluxseg # nf : RISCVISegLoad<nf>;
    defm vsseg # nf : RISCVUSSegStore<nf>;
    defm vssseg # nf : RISCVSSegStore<nf>;
    defm vsoxseg # nf : RISCVISegStore<nf>;
    defm vsuxseg # nf : RISCVISegStore<nf>;
  }

  // Strided loads/stores for fixed vectors.
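  // Output: (vector)
  // Input: (passthru, pointer, stride, mask)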
  def int_riscv_masked_strided_load
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>, llvm_anyptr_ty,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                                [NoCapture<ArgIndex<1>>, IntrReadMem]>;
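  // Input: (value, pointer, stride, mask)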
  def int_riscv_masked_strided_store
        : DefaultAttrsIntrinsic<[],
                                [llvm_anyvector_ty, llvm_anyptr_ty,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                                [NoCapture<ArgIndex<1>>, IntrWriteMem]>;

  // Segment loads/stores for fixed vectors.
  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
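    // Output: (nf vectors)
    // Input: (pointer, vl)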
    def int_riscv_seg # nf # _load
          : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                              !listsplat(LLVMMatchType<0>,
                                              !add(nf, -1))),
                                  [llvm_anyptr_ty, llvm_anyint_ty],
                                  [NoCapture<ArgIndex<0>>, IntrReadMem]>;
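    // Input: (nf values, pointer, vl)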
    def int_riscv_seg # nf # _store
          : DefaultAttrsIntrinsic<[],
                                  !listconcat([llvm_anyvector_ty],
                                              !listsplat(LLVMMatchType<0>,
                                                          !add(nf, -1)),
                                              [llvm_anyptr_ty, llvm_anyint_ty]),
                                  [NoCapture<ArgIndex<nf>>, IntrWriteMem]>;
  }

} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Scalar Cryptography
//
// These intrinsics will lower directly into the corresponding instructions
// added by the scalar cryptography extension, if the extension is present.

let TargetPrefix = "riscv" in {

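// i32 @llvm.<name>(i32, i32, i32 imm);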
class ScalarCryptoByteSelect32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable,
                             ImmArg<ArgIndex<2>>]>;

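// i32 @llvm.<name>(i32, i32);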
class ScalarCryptoGprGprIntrinsic32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable]>;

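// i64 @llvm.<name>(i64, i64);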
class ScalarCryptoGprGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;

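// i32 @llvm.<name>(i32);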
class ScalarCryptoGprIntrinsic32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable]>;

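// i64 @llvm.<name>(i64);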
class ScalarCryptoGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;

// Zknd
def int_riscv_aes32dsi  : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32dsi">;
def int_riscv_aes32dsmi : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32dsmi">;

def int_riscv_aes64ds   : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64ds">;
def int_riscv_aes64dsm  : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64dsm">;

def int_riscv_aes64im   : ScalarCryptoGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64im">;

// Zkne
def int_riscv_aes32esi  : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32esi">;
def int_riscv_aes32esmi : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32esmi">;

def int_riscv_aes64es   : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64es">;
def int_riscv_aes64esm  : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64esm">;

// Zknd & Zkne
def int_riscv_aes64ks2  : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64ks2">;
def int_riscv_aes64ks1i : DefaultAttrsIntrinsic<[llvm_i64_ty],
                                                [llvm_i64_ty, llvm_i32_ty],
                                                [IntrNoMem, IntrSpeculatable,
                                                 ImmArg<ArgIndex<1>>]>,
                          ClangBuiltin<"__builtin_riscv_aes64ks1i">;

// Zknh
def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsic32;

def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig0l">;
def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig0h">;
def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig1l">;
def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig1h">;
def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sum0r">;
def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sum1r">;

def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sig0">;
def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sig1">;
def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sum0">;
def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sum1">;

// Zksed
def int_riscv_sm4ks      : ScalarCryptoByteSelect32;
def int_riscv_sm4ed      : ScalarCryptoByteSelect32;

// Zksh
def int_riscv_sm3p0      : ScalarCryptoGprIntrinsic32;
def int_riscv_sm3p1      : ScalarCryptoGprIntrinsic32;
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vector Cryptography
//
// These intrinsics will lower directly into the corresponding instructions
// added by the vector cryptography extension, if the extension is present.
let TargetPrefix = "riscv" in {
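  // Note: several Zvk helper classes used below take flags (e.g. HasVV, IsVS,
  // IsVI) that select which operand forms (.vv/.vs/.vi) are instantiated; see
  // their definitions earlier in this file.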
  // Zvkb
  defm vandn             : RISCVBinaryAAX;
  defm vbrev8            : RISCVUnaryAA;
  defm vrev8             : RISCVUnaryAA;
  defm vrol              : RISCVBinaryAAX;
  defm vror              : RISCVBinaryAAX;

  // Zvbb
  defm vbrev             : RISCVUnaryAA;
  defm vclz              : RISCVUnaryAA;
  defm vctz              : RISCVUnaryAA;
  defm vcpopv            : RISCVUnaryAA;
  defm vwsll             : RISCVBinaryABX;

  // Zvbc
  defm vclmul            : RISCVBinaryAAX;
  defm vclmulh           : RISCVBinaryAAX;

  // Zvkg
  def int_riscv_vghsh    : RISCVBinaryAAXUnMaskedZvk;
  def int_riscv_vgmul_vv : RISCVUnaryAAUnMaskedZvk<IsVS=0>;

  // Zvkned
  defm vaesdf            : RISCVUnaryAAUnMaskedZvk;
  defm vaesdm            : RISCVUnaryAAUnMaskedZvk;
  defm vaesef            : RISCVUnaryAAUnMaskedZvk;
  defm vaesem            : RISCVUnaryAAUnMaskedZvk;
  def int_riscv_vaeskf1  : RISCVBinaryAAXUnMasked<IsVI=1>;
  def int_riscv_vaeskf2  : RISCVBinaryAAXUnMaskedZvk<IsVI=1>;
  defm vaesz             : RISCVUnaryAAUnMaskedZvk<HasVV=0>;

  // Zvknha or Zvknhb
  def int_riscv_vsha2ch  : RISCVBinaryAAXUnMaskedZvk;
  def int_riscv_vsha2cl  : RISCVBinaryAAXUnMaskedZvk;
  def int_riscv_vsha2ms  : RISCVBinaryAAXUnMaskedZvk;

  // Zvksed
  def int_riscv_vsm4k    : RISCVBinaryAAXUnMasked<IsVI=1>;
  defm vsm4r             : RISCVUnaryAAUnMaskedZvk;

  // Zvksh
  def int_riscv_vsm3c    : RISCVBinaryAAXUnMaskedZvk<IsVI=1>;
  def int_riscv_vsm3me   : RISCVBinaryAAXUnMasked;
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vendor extensions
//===----------------------------------------------------------------------===//
include "llvm/IR/IntrinsicsRISCVXTHead.td"
include "llvm/IR/IntrinsicsRISCVXsf.td"
include "llvm/IR/IntrinsicsRISCVXCV.td"
