1 //===-- llvm/CodeGen/ISDOpcodes.h - CodeGen opcodes -------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file declares codegen opcodes and related utilities.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #ifndef LLVM_CODEGEN_ISDOPCODES_H
14 #define LLVM_CODEGEN_ISDOPCODES_H
15 
16 #include "llvm/CodeGen/ValueTypes.h"
17 
18 namespace llvm {
19 
20 /// ISD namespace - This namespace contains an enum which represents all of the
21 /// SelectionDAG node types and value types.
22 ///
23 namespace ISD {
24 
25 //===--------------------------------------------------------------------===//
26 /// ISD::NodeType enum - This enum defines the target-independent operators
27 /// for a SelectionDAG.
28 ///
29 /// Targets may also define target-dependent operator codes for SDNodes. For
30 /// example, on x86, these are the enum values in the X86ISD namespace.
31 /// Targets should aim to use target-independent operators to model their
32 /// instruction sets as much as possible, and only use target-dependent
33 /// operators when they have special requirements.
34 ///
/// Finally, during and after selection proper, SDNodes may use special
36 /// operator codes that correspond directly with MachineInstr opcodes. These
37 /// are used to represent selected instructions. See the isMachineOpcode()
38 /// and getMachineOpcode() member functions of SDNode.
39 ///
40 enum NodeType {
41 
42   /// DELETED_NODE - This is an illegal value that is used to catch
43   /// errors.  This opcode is not a legal opcode for any node.
44   DELETED_NODE,
45 
46   /// EntryToken - This is the marker used to indicate the start of a region.
47   EntryToken,
48 
49   /// TokenFactor - This node takes multiple tokens as input and produces a
50   /// single token result. This is used to represent the fact that the operand
51   /// operators are independent of each other.
52   TokenFactor,
53 
54   /// AssertSext, AssertZext - These nodes record if a register contains a
55   /// value that has already been zero or sign extended from a narrower type.
56   /// These nodes take two operands.  The first is the node that has already
57   /// been extended, and the second is a value type node indicating the width
58   /// of the extension.
  /// NOTE: If the source value (or any vector element value) is poison, the
  /// assertion does not hold for that value.
61   AssertSext,
62   AssertZext,
63 
64   /// AssertAlign - These nodes record if a register contains a value that
65   /// has a known alignment and the trailing bits are known to be zero.
  /// NOTE: If the source value (or any vector element value) is poison, the
  /// assertion does not hold for that value.
68   AssertAlign,
69 
70   /// Various leaf nodes.
71   BasicBlock,
72   VALUETYPE,
73   CONDCODE,
74   Register,
75   RegisterMask,
76   Constant,
77   ConstantFP,
78   GlobalAddress,
79   GlobalTLSAddress,
80   FrameIndex,
81   JumpTable,
82   ConstantPool,
83   ExternalSymbol,
84   BlockAddress,
85 
86   /// The address of the GOT
87   GLOBAL_OFFSET_TABLE,
88 
89   /// FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
90   /// llvm.returnaddress on the DAG.  These nodes take one operand, the index
91   /// of the frame or return address to return.  An index of zero corresponds
92   /// to the current function's frame or return address, an index of one to
93   /// the parent's frame or return address, and so on.
94   FRAMEADDR,
95   RETURNADDR,
96 
97   /// ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
98   /// This node takes no operand, returns a target-specific pointer to the
99   /// place in the stack frame where the return address of the current
100   /// function is stored.
101   ADDROFRETURNADDR,
102 
103   /// SPONENTRY - Represents the llvm.sponentry intrinsic. Takes no argument
104   /// and returns the stack pointer value at the entry of the current
105   /// function calling this intrinsic.
106   SPONENTRY,
107 
108   /// LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
109   /// Materializes the offset from the local object pointer of another
110   /// function to a particular local object passed to llvm.localescape. The
111   /// operand is the MCSymbol label used to represent this offset, since
112   /// typically the offset is not known until after code generation of the
113   /// parent.
114   LOCAL_RECOVER,
115 
  /// READ_REGISTER, WRITE_REGISTER - These nodes represent the
  /// llvm.read_register and llvm.write_register intrinsics on the DAG, which
  /// implement the named register global variables extension.
118   READ_REGISTER,
119   WRITE_REGISTER,
120 
121   /// FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
122   /// first (possible) on-stack argument. This is needed for correct stack
123   /// adjustment during unwind.
124   FRAME_TO_ARGS_OFFSET,
125 
126   /// EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical
127   /// Frame Address (CFA), generally the value of the stack pointer at the
128   /// call site in the previous frame.
129   EH_DWARF_CFA,
130 
131   /// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
132   /// 'eh_return' gcc dwarf builtin, which is used to return from
133   /// exception. The general meaning is: adjust stack by OFFSET and pass
134   /// execution to HANDLER. Many platform-related details also :)
135   EH_RETURN,
136 
137   /// RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
138   /// This corresponds to the eh.sjlj.setjmp intrinsic.
139   /// It takes an input chain and a pointer to the jump buffer as inputs
140   /// and returns an outchain.
141   EH_SJLJ_SETJMP,
142 
143   /// OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
144   /// This corresponds to the eh.sjlj.longjmp intrinsic.
145   /// It takes an input chain and a pointer to the jump buffer as inputs
146   /// and returns an outchain.
147   EH_SJLJ_LONGJMP,
148 
149   /// OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN)
150   /// The target initializes the dispatch table here.
151   EH_SJLJ_SETUP_DISPATCH,
152 
153   /// TargetConstant* - Like Constant*, but the DAG does not do any folding,
154   /// simplification, or lowering of the constant. They are used for constants
155   /// which are known to fit in the immediate fields of their users, or for
156   /// carrying magic numbers which are not values which need to be
157   /// materialized in registers.
158   TargetConstant,
159   TargetConstantFP,
160 
161   /// TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
162   /// anything else with this node, and this is valid in the target-specific
163   /// dag, turning into a GlobalAddress operand.
164   TargetGlobalAddress,
165   TargetGlobalTLSAddress,
166   TargetFrameIndex,
167   TargetJumpTable,
168   TargetConstantPool,
169   TargetExternalSymbol,
170   TargetBlockAddress,
171 
172   MCSymbol,
173 
174   /// TargetIndex - Like a constant pool entry, but with completely
175   /// target-dependent semantics. Holds target flags, a 32-bit index, and a
176   /// 64-bit index. Targets can use this however they like.
177   TargetIndex,
178 
179   /// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
180   /// This node represents a target intrinsic function with no side effects.
181   /// The first operand is the ID number of the intrinsic from the
182   /// llvm::Intrinsic namespace.  The operands to the intrinsic follow.  The
183   /// node returns the result of the intrinsic.
184   INTRINSIC_WO_CHAIN,
185 
186   /// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
187   /// This node represents a target intrinsic function with side effects that
188   /// returns a result.  The first operand is a chain pointer.  The second is
189   /// the ID number of the intrinsic from the llvm::Intrinsic namespace.  The
190   /// operands to the intrinsic follow.  The node has two results, the result
191   /// of the intrinsic and an output chain.
192   INTRINSIC_W_CHAIN,
193 
194   /// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...)
195   /// This node represents a target intrinsic function with side effects that
196   /// does not return a result.  The first operand is a chain pointer.  The
197   /// second is the ID number of the intrinsic from the llvm::Intrinsic
198   /// namespace.  The operands to the intrinsic follow.
199   INTRINSIC_VOID,
200 
201   /// CopyToReg - This node has three operands: a chain, a register number to
202   /// set to this value, and a value.
203   CopyToReg,
204 
205   /// CopyFromReg - This node indicates that the input value is a virtual or
206   /// physical register that is defined outside of the scope of this
207   /// SelectionDAG.  The register is available from the RegisterSDNode object.
208   CopyFromReg,
209 
210   /// UNDEF - An undefined node.
211   UNDEF,
212 
  /// FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or
  /// is evaluated to UNDEF), or returns VAL otherwise. Note that each
  /// read of UNDEF can yield a different value, but FREEZE(UNDEF) cannot.
216   FREEZE,
217 
218   /// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
219   /// a Constant, which is required to be operand #1) half of the integer or
220   /// float value specified as operand #0.  This is only for use before
221   /// legalization, for values that will be broken into multiple registers.
222   EXTRACT_ELEMENT,
223 
224   /// BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
225   /// Given two values of the same integer value type, this produces a value
226   /// twice as big.  Like EXTRACT_ELEMENT, this can only be used before
227   /// legalization. The lower part of the composite value should be in
228   /// element 0 and the upper part should be in element 1.
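  ///
  /// For example (illustrative, assuming an i64 value X on a 32-bit target):
  ///   Lo = EXTRACT_ELEMENT(X, 0)   (least significant 32 bits)
  ///   Hi = EXTRACT_ELEMENT(X, 1)   (most significant 32 bits)
  ///   X  = BUILD_PAIR(Lo, Hi)      (reassembles the original i64)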
229   BUILD_PAIR,
230 
231   /// MERGE_VALUES - This node takes multiple discrete operands and returns
  /// them all as its individual results.  This node has exactly the same
233   /// number of inputs and outputs. This node is useful for some pieces of the
234   /// code generator that want to think about a single node with multiple
235   /// results, not multiple nodes.
236   MERGE_VALUES,
237 
238   /// Simple integer binary arithmetic operators.
239   ADD,
240   SUB,
241   MUL,
242   SDIV,
243   UDIV,
244   SREM,
245   UREM,
246 
247   /// SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
248   /// a signed/unsigned value of type i[2*N], and return the full value as
249   /// two results, each of type iN.
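  ///
  /// For example (illustrative, N = 8): UMUL_LOHI(i8 100, i8 20) computes
  /// 2000 = 0x07D0 and returns Lo = 0xD0 and Hi = 0x07.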
250   SMUL_LOHI,
251   UMUL_LOHI,
252 
253   /// SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
254   /// remainder result.
255   SDIVREM,
256   UDIVREM,
257 
258   /// CARRY_FALSE - This node is used when folding other nodes,
259   /// like ADDC/SUBC, which indicate the carry result is always false.
260   CARRY_FALSE,
261 
262   /// Carry-setting nodes for multiple precision addition and subtraction.
263   /// These nodes take two operands of the same value type, and produce two
264   /// results.  The first result is the normal add or sub result, the second
265   /// result is the carry flag result.
266   /// FIXME: These nodes are deprecated in favor of UADDO_CARRY and USUBO_CARRY.
267   /// They are kept around for now to provide a smooth transition path
268   /// toward the use of UADDO_CARRY/USUBO_CARRY and will eventually be removed.
269   ADDC,
270   SUBC,
271 
272   /// Carry-using nodes for multiple precision addition and subtraction. These
273   /// nodes take three operands: The first two are the normal lhs and rhs to
274   /// the add or sub, and the third is the input carry flag.  These nodes
275   /// produce two results; the normal result of the add or sub, and the output
  /// carry flag.  These nodes both read and write a carry flag, allowing them
  /// to be chained together for add and sub of arbitrarily large values.
279   ADDE,
280   SUBE,
281 
282   /// Carry-using nodes for multiple precision addition and subtraction.
283   /// These nodes take three operands: The first two are the normal lhs and
284   /// rhs to the add or sub, and the third is a boolean value that is 1 if and
285   /// only if there is an incoming carry/borrow. These nodes produce two
286   /// results: the normal result of the add or sub, and a boolean value that is
287   /// 1 if and only if there is an outgoing carry/borrow.
288   ///
289   /// Care must be taken if these opcodes are lowered to hardware instructions
290   /// that use the inverse logic -- 0 if and only if there is an
291   /// incoming/outgoing carry/borrow.  In such cases, you must preserve the
292   /// semantics of these opcodes by inverting the incoming carry/borrow, feeding
293   /// it to the add/sub hardware instruction, and then inverting the outgoing
294   /// carry/borrow.
295   ///
296   /// The use of these opcodes is preferable to adde/sube if the target supports
297   /// it, as the carry is a regular value rather than a glue, which allows
298   /// further optimisation.
299   ///
300   /// These opcodes are different from [US]{ADD,SUB}O in that
301   /// U{ADD,SUB}O_CARRY consume and produce a carry/borrow, whereas
302   /// [US]{ADD,SUB}O produce an overflow.
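  ///
  /// For example (an illustrative expansion of a 64-bit add on a 32-bit
  /// target):
  ///   Lo, Carry    = UADDO(LHSLo, RHSLo)
  ///   Hi, CarryOut = UADDO_CARRY(LHSHi, RHSHi, Carry)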
303   UADDO_CARRY,
304   USUBO_CARRY,
305 
306   /// Carry-using overflow-aware nodes for multiple precision addition and
307   /// subtraction. These nodes take three operands: The first two are normal lhs
308   /// and rhs to the add or sub, and the third is a boolean indicating if there
309   /// is an incoming carry. They produce two results: the normal result of the
310   /// add or sub, and a boolean that indicates if an overflow occurred (*not*
  /// a flag, because it may be a store to memory, etc.). If the type of the
312   /// boolean is not i1 then the high bits conform to getBooleanContents.
313   SADDO_CARRY,
314   SSUBO_CARRY,
315 
316   /// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
317   /// These nodes take two operands: the normal LHS and RHS to the add. They
318   /// produce two results: the normal result of the add, and a boolean that
  /// indicates if an overflow occurred (*not* a flag, because it may be a
  /// store to memory, etc.).  If the type of the boolean is not i1 then the
  /// high bits conform to getBooleanContents.
322   /// These nodes are generated from llvm.[su]add.with.overflow intrinsics.
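  ///
  /// For example (illustrative, i8): UADDO(200, 100) produces (44, true),
  /// while SADDO(100, 28) produces (-128, true).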
323   SADDO,
324   UADDO,
325 
326   /// Same for subtraction.
327   SSUBO,
328   USUBO,
329 
330   /// Same for multiplication.
331   SMULO,
332   UMULO,
333 
334   /// RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2
335   /// integers with the same bit width (W). If the true value of LHS + RHS
336   /// exceeds the largest value that can be represented by W bits, the
337   /// resulting value is this maximum value. Otherwise, if this value is less
338   /// than the smallest value that can be represented by W bits, the
339   /// resulting value is this minimum value.
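  ///
  /// For example (illustrative, W = 8): SADDSAT(i8 100, i8 100) = 127 and
  /// UADDSAT(i8 200, i8 100) = 255.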
340   SADDSAT,
341   UADDSAT,
342 
343   /// RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2
344   /// integers with the same bit width (W). If the true value of LHS - RHS
345   /// exceeds the largest value that can be represented by W bits, the
346   /// resulting value is this maximum value. Otherwise, if this value is less
347   /// than the smallest value that can be represented by W bits, the
348   /// resulting value is this minimum value.
349   SSUBSAT,
350   USUBSAT,
351 
352   /// RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift. The first
353   /// operand is the value to be shifted, and the second argument is the amount
354   /// to shift by. Both must be integers of the same bit width (W). If the true
355   /// value of LHS << RHS exceeds the largest value that can be represented by
  /// W bits, the resulting value is this maximum value. Otherwise, if this
357   /// value is less than the smallest value that can be represented by W bits,
358   /// the resulting value is this minimum value.
359   SSHLSAT,
360   USHLSAT,
361 
362   /// RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication
363   /// on 2 integers with the same width and scale. SCALE represents the scale
364   /// of both operands as fixed point numbers. This SCALE parameter must be a
365   /// constant integer. A scale of zero is effectively performing
366   /// multiplication on 2 integers.
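  ///
  /// For example (illustrative): with SCALE = 4 the operands are treated as
  /// fixed point values with 4 fractional bits, so UMULFIX(i32 48, i32 24, 4)
  /// computes 3.0 * 1.5 = 4.5 and returns 72 (4.5 with 4 fractional bits).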
367   SMULFIX,
368   UMULFIX,
369 
370   /// Same as the corresponding unsaturated fixed point instructions, but the
371   /// result is clamped between the min and max values representable by the
372   /// bits of the first 2 operands.
373   SMULFIXSAT,
374   UMULFIXSAT,
375 
376   /// RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on
377   /// 2 integers with the same width and scale. SCALE represents the scale
378   /// of both operands as fixed point numbers. This SCALE parameter must be a
379   /// constant integer.
380   SDIVFIX,
381   UDIVFIX,
382 
383   /// Same as the corresponding unsaturated fixed point instructions, but the
384   /// result is clamped between the min and max values representable by the
385   /// bits of the first 2 operands.
386   SDIVFIXSAT,
387   UDIVFIXSAT,
388 
389   /// Simple binary floating point operators.
390   FADD,
391   FSUB,
392   FMUL,
393   FDIV,
394   FREM,
395 
396   /// Constrained versions of the binary floating point operators.
397   /// These will be lowered to the simple operators before final selection.
398   /// They are used to limit optimizations while the DAG is being
399   /// optimized.
400   STRICT_FADD,
401   STRICT_FSUB,
402   STRICT_FMUL,
403   STRICT_FDIV,
404   STRICT_FREM,
405   STRICT_FMA,
406 
407   /// Constrained versions of libm-equivalent floating point intrinsics.
408   /// These will be lowered to the equivalent non-constrained pseudo-op
409   /// (or expanded to the equivalent library call) before final selection.
410   /// They are used to limit optimizations while the DAG is being optimized.
411   STRICT_FSQRT,
412   STRICT_FPOW,
413   STRICT_FPOWI,
414   STRICT_FLDEXP,
415   STRICT_FSIN,
416   STRICT_FCOS,
417   STRICT_FEXP,
418   STRICT_FEXP2,
419   STRICT_FLOG,
420   STRICT_FLOG10,
421   STRICT_FLOG2,
422   STRICT_FRINT,
423   STRICT_FNEARBYINT,
424   STRICT_FMAXNUM,
425   STRICT_FMINNUM,
426   STRICT_FCEIL,
427   STRICT_FFLOOR,
428   STRICT_FROUND,
429   STRICT_FROUNDEVEN,
430   STRICT_FTRUNC,
431   STRICT_LROUND,
432   STRICT_LLROUND,
433   STRICT_LRINT,
434   STRICT_LLRINT,
435   STRICT_FMAXIMUM,
436   STRICT_FMINIMUM,
437 
438   /// STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or
439   /// unsigned integer. These have the same semantics as fptosi and fptoui
440   /// in IR.
441   /// They are used to limit optimizations while the DAG is being optimized.
442   STRICT_FP_TO_SINT,
443   STRICT_FP_TO_UINT,
444 
445   /// STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to
446   /// a floating point value. These have the same semantics as sitofp and
447   /// uitofp in IR.
448   /// They are used to limit optimizations while the DAG is being optimized.
449   STRICT_SINT_TO_FP,
450   STRICT_UINT_TO_FP,
451 
452   /// X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating
453   /// point type down to the precision of the destination VT.  TRUNC is a
454   /// flag, which is always an integer that is zero or one.  If TRUNC is 0,
455   /// this is a normal rounding, if it is 1, this FP_ROUND is known to not
456   /// change the value of Y.
457   ///
458   /// The TRUNC = 1 case is used in cases where we know that the value will
459   /// not be modified by the node, because Y is not using any of the extra
460   /// precision of source type.  This allows certain transformations like
461   /// STRICT_FP_EXTEND(STRICT_FP_ROUND(X,1)) -> X which are not safe for
462   /// STRICT_FP_EXTEND(STRICT_FP_ROUND(X,0)) because the extra bits aren't
463   /// removed.
464   /// It is used to limit optimizations while the DAG is being optimized.
465   STRICT_FP_ROUND,
466 
467   /// X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP
468   /// type.
469   /// It is used to limit optimizations while the DAG is being optimized.
470   STRICT_FP_EXTEND,
471 
472   /// STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used
473   /// for floating-point operands only.  STRICT_FSETCC performs a quiet
474   /// comparison operation, while STRICT_FSETCCS performs a signaling
475   /// comparison operation.
476   STRICT_FSETCC,
477   STRICT_FSETCCS,
478 
  /// FPTRUNC_ROUND - This corresponds to the llvm.fptrunc.round intrinsic.
480   FPTRUNC_ROUND,
481 
482   /// FMA - Perform a * b + c with no intermediate rounding step.
483   FMA,
484 
485   /// FMAD - Perform a * b + c, while getting the same result as the
486   /// separately rounded operations.
487   FMAD,
488 
489   /// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.  NOTE: This
490   /// DAG node does not require that X and Y have the same type, just that
491   /// they are both floating point.  X and the result must have the same type.
492   /// FCOPYSIGN(f32, f64) is allowed.
493   FCOPYSIGN,
494 
495   /// INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
496   /// value as an integer 0/1 value.
497   FGETSIGN,
498 
499   /// Returns platform specific canonical encoding of a floating point number.
500   FCANONICALIZE,
501 
  /// Performs a check of a floating point class property, defined by IEEE-754.
  /// The first operand is the floating point value to check. The second operand
  /// specifies the checked property and is a TargetConstant which encodes the
  /// test in the same way as the 'llvm.is.fpclass' intrinsic.
  /// Returns a boolean value.
507   IS_FPCLASS,
508 
509   /// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector
510   /// with the specified, possibly variable, elements. The types of the
511   /// operands must match the vector element type, except that integer types
512   /// are allowed to be larger than the element type, in which case the
513   /// operands are implicitly truncated. The types of the operands must all
514   /// be the same.
515   BUILD_VECTOR,
516 
517   /// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element
518   /// at IDX replaced with VAL. If the type of VAL is larger than the vector
519   /// element type then VAL is truncated before replacement.
520   ///
521   /// If VECTOR is a scalable vector, then IDX may be larger than the minimum
522   /// vector width. IDX is not first scaled by the runtime scaling factor of
523   /// VECTOR.
524   INSERT_VECTOR_ELT,
525 
526   /// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR
527   /// identified by the (potentially variable) element number IDX. If the return
528   /// type is an integer type larger than the element type of the vector, the
529   /// result is extended to the width of the return type. In that case, the high
530   /// bits are undefined.
531   ///
532   /// If VECTOR is a scalable vector, then IDX may be larger than the minimum
533   /// vector width. IDX is not first scaled by the runtime scaling factor of
534   /// VECTOR.
535   EXTRACT_VECTOR_ELT,
536 
537   /// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of
538   /// vector type with the same length and element type, this produces a
539   /// concatenated vector result value, with length equal to the sum of the
540   /// lengths of the input vectors. If VECTOR0 is a fixed-width vector, then
541   /// VECTOR1..VECTORN must all be fixed-width vectors. Similarly, if VECTOR0
542   /// is a scalable vector, then VECTOR1..VECTORN must all be scalable vectors.
543   CONCAT_VECTORS,
544 
  /// INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2
  /// inserted into VECTOR1. IDX represents the starting element number at which
  /// VECTOR2 will be inserted. Let the type of VECTOR2 be T; IDX must be a
  /// constant multiple of T's known minimum vector length, and if T is a
  /// scalable vector, IDX is first scaled by the runtime scaling factor of T.
  /// The elements of VECTOR1 starting at IDX are overwritten with VECTOR2.
551   /// Elements IDX through (IDX + num_elements(T) - 1) must be valid VECTOR1
552   /// indices. If this condition cannot be determined statically but is false at
553   /// runtime, then the result vector is undefined. The IDX parameter must be a
554   /// vector index constant type, which for most targets will be an integer
555   /// pointer type.
556   ///
557   /// This operation supports inserting a fixed-width vector into a scalable
558   /// vector, but not the other way around.
559   INSERT_SUBVECTOR,
560 
561   /// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
562   /// Let the result type be T, then IDX represents the starting element number
563   /// from which a subvector of type T is extracted. IDX must be a constant
564   /// multiple of T's known minimum vector length. If T is a scalable vector,
565   /// IDX is first scaled by the runtime scaling factor of T. Elements IDX
566   /// through (IDX + num_elements(T) - 1) must be valid VECTOR indices. If this
567   /// condition cannot be determined statically but is false at runtime, then
568   /// the result vector is undefined. The IDX parameter must be a vector index
569   /// constant type, which for most targets will be an integer pointer type.
570   ///
571   /// This operation supports extracting a fixed-width vector from a scalable
572   /// vector, but not the other way around.
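  ///
  /// For example (illustrative): EXTRACT_SUBVECTOR(v8i32 V, 4) with result
  /// type v4i32 extracts the upper half of V. With an nxv8i32 source and an
  /// nxv4i32 result, IDX = 4 is scaled by vscale and likewise selects the
  /// upper half.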
573   EXTRACT_SUBVECTOR,
574 
575   /// VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and
576   /// output vectors having the same type. The first output contains the even
577   /// indices from CONCAT_VECTORS(VEC1, VEC2), with the second output
578   /// containing the odd indices. The relative order of elements within an
  /// output matches that of the concatenated input.
580   VECTOR_DEINTERLEAVE,
581 
582   /// VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and
583   /// output vectors having the same type. The first output contains the
584   /// result of interleaving the low half of CONCAT_VECTORS(VEC1, VEC2), with
585   /// the second output containing the result of interleaving the high half.
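  ///
  /// For example (illustrative, 4-element vectors):
  ///   VECTOR_INTERLEAVE(<A0,A1,A2,A3>, <B0,B1,B2,B3>)
  ///     = <A0,B0,A1,B1>, <A2,B2,A3,B3>
  ///   VECTOR_DEINTERLEAVE(<A0,B0,A1,B1>, <A2,B2,A3,B3>)
  ///     = <A0,A1,A2,A3>, <B0,B1,B2,B3>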
586   VECTOR_INTERLEAVE,
587 
588   /// VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR,
589   /// whose elements are shuffled using the following algorithm:
590   ///   RESULT[i] = VECTOR[VECTOR.ElementCount - 1 - i]
591   VECTOR_REVERSE,
592 
593   /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
594   /// VEC1/VEC2.  A VECTOR_SHUFFLE node also contains an array of constant int
595   /// values that indicate which value (or undef) each result element will
596   /// get.  These constant ints are accessible through the
597   /// ShuffleVectorSDNode class.  This is quite similar to the Altivec
598   /// 'vperm' instruction, except that the indices must be constants and are
599   /// in terms of the element size of VEC1/VEC2, not in terms of bytes.
600   VECTOR_SHUFFLE,
601 
602   /// VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as
603   /// VEC1/VEC2 from CONCAT_VECTORS(VEC1, VEC2), based on the IMM in two ways.
604   /// Let the result type be T, if IMM is positive it represents the starting
605   /// element number (an index) from which a subvector of type T is extracted
606   /// from CONCAT_VECTORS(VEC1, VEC2). If IMM is negative it represents a count
607   /// specifying the number of trailing elements to extract from VEC1, where the
608   /// elements of T are selected using the following algorithm:
609   ///   RESULT[i] = CONCAT_VECTORS(VEC1,VEC2)[VEC1.ElementCount - ABS(IMM) + i]
610   /// If IMM is not in the range [-VL, VL-1] the result vector is undefined. IMM
611   /// is a constant integer.
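  ///
  /// For example (illustrative, 4-element vectors):
  ///   VECTOR_SPLICE(<A0,A1,A2,A3>, <B0,B1,B2,B3>, 1)  = <A1,A2,A3,B0>
  ///   VECTOR_SPLICE(<A0,A1,A2,A3>, <B0,B1,B2,B3>, -1) = <A3,B0,B1,B2>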
612   VECTOR_SPLICE,
613 
614   /// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a
615   /// scalar value into element 0 of the resultant vector type.  The top
616   /// elements 1 to N-1 of the N-element vector are undefined.  The type
617   /// of the operand must match the vector element type, except when they
618   /// are integer types.  In this case the operand is allowed to be wider
619   /// than the vector element type, and is implicitly truncated to it.
620   SCALAR_TO_VECTOR,
621 
622   /// SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL
623   /// duplicated in all lanes. The type of the operand must match the vector
624   /// element type, except when they are integer types.  In this case the
625   /// operand is allowed to be wider than the vector element type, and is
626   /// implicitly truncated to it.
627   SPLAT_VECTOR,
628 
629   /// SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the
630   /// scalar values joined together and then duplicated in all lanes. This
631   /// represents a SPLAT_VECTOR that has had its scalar operand expanded. This
632   /// allows representing a 64-bit splat on a target with 32-bit integers. The
633   /// total width of the scalars must cover the element width. SCALAR1 contains
634   /// the least significant bits of the value regardless of endianness and all
635   /// scalars should have the same type.
636   SPLAT_VECTOR_PARTS,
637 
638   /// STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised
639   /// of a linear sequence of unsigned values starting from 0 with a step of
640   /// IMM, where IMM must be a TargetConstant with type equal to the vector
641   /// element type. The arithmetic is performed modulo the bitwidth of the
642   /// element.
643   ///
644   /// The operation does not support returning fixed-width vectors or
645   /// non-constant operands.
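  ///
  /// For example (illustrative): STEP_VECTOR(2) with result type nxv4i32
  /// produces <0, 2, 4, 6, 8, ...> across all lanes of the scalable vector.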
646   STEP_VECTOR,
647 
648   /// MULHU/MULHS - Multiply high - Multiply two integers of type iN,
649   /// producing an unsigned/signed value of type i[2*N], then return the top
650   /// part.
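  ///
  /// For example (illustrative, N = 8): MULHU(200, 3) = 2 (600 = 0x0258),
  /// while MULHS(-56, 3) = -1 (-168 = 0xFF58 as an i16).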
651   MULHU,
652   MULHS,
653 
654   /// AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of
655   /// type i[N+1], halving the result by shifting it one bit right.
656   /// shr(add(ext(X), ext(Y)), 1)
657   AVGFLOORS,
658   AVGFLOORU,
659   /// AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an
660   /// integer of type i[N+2], add 1 and halve the result by shifting it one bit
661   /// right. shr(add(ext(X), ext(Y), 1), 1)
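  ///
  /// For example (illustrative, i8): AVGCEILU(7, 8) = 8 whereas
  /// AVGFLOORU(7, 8) = 7, and AVGCEILU(255, 255) = 255 without wrapping.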
662   AVGCEILS,
663   AVGCEILU,
664 
665   // ABDS/ABDU - Absolute difference - Return the absolute difference between
666   // two numbers interpreted as signed/unsigned.
  // i.e. trunc(abs(sext(Op0) - sext(Op1))) becomes abds(Op0, Op1)
668   //  or trunc(abs(zext(Op0) - zext(Op1))) becomes abdu(Op0, Op1)
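  //
  // For example (illustrative, i8): ABDS(-1, 1) = 2, whereas ABDU(-1, 1) = 254
  // because the bit pattern 0xFF is interpreted as 255.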
669   ABDS,
670   ABDU,
671 
672   /// [US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned
673   /// integers.
674   SMIN,
675   SMAX,
676   UMIN,
677   UMAX,
678 
679   /// Bitwise operators - logical and, logical or, logical xor.
680   AND,
681   OR,
682   XOR,
683 
684   /// ABS - Determine the unsigned absolute value of a signed integer value of
685   /// the same bitwidth.
686   /// Note: A value of INT_MIN will return INT_MIN, no saturation or overflow
687   /// is performed.
688   ABS,
689 
690   /// Shift and rotation operations.  After legalization, the type of the
691   /// shift amount is known to be TLI.getShiftAmountTy().  Before legalization
692   /// the shift amount can be any type, but care must be taken to ensure it is
693   /// large enough.  TLI.getShiftAmountTy() is i8 on some targets, but before
694   /// legalization, types like i1024 can occur and i8 doesn't have enough bits
695   /// to represent the shift amount.
696   /// When the 1st operand is a vector, the shift amount must be in the same
697   /// type. (TLI.getShiftAmountTy() will return the same type when the input
698   /// type is a vector.)
699   /// For rotates and funnel shifts, the shift amount is treated as an unsigned
700   /// amount modulo the element size of the first operand.
701   ///
702   /// Funnel 'double' shifts take 3 operands, 2 inputs and the shift amount.
703   /// fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
704   /// fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
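  ///
  /// For example (illustrative, i8 operands): FSHL(0x12, 0x34, 4) = 0x23,
  /// FSHR(0xAB, 0x34, 4) = 0xB3, and ROTL(0x81, 1) = 0x03.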
705   SHL,
706   SRA,
707   SRL,
708   ROTL,
709   ROTR,
710   FSHL,
711   FSHR,
712 
713   /// Byte Swap and Counting operators.
714   BSWAP,
715   CTTZ,
716   CTLZ,
717   CTPOP,
718   BITREVERSE,
719   PARITY,
720 
721   /// Bit counting operators with an undefined result for zero inputs.
722   CTTZ_ZERO_UNDEF,
723   CTLZ_ZERO_UNDEF,
724 
725   /// Select(COND, TRUEVAL, FALSEVAL).  If the type of the boolean COND is not
726   /// i1 then the high bits must conform to getBooleanContents.
727   SELECT,
728 
729   /// Select with a vector condition (op #0) and two vector operands (ops #1
730   /// and #2), returning a vector result.  All vectors have the same length.
731   /// Much like the scalar select and setcc, each bit in the condition selects
732   /// whether the corresponding result element is taken from op #1 or op #2.
733   /// At first, the VSELECT condition is of vXi1 type. Later, targets may
734   /// change the condition type in order to match the VSELECT node using a
735   /// pattern. The condition follows the BooleanContent format of the target.
736   VSELECT,
737 
738   /// Select with condition operator - This selects between a true value and
739   /// a false value (ops #2 and #3) based on the boolean result of comparing
740   /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
741   /// condition code in op #4, a CondCodeSDNode.
742   SELECT_CC,
743 
744   /// SetCC operator - This evaluates to a true value iff the condition is
745   /// true.  If the result value type is not i1 then the high bits conform
746   /// to getBooleanContents.  The operands to this are the left and right
747   /// operands to compare (ops #0, and #1) and the condition code to compare
748   /// them with (op #2) as a CondCodeSDNode. If the operands are vector types
749   /// then the result type must also be a vector type.
750   SETCC,
751 
752   /// Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but
753   /// op #2 is a boolean indicating if there is an incoming carry. This
754   /// operator checks the result of "LHS - RHS - Carry", and can be used to
755   /// compare two wide integers:
756   /// (setcccarry lhshi rhshi (usubo_carry lhslo rhslo) cc).
757   /// Only valid for integers.
758   SETCCCARRY,
759 
760   /// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
761   /// integer shift operations.  The operation ordering is:
762   ///       [Lo,Hi] = op [LoLHS,HiLHS], Amt
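  ///
  /// For example (an illustrative expansion of a 64-bit shift using 32-bit
  /// parts): [Lo, Hi] = SHL_PARTS [0x1, 0x0], 33 yields Lo = 0x0 and Hi = 0x2,
  /// i.e. the 64-bit value 1 shifted left by 33.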
763   SHL_PARTS,
764   SRA_PARTS,
765   SRL_PARTS,
766 
767   /// Conversion operators.  These are all single input single output
768   /// operations.  For all of these, the result type must be strictly
769   /// wider or narrower (depending on the operation) than the source
770   /// type.
771 
772   /// SIGN_EXTEND - Used for integer types, replicating the sign bit
773   /// into new bits.
774   SIGN_EXTEND,
775 
776   /// ZERO_EXTEND - Used for integer types, zeroing the new bits. Can carry
777   /// the NonNeg SDNodeFlag to indicate that the input is known to be
778   /// non-negative. If the flag is present and the input is negative, the result
779   /// is poison.
780   ZERO_EXTEND,
781 
782   /// ANY_EXTEND - Used for integer types.  The high bits are undefined.
783   ANY_EXTEND,
784 
785   /// TRUNCATE - Completely drop the high bits.
786   TRUNCATE,
787 
788   /// [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
789   /// depends on the first letter) to floating point.
790   SINT_TO_FP,
791   UINT_TO_FP,
792 
793   /// SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
794   /// sign extend a small value in a large integer register (e.g. sign
795   /// extending the low 8 bits of a 32-bit register to fill the top 24 bits
  /// with the 7th bit).  The size of the smaller type is indicated by operand
  /// #1, a ValueType node.
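  ///
  /// For example (illustrative): SIGN_EXTEND_INREG(i32 0x000000FF, i8) yields
  /// i32 0xFFFFFFFF, since the low 8 bits represent -1.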
798   SIGN_EXTEND_INREG,
799 
800   /// ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an
801   /// in-register any-extension of the low lanes of an integer vector. The
802   /// result type must have fewer elements than the operand type, and those
803   /// elements must be larger integer types such that the total size of the
804   /// operand type is less than or equal to the size of the result type. Each
805   /// of the low operand elements is any-extended into the corresponding,
806   /// wider result elements with the high bits becoming undef.
807   /// NOTE: The type legalizer prefers to make the operand and result size
808   /// the same to allow expansion to shuffle vector during op legalization.
809   ANY_EXTEND_VECTOR_INREG,
810 
811   /// SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an
812   /// in-register sign-extension of the low lanes of an integer vector. The
813   /// result type must have fewer elements than the operand type, and those
814   /// elements must be larger integer types such that the total size of the
815   /// operand type is less than or equal to the size of the result type. Each
816   /// of the low operand elements is sign-extended into the corresponding,
817   /// wider result elements.
818   /// NOTE: The type legalizer prefers to make the operand and result size
819   /// the same to allow expansion to shuffle vector during op legalization.
820   SIGN_EXTEND_VECTOR_INREG,
821 
822   /// ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an
823   /// in-register zero-extension of the low lanes of an integer vector. The
824   /// result type must have fewer elements than the operand type, and those
825   /// elements must be larger integer types such that the total size of the
826   /// operand type is less than or equal to the size of the result type. Each
827   /// of the low operand elements is zero-extended into the corresponding,
828   /// wider result elements.
829   /// NOTE: The type legalizer prefers to make the operand and result size
830   /// the same to allow expansion to shuffle vector during op legalization.
831   ZERO_EXTEND_VECTOR_INREG,
832 
833   /// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
834   /// integer. These have the same semantics as fptosi and fptoui in IR. If
835   /// the FP value cannot fit in the integer type, the results are undefined.
836   FP_TO_SINT,
837   FP_TO_UINT,
838 
839   /// FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a
840   /// signed or unsigned scalar integer type given in operand 1 with the
841   /// following semantics:
842   ///
843   ///  * If the value is NaN, zero is returned.
844   ///  * If the value is larger/smaller than the largest/smallest integer,
845   ///    the largest/smallest integer is returned (saturation).
846   ///  * Otherwise the result of rounding the value towards zero is returned.
847   ///
848   /// The scalar width of the type given in operand 1 must be equal to, or
849   /// smaller than, the scalar result type width. It may end up being smaller
850   /// than the result width as a result of integer type legalization.
851   ///
852   /// After converting to the scalar integer type in operand 1, the value is
853   /// extended to the result VT. FP_TO_SINT_SAT sign extends and FP_TO_UINT_SAT
854   /// zero extends.
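  ///
  /// For example (illustrative): FP_TO_SINT_SAT(f32 1.0e10, i16) returns
  /// 32767, FP_TO_UINT_SAT(f32 -3.5, i16) returns 0, and either returns 0
  /// for a NaN input.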
855   FP_TO_SINT_SAT,
856   FP_TO_UINT_SAT,
857 
858   /// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type
859   /// down to the precision of the destination VT.  TRUNC is a flag, which is
860   /// always an integer that is zero or one.  If TRUNC is 0, this is a
861   /// normal rounding, if it is 1, this FP_ROUND is known to not change the
862   /// value of Y.
863   ///
864   /// The TRUNC = 1 case is used in cases where we know that the value will
865   /// not be modified by the node, because Y is not using any of the extra
866   /// precision of source type.  This allows certain transformations like
867   /// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for
868   /// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
869   FP_ROUND,
870 
871   /// Returns current rounding mode:
872   /// -1 Undefined
873   ///  0 Round to 0
874   ///  1 Round to nearest, ties to even
875   ///  2 Round to +inf
876   ///  3 Round to -inf
877   ///  4 Round to nearest, ties to zero
878   ///  Other values are target dependent.
879   /// Result is rounding mode and chain. Input is a chain.
880   GET_ROUNDING,
881 
882   /// Set rounding mode.
883   /// The first operand is a chain pointer. The second specifies the required
884   /// rounding mode, encoded in the same way as used in '``GET_ROUNDING``'.
885   SET_ROUNDING,
886 
887   /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
888   FP_EXTEND,
889 
890   /// BITCAST - This operator converts between integer, vector and FP
891   /// values, as if the value was stored to memory with one type and loaded
892   /// from the same address with the other type (or equivalently for vector
893   /// format conversions, etc).  The source and result are required to have
894   /// the same bit size (e.g.  f32 <-> i32).  This can also be used for
895   /// int-to-int or fp-to-fp conversions, but that is a noop, deleted by
896   /// getNode().
897   ///
898   /// This operator is subtly different from the bitcast instruction from
899   /// LLVM-IR since this node may change the bits in the register. For
900   /// example, this occurs on big-endian NEON and big-endian MSA where the
901   /// layout of the bits in the register depends on the vector type and this
902   /// operator acts as a shuffle operation for some vector type combinations.
903   BITCAST,
904 
905   /// ADDRSPACECAST - This operator converts between pointers of different
906   /// address spaces.
907   ADDRSPACECAST,
908 
909   /// FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions
  /// and truncation for half-precision (16 bit) floating-point numbers. These
  /// nodes form a semi-softened interface for dealing with f16 (as an i16),
  /// which is often a storage-only type but has native conversions.
913   FP16_TO_FP,
914   FP_TO_FP16,
915   STRICT_FP16_TO_FP,
916   STRICT_FP_TO_FP16,
917 
918   /// BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions
919   /// and truncation for bfloat16. These nodes form a semi-softened interface
920   /// for dealing with bf16 (as an i16), which is often a storage-only type but
921   /// has native conversions.
922   BF16_TO_FP,
923   FP_TO_BF16,
924   STRICT_BF16_TO_FP,
925   STRICT_FP_TO_BF16,
926 
927   /// Perform various unary floating-point operations inspired by libm. For
928   /// FPOWI, the result is undefined if the integer operand doesn't fit into
929   /// sizeof(int).
930   FNEG,
931   FABS,
932   FSQRT,
933   FCBRT,
934   FSIN,
935   FCOS,
936   FPOW,
937   FPOWI,
938   /// FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
939   FLDEXP,
940 
941   /// FFREXP - frexp, extract fractional and exponent component of a
942   /// floating-point value. Returns the two components as separate return
943   /// values.
944   FFREXP,
945 
946   FLOG,
947   FLOG2,
948   FLOG10,
949   FEXP,
950   FEXP2,
951   FEXP10,
952   FCEIL,
953   FTRUNC,
954   FRINT,
955   FNEARBYINT,
956   FROUND,
957   FROUNDEVEN,
958   FFLOOR,
959   LROUND,
960   LLROUND,
961   LRINT,
962   LLRINT,
963 
964   /// FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two
965   /// values.
  ///
967   /// In the case where a single input is a NaN (either signaling or quiet),
968   /// the non-NaN input is returned.
969   ///
970   /// The return value of (FMINNUM 0.0, -0.0) could be either 0.0 or -0.0.
971   FMINNUM,
972   FMAXNUM,
973 
974   /// FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimum or maximum on
975   /// two values, following the IEEE-754 2008 definition. This differs from
976   /// FMINNUM/FMAXNUM in the handling of signaling NaNs. If one input is a
977   /// signaling NaN, returns a quiet NaN.
978   FMINNUM_IEEE,
979   FMAXNUM_IEEE,
980 
981   /// FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0
982   /// as less than 0.0. While FMINNUM_IEEE/FMAXNUM_IEEE follow IEEE 754-2008
983   /// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2019 semantics.
984   FMINIMUM,
985   FMAXIMUM,
986 
987   /// FSINCOS - Compute both fsin and fcos as a single operation.
988   FSINCOS,
989 
990   /// Gets the current floating-point environment. The first operand is a token
991   /// chain. The results are FP environment, represented by an integer value,
992   /// and a token chain.
993   GET_FPENV,
994 
995   /// Sets the current floating-point environment. The first operand is a token
996   /// chain, the second is FP environment, represented by an integer value. The
997   /// result is a token chain.
998   SET_FPENV,
999 
1000   /// Set floating-point environment to default state. The first operand and the
1001   /// result are token chains.
1002   RESET_FPENV,
1003 
1004   /// Gets the current floating-point environment. The first operand is a token
1005   /// chain, the second is a pointer to memory, where FP environment is stored
1006   /// to. The result is a token chain.
1007   GET_FPENV_MEM,
1008 
1009   /// Sets the current floating point environment. The first operand is a token
1010   /// chain, the second is a pointer to memory, where FP environment is loaded
1011   /// from. The result is a token chain.
1012   SET_FPENV_MEM,
1013 
1014   /// Reads the current dynamic floating-point control modes. The operand is
1015   /// a token chain.
1016   GET_FPMODE,
1017 
1018   /// Sets the current dynamic floating-point control modes. The first operand
1019   /// is a token chain, the second is control modes set represented as integer
1020   /// value.
1021   SET_FPMODE,
1022 
1023   /// Sets default dynamic floating-point control modes. The operand is a
1024   /// token chain.
1025   RESET_FPMODE,
1026 
1027   /// LOAD and STORE have token chains as their first operand, then the same
1028   /// operands as an LLVM load/store instruction, then an offset node that
1029   /// is added / subtracted from the base pointer to form the address (for
1030   /// indexed memory ops).
1031   LOAD,
1032   STORE,
1033 
1034   /// DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
1035   /// to a specified boundary.  This node always has two return values: a new
1036   /// stack pointer value and a chain. The first operand is the token chain,
1037   /// the second is the number of bytes to allocate, and the third is the
1038   /// alignment boundary.  The size is guaranteed to be a multiple of the
1039   /// stack alignment, and the alignment is guaranteed to be bigger than the
1040   /// stack alignment (if required) or 0 to get standard stack alignment.
1041   DYNAMIC_STACKALLOC,
1042 
1043   /// Control flow instructions.  These all have token chains.
1044 
1045   /// BR - Unconditional branch.  The first operand is the chain
1046   /// operand, the second is the MBB to branch to.
1047   BR,
1048 
1049   /// BRIND - Indirect branch.  The first operand is the chain, the second
1050   /// is the value to branch to, which must be of the same type as the
1051   /// target's pointer type.
1052   BRIND,
1053 
1054   /// BR_JT - Jumptable branch. The first operand is the chain, the second
1055   /// is the jumptable index, the last one is the jumptable entry index.
1056   BR_JT,
1057 
1058   /// JUMP_TABLE_DEBUG_INFO - Jumptable debug info. The first operand is the
1059   /// chain, the second is the jumptable index.
1060   JUMP_TABLE_DEBUG_INFO,
1061 
1062   /// BRCOND - Conditional branch.  The first operand is the chain, the
1063   /// second is the condition, the third is the block to branch to if the
1064   /// condition is true.  If the type of the condition is not i1, then the
1065   /// high bits must conform to getBooleanContents. If the condition is undef,
1066   /// it nondeterministically jumps to the block.
1067   /// TODO: Its semantics w.r.t undef requires further discussion; we need to
1068   /// make it sure that it is consistent with optimizations in MIR & the
1069   /// meaning of IMPLICIT_DEF. See https://reviews.llvm.org/D92015
1070   BRCOND,
1071 
1072   /// BR_CC - Conditional branch.  The behavior is like that of SELECT_CC, in
1073   /// that the condition is represented as condition code, and two nodes to
1074   /// compare, rather than as a combined SetCC node.  The operands in order
1075   /// are chain, cc, lhs, rhs, block to branch to if condition is true. If
1076   /// condition is undef, it nondeterministically jumps to the block.
1077   BR_CC,
1078 
1079   /// INLINEASM - Represents an inline asm block.  This node always has two
1080   /// return values: a chain and a flag result.  The inputs are as follows:
1081   ///   Operand #0  : Input chain.
1082   ///   Operand #1  : a ExternalSymbolSDNode with a pointer to the asm string.
1083   ///   Operand #2  : a MDNodeSDNode with the !srcloc metadata.
1084   ///   Operand #3  : HasSideEffect, IsAlignStack bits.
1085   ///   After this, it is followed by a list of operands with this format:
1086   ///     ConstantSDNode: Flags that encode whether it is a mem or not, the
  ///                     number of operands that follow, etc.  See InlineAsm.h.
1088   ///     ... however many operands ...
1089   ///   Operand #last: Optional, an incoming flag.
1090   ///
1091   /// The variable width operands are required to represent target addressing
1092   /// modes as a single "operand", even though they may have multiple
1093   /// SDOperands.
1094   INLINEASM,
1095 
1096   /// INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
1097   INLINEASM_BR,
1098 
1099   /// EH_LABEL - Represents a label in mid basic block used to track
1100   /// locations needed for debug and exception handling tables.  These nodes
1101   /// take a chain as input and return a chain.
1102   EH_LABEL,
1103 
1104   /// ANNOTATION_LABEL - Represents a mid basic block label used by
1105   /// annotations. This should remain within the basic block and be ordered
1106   /// with respect to other call instructions, but loads and stores may float
1107   /// past it.
1108   ANNOTATION_LABEL,
1109 
1110   /// CATCHRET - Represents a return from a catch block funclet. Used for
1111   /// MSVC compatible exception handling. Takes a chain operand and a
1112   /// destination basic block operand.
1113   CATCHRET,
1114 
1115   /// CLEANUPRET - Represents a return from a cleanup block funclet.  Used for
1116   /// MSVC compatible exception handling. Takes only a chain operand.
1117   CLEANUPRET,
1118 
1119   /// STACKSAVE - STACKSAVE has one operand, an input chain.  It produces a
1120   /// value, the same type as the pointer type for the system, and an output
1121   /// chain.
1122   STACKSAVE,
1123 
1124   /// STACKRESTORE has two operands, an input chain and a pointer to restore
  /// to; it returns an output chain.
1126   STACKRESTORE,
1127 
1128   /// CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end
1129   /// of a call sequence, and carry arbitrary information that target might
1130   /// want to know.  The first operand is a chain, the rest are specified by
1131   /// the target and not touched by the DAG optimizers.
  /// Targets that may use the stack to pass call arguments define additional
1133   /// operands:
1134   /// - size of the call frame part that must be set up within the
1135   ///   CALLSEQ_START..CALLSEQ_END pair,
1136   /// - part of the call frame prepared prior to CALLSEQ_START.
1137   /// Both these parameters must be constants, their sum is the total call
1138   /// frame size.
1139   /// CALLSEQ_START..CALLSEQ_END pairs may not be nested.
1140   CALLSEQ_START, // Beginning of a call sequence
1141   CALLSEQ_END,   // End of a call sequence
1142 
1143   /// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
1144   /// and the alignment. It returns a pair of values: the vaarg value and a
1145   /// new chain.
1146   VAARG,
1147 
1148   /// VACOPY - VACOPY has 5 operands: an input chain, a destination pointer,
1149   /// a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
1150   /// source.
1151   VACOPY,
1152 
1153   /// VAEND, VASTART - VAEND and VASTART have three operands: an input chain,
1154   /// pointer, and a SRCVALUE.
1155   VAEND,
1156   VASTART,
1157 
1158   // PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE
1159   // with the preallocated call Value.
1160   PREALLOCATED_SETUP,
1161   // PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE
1162   // with the preallocated call Value, and a constant int.
1163   PREALLOCATED_ARG,
1164 
1165   /// SRCVALUE - This is a node type that holds a Value* that is used to
1166   /// make reference to a value in the LLVM IR.
1167   SRCVALUE,
1168 
  /// MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
1170   /// reference metadata in the IR.
1171   MDNODE_SDNODE,
1172 
1173   /// PCMARKER - This corresponds to the pcmarker intrinsic.
1174   PCMARKER,
1175 
1176   /// READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
1177   /// It produces a chain and one i64 value. The only operand is a chain.
1178   /// If i64 is not legal, the result will be expanded into smaller values.
1179   /// Still, it returns an i64, so targets should set legality for i64.
1180   /// The result is the content of the architecture-specific cycle
1181   /// counter-like register (or other high accuracy low latency clock source).
1182   READCYCLECOUNTER,
1183 
  /// READSTEADYCOUNTER - This corresponds to the readsteadycounter intrinsic.
1185   /// It has the same semantics as the READCYCLECOUNTER implementation except
1186   /// that the result is the content of the architecture-specific fixed
1187   /// frequency counter suitable for measuring elapsed time.
1188   READSTEADYCOUNTER,
1189 
1190   /// HANDLENODE node - Used as a handle for various purposes.
1191   HANDLENODE,
1192 
1193   /// INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.  It
1194   /// takes as input a token chain, the pointer to the trampoline, the pointer
1195   /// to the nested function, the pointer to pass for the 'nest' parameter, a
1196   /// SRCVALUE for the trampoline and another for the nested function
1197   /// (allowing targets to access the original Function*).
1198   /// It produces a token chain as output.
1199   INIT_TRAMPOLINE,
1200 
1201   /// ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
1202   /// It takes a pointer to the trampoline and produces a (possibly) new
1203   /// pointer to the same trampoline with platform-specific adjustments
1204   /// applied.  The pointer it returns points to an executable block of code.
1205   ADJUST_TRAMPOLINE,
1206 
1207   /// TRAP - Trapping instruction
1208   TRAP,
1209 
1210   /// DEBUGTRAP - Trap intended to get the attention of a debugger.
1211   DEBUGTRAP,
1212 
1213   /// UBSANTRAP - Trap with an immediate describing the kind of sanitizer
1214   /// failure.
1215   UBSANTRAP,
1216 
1217   /// PREFETCH - This corresponds to a prefetch intrinsic. The first operand
1218   /// is the chain.  The other operands are the address to prefetch,
1219   /// read / write specifier, locality specifier and instruction / data cache
1220   /// specifier.
1221   PREFETCH,
1222 
  /// ARITH_FENCE - This corresponds to the arithmetic fence intrinsic. Both its
  /// operand and output have the same floating point type.
1225   ARITH_FENCE,

  /// MEMBARRIER - Compiler barrier only; generate a no-op.
  MEMBARRIER,

  /// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
  /// This corresponds to the fence instruction. It takes an input chain, and
  /// two integer constants: an AtomicOrdering and a SynchronizationScope.
  ATOMIC_FENCE,
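
  // For example (a sketch, not a normative lowering): the IR instruction
  //   fence seq_cst
  // is typically built as an ATOMIC_FENCE whose two constant operands encode
  // AtomicOrdering::SequentiallyConsistent and the system synchronization
  // scope (SyncScope::System).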

  /// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
  /// This corresponds to the "load atomic" instruction.
  ATOMIC_LOAD,

  /// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val)
  /// This corresponds to the "store atomic" instruction.
  ATOMIC_STORE,

  /// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
  /// For double-word atomic operations:
  /// ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmpLo, cmpHi,
  ///                                          swapLo, swapHi)
  /// This corresponds to the cmpxchg instruction.
  ATOMIC_CMP_SWAP,

  /// Val, Success, OUTCHAIN
  ///     = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap)
  /// N.b. this is still a strong cmpxchg operation, so
  /// Success == "Val == cmp".
  ATOMIC_CMP_SWAP_WITH_SUCCESS,
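
  // One plausible legalization (a sketch only; targets may lower this node
  // directly) recovers the success flag with an explicit comparison:
  //
  //   Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
  //   Success       = SETCC Val, cmp, seteq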

  /// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
  /// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
  /// For double-word atomic operations:
  /// ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi)
  /// ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi)
  /// These correspond to the atomicrmw instruction.
  ATOMIC_SWAP,
  ATOMIC_LOAD_ADD,
  ATOMIC_LOAD_SUB,
  ATOMIC_LOAD_AND,
  ATOMIC_LOAD_CLR,
  ATOMIC_LOAD_OR,
  ATOMIC_LOAD_XOR,
  ATOMIC_LOAD_NAND,
  ATOMIC_LOAD_MIN,
  ATOMIC_LOAD_MAX,
  ATOMIC_LOAD_UMIN,
  ATOMIC_LOAD_UMAX,
  ATOMIC_LOAD_FADD,
  ATOMIC_LOAD_FSUB,
  ATOMIC_LOAD_FMAX,
  ATOMIC_LOAD_FMIN,
  ATOMIC_LOAD_UINC_WRAP,
  ATOMIC_LOAD_UDEC_WRAP,
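
  // As with atomicrmw, each node returns the *old* value and stores the
  // updated value. For the less obvious operations the update is (a sketch,
  // following the atomicrmw semantics documented in the LangRef):
  //
  //   NAND:      new = ~(old & amt)
  //   UINC_WRAP: new = (old >= amt) ? 0 : old + 1
  //   UDEC_WRAP: new = (old == 0 || old > amt) ? amt : old - 1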

  // Masked load and store - consecutive vector load and store operations
  // with an additional mask operand that prevents memory accesses to the
  // masked-off lanes.
  //
  // Val, OutChain = MLOAD(BasePtr, Mask, PassThru)
  // OutChain = MSTORE(Value, BasePtr, Mask)
  MLOAD,
  MSTORE,
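
  // Per-lane semantics (a sketch; lane i of the result/memory):
  //
  //   MLOAD:  Val[i]               = Mask[i] ? *(BasePtr + i) : PassThru[i]
  //   MSTORE: if (Mask[i]) *(BasePtr + i) = Value[i]
  //
  // Masked-off lanes generate no memory access at all.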

  // Masked gather and scatter - load and store operations for a vector of
  // random addresses with an additional mask operand that prevents memory
  // accesses to the masked-off lanes.
  //
  // Val, OutChain = GATHER(InChain, PassThru, Mask, BasePtr, Index, Scale)
  // OutChain = SCATTER(InChain, Value, Mask, BasePtr, Index, Scale)
  //
  // The Index operand can have more vector elements than the other operands
  // due to type legalization. The extra elements are ignored.
  MGATHER,
  MSCATTER,
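
  // Per-lane address computation and semantics (a sketch; see MemIndexType
  // later in this file for how Index is interpreted):
  //
  //   Addr[i] = BasePtr + Index[i] * Scale
  //   GATHER:  Val[i] = Mask[i] ? *Addr[i] : PassThru[i]
  //   SCATTER: if (Mask[i]) *Addr[i] = Value[i]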

  /// This corresponds to the llvm.lifetime.* intrinsics. The first operand
  /// is the chain and the second operand is the alloca pointer.
  LIFETIME_START,
  LIFETIME_END,

  /// GC_TRANSITION_START/GC_TRANSITION_END - These operators mark the
  /// beginning and end of a GC transition sequence, and carry arbitrary
  /// information that the target might need for lowering. The first operand
  /// is a chain, the rest are specified by the target and not touched by the
  /// DAG optimizers. GC_TRANSITION_START..GC_TRANSITION_END pairs may not be
  /// nested.
  GC_TRANSITION_START,
  GC_TRANSITION_END,

  /// GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of
  /// the most recent dynamic alloca. For most targets that would be 0, but
  /// for some others (e.g. PowerPC, PowerPC64) that would be a compile-time
  /// known nonzero constant. The only operand here is the chain.
  GET_DYNAMIC_AREA_OFFSET,

  /// Pseudo probe for AutoFDO, as a placeholder in a basic block to improve
  /// the quality of sample counts.
  PSEUDO_PROBE,

  /// VSCALE(IMM) - Returns the runtime scaling factor used to calculate the
  /// number of elements within a scalable vector. IMM is a constant integer
  /// multiplier that is applied to the runtime value.
  VSCALE,
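
  // For example (illustrative): a <vscale x 4 x i32> vector contains
  // VSCALE(4) elements, i.e. 4 times the runtime vscale value, and occupies
  // VSCALE(16) bytes.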

  /// Generic reduction nodes. These nodes represent horizontal vector
  /// reduction operations, producing a scalar result.
  /// The SEQ variants perform reductions in sequential order. The first
  /// operand is an initial scalar accumulator value, and the second operand
  /// is the vector to reduce.
  /// E.g. RES = VECREDUCE_SEQ_FADD f32 ACC, <4 x f32> SRC_VEC
  ///  ... is equivalent to
  /// RES = (((ACC + SRC_VEC[0]) + SRC_VEC[1]) + SRC_VEC[2]) + SRC_VEC[3]
  VECREDUCE_SEQ_FADD,
  VECREDUCE_SEQ_FMUL,

  /// These reductions have relaxed evaluation order semantics, and have a
  /// single vector operand. The order of evaluation is unspecified. For
  /// power-of-2 vectors, one valid legalizer expansion is to use a tree
  /// reduction, i.e.:
  /// For RES = VECREDUCE_FADD <8 x f16> SRC_VEC
  ///   PART_RDX = FADD SRC_VEC[0:3], SRC_VEC[4:7]
  ///   PART_RDX2 = FADD PART_RDX[0:1], PART_RDX[2:3]
  ///   RES = FADD PART_RDX2[0], PART_RDX2[1]
  /// For non-power-of-2 vectors, this can be computed by extracting each
  /// element and performing the operation as if it were scalarized.
  VECREDUCE_FADD,
  VECREDUCE_FMUL,
  /// FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
  VECREDUCE_FMAX,
  VECREDUCE_FMIN,
  /// FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the
  /// llvm.minimum and llvm.maximum semantics.
  VECREDUCE_FMAXIMUM,
  VECREDUCE_FMINIMUM,
  /// Integer reductions may have a result type larger than the vector element
  /// type. However, the reduction is performed using the vector element type
  /// and the value in the top bits is unspecified.
  VECREDUCE_ADD,
  VECREDUCE_MUL,
  VECREDUCE_AND,
  VECREDUCE_OR,
  VECREDUCE_XOR,
  VECREDUCE_SMAX,
  VECREDUCE_SMIN,
  VECREDUCE_UMAX,
  VECREDUCE_UMIN,
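
  // For example (illustrative): i32 RES = VECREDUCE_ADD <8 x i8> SRC_VEC
  // performs the additions in i8, so only the low 8 bits of RES are defined;
  // a consumer that needs the full i32 width must mask or extend them
  // explicitly.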

  // The `llvm.experimental.stackmap` intrinsic.
  // Operands: input chain, glue, <id>, <numShadowBytes>, [live0[, live1...]]
  // Outputs: output chain, glue
  STACKMAP,

  // The `llvm.experimental.patchpoint.*` intrinsic.
  // Operands: input chain, [glue], reg-mask, <id>, <numShadowBytes>, callee,
  //   <numArgs>, cc, ...
  // Outputs: [rv], output chain, glue
  PATCHPOINT,

// Vector Predication
#define BEGIN_REGISTER_VP_SDNODE(VPSDID, ...) VPSDID,
#include "llvm/IR/VPIntrinsics.def"

  // The `llvm.experimental.convergence.*` intrinsics.
  CONVERGENCECTRL_ANCHOR,
  CONVERGENCECTRL_ENTRY,
  CONVERGENCECTRL_LOOP,
  // This does not correspond to any convergence control intrinsic. It is used
  // to glue a convergence control token to a convergent operation in the DAG,
  // which is later translated to an implicit use in the MIR.
  CONVERGENCECTRL_GLUE,

  /// BUILTIN_OP_END - This must be the last enum value in this list.
  /// The target-specific pre-isel opcode values start here.
  BUILTIN_OP_END
};

/// FIRST_TARGET_STRICTFP_OPCODE - Target-specific pre-isel operations
/// which cannot raise FP exceptions should be less than this value.
/// Those that do must not be less than this value.
static const int FIRST_TARGET_STRICTFP_OPCODE = BUILTIN_OP_END + 400;

/// FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations
/// which do not reference a specific memory location should be less than
/// this value. Those that do must not be less than this value, and can
/// be used with SelectionDAG::getMemIntrinsicNode.
static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END + 500;

/// Whether this is a bitwise logic opcode.
inline bool isBitwiseLogicOp(unsigned Opcode) {
  return Opcode == ISD::AND || Opcode == ISD::OR || Opcode == ISD::XOR;
}

/// Get underlying scalar opcode for VECREDUCE opcode.
/// For example ISD::AND for ISD::VECREDUCE_AND.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode);

/// Whether this is a vector-predicated Opcode.
bool isVPOpcode(unsigned Opcode);

/// Whether this is a vector-predicated binary operation opcode.
bool isVPBinaryOp(unsigned Opcode);

/// Whether this is a vector-predicated reduction opcode.
bool isVPReduction(unsigned Opcode);

/// The operand position of the vector mask.
std::optional<unsigned> getVPMaskIdx(unsigned Opcode);

/// The operand position of the explicit vector length parameter.
std::optional<unsigned> getVPExplicitVectorLengthIdx(unsigned Opcode);

/// Translate this VP Opcode to its corresponding non-VP Opcode.
std::optional<unsigned> getBaseOpcodeForVP(unsigned Opcode, bool hasFPExcept);

/// Translate this non-VP Opcode to its corresponding VP Opcode.
unsigned getVPForBaseOpcode(unsigned Opcode);

//===--------------------------------------------------------------------===//
/// MemIndexedMode enum - This enum defines the load / store indexed
/// addressing modes.
///
/// UNINDEXED    "Normal" load / store. The effective address is already
///              computed and is available in the base pointer. The offset
///              operand is always undefined. In addition to producing a
///              chain, an unindexed load produces one value (result of the
///              load); an unindexed store does not produce a value.
///
/// PRE_INC      Similar to the unindexed mode where the effective address is
/// PRE_DEC      the value of the base pointer plus / minus the offset.
///              It considers the computation as being folded into the load /
///              store operation (i.e. the load / store does the address
///              computation as well as performing the memory transaction).
///              The base operand is always undefined. In addition to
///              producing a chain, a pre-indexed load produces two values
///              (result of the load and the result of the address
///              computation); a pre-indexed store produces one value (result
///              of the address computation).
///
/// POST_INC     The effective address is the value of the base pointer. The
/// POST_DEC     value of the offset operand is then added to / subtracted
///              from the base after the memory transaction. In addition to
///              producing a chain, a post-indexed load produces two values
///              (the result of the load and the result of the base +/- offset
///              computation); a post-indexed store produces one value (the
///              result of the base +/- offset computation).
enum MemIndexedMode { UNINDEXED = 0, PRE_INC, PRE_DEC, POST_INC, POST_DEC };
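
// For example (a sketch of the semantics described above, not tied to any
// particular target): for a pre-indexed load with base B and offset O,
//
//   EA    = B + O        // PRE_INC (B - O for PRE_DEC)
//   value = *EA          // returned along with EA and the chain
//
// while a post-indexed load uses the base directly and returns the updated
// address as a separate result:
//
//   value = *B
//   EA    = B + O        // POST_INC (B - O for POST_DEC)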

static const int LAST_INDEXED_MODE = POST_DEC + 1;

//===--------------------------------------------------------------------===//
/// MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's
/// index parameter when calculating addresses.
///
/// SIGNED_SCALED     Addr = Base + ((signed)Index * Scale)
/// UNSIGNED_SCALED   Addr = Base + ((unsigned)Index * Scale)
///
/// NOTE: The value of Scale is typically only known to the node owning the
/// IndexType, with a value of 1 being the equivalent of unscaled.
enum MemIndexType { SIGNED_SCALED = 0, UNSIGNED_SCALED };

static const int LAST_MEM_INDEX_TYPE = UNSIGNED_SCALED + 1;

inline bool isIndexTypeSigned(MemIndexType IndexType) {
  return IndexType == SIGNED_SCALED;
}

//===--------------------------------------------------------------------===//
/// LoadExtType enum - This enum defines the three variants of LOADEXT
/// (load with extension).
///
/// SEXTLOAD loads the integer operand and sign extends it to a larger
///          integer result type.
/// ZEXTLOAD loads the integer operand and zero extends it to a larger
///          integer result type.
/// EXTLOAD  is used for two things: floating point extending loads and
///          integer extending loads [the top bits are undefined].
enum LoadExtType { NON_EXTLOAD = 0, EXTLOAD, SEXTLOAD, ZEXTLOAD };
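
// For example (illustrative): loading an i8 value 0x80 into an i32 result
// yields 0xFFFFFF80 with SEXTLOAD, 0x00000080 with ZEXTLOAD, and 0x??????80
// (upper 24 bits undefined) with an integer EXTLOAD.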

static const int LAST_LOADEXT_TYPE = ZEXTLOAD + 1;

NodeType getExtForLoadExtType(bool IsFP, LoadExtType);

//===--------------------------------------------------------------------===//
/// ISD::CondCode enum - These are ordered carefully to make the bitfields
/// below work out, when considering SETFALSE (something that never exists
/// dynamically) as 0.  "U" -> Unsigned (for integer operands) or Unordered
/// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal
/// to.  If the "N" column is 1, the result of the comparison is undefined if
/// the input is a NAN.
///
/// All of these (except for the 'always folded ops') should be handled for
/// floating point.  For integer, only the SETEQ,SETNE,SETLT,SETLE,SETGT,
/// SETGE,SETULT,SETULE,SETUGT, and SETUGE opcodes are used.
///
/// Note that these are laid out in a specific order to allow bit-twiddling
/// to transform conditions.
enum CondCode {
  // Opcode       N U L G E       Intuitive operation
  SETFALSE, //      0 0 0 0       Always false (always folded)
  SETOEQ,   //      0 0 0 1       True if ordered and equal
  SETOGT,   //      0 0 1 0       True if ordered and greater than
  SETOGE,   //      0 0 1 1       True if ordered and greater than or equal
  SETOLT,   //      0 1 0 0       True if ordered and less than
  SETOLE,   //      0 1 0 1       True if ordered and less than or equal
  SETONE,   //      0 1 1 0       True if ordered and operands are unequal
  SETO,     //      0 1 1 1       True if ordered (no nans)
  SETUO,    //      1 0 0 0       True if unordered: isnan(X) | isnan(Y)
  SETUEQ,   //      1 0 0 1       True if unordered or equal
  SETUGT,   //      1 0 1 0       True if unordered or greater than
  SETUGE,   //      1 0 1 1       True if unordered, greater than, or equal
  SETULT,   //      1 1 0 0       True if unordered or less than
  SETULE,   //      1 1 0 1       True if unordered, less than, or equal
  SETUNE,   //      1 1 1 0       True if unordered or not equal
  SETTRUE,  //      1 1 1 1       Always true (always folded)
  // Don't care operations: undefined if the input is a nan.
  SETFALSE2, //   1 X 0 0 0       Always false (always folded)
  SETEQ,     //   1 X 0 0 1       True if equal
  SETGT,     //   1 X 0 1 0       True if greater than
  SETGE,     //   1 X 0 1 1       True if greater than or equal
  SETLT,     //   1 X 1 0 0       True if less than
  SETLE,     //   1 X 1 0 1       True if less than or equal
  SETNE,     //   1 X 1 1 0       True if not equal
  SETTRUE2,  //   1 X 1 1 1       Always true (always folded)

  SETCC_INVALID // Marker value.
};

/// Return true if this is a setcc instruction that performs a signed
/// comparison when used with integer operands.
inline bool isSignedIntSetCC(CondCode Code) {
  return Code == SETGT || Code == SETGE || Code == SETLT || Code == SETLE;
}

/// Return true if this is a setcc instruction that performs an unsigned
/// comparison when used with integer operands.
inline bool isUnsignedIntSetCC(CondCode Code) {
  return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
}

/// Return true if this is a setcc instruction that performs an equality
/// comparison when used with integer operands.
inline bool isIntEqualitySetCC(CondCode Code) {
  return Code == SETEQ || Code == SETNE;
}

/// Return true if this is a setcc instruction that performs an equality
/// comparison when used with floating point operands.
inline bool isFPEqualitySetCC(CondCode Code) {
  return Code == SETOEQ || Code == SETONE || Code == SETUEQ || Code == SETUNE;
}

/// Return true if the specified condition returns true if the two operands to
/// the condition are equal. Note that if one of the two operands is a NaN,
/// this value is meaningless.
inline bool isTrueWhenEqual(CondCode Cond) { return ((int)Cond & 1) != 0; }

/// This function returns 0 if the condition is always false if an operand is
/// a NaN, 1 if the condition is always true if the operand is a NaN, and 2 if
/// the condition is undefined if the operand is a NaN.
inline unsigned getUnorderedFlavor(CondCode Cond) {
  return ((int)Cond >> 3) & 3;
}
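
// Worked examples of the encoding (assuming the enumerator values implied by
// the table above): getUnorderedFlavor(SETOGT) == 0 because ordered compares
// are false on NaN inputs, getUnorderedFlavor(SETUGT) == 1 because unordered
// compares are true on NaN inputs, and getUnorderedFlavor(SETGT) == 2 because
// the "don't care" codes leave NaN behavior undefined. Likewise
// isTrueWhenEqual(SETGE) is true (the E bit is set) while
// isTrueWhenEqual(SETGT) is false.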

/// Return the operation corresponding to !(X op Y), where 'op' is a valid
/// SetCC operation.
CondCode getSetCCInverse(CondCode Operation, EVT Type);

inline bool isExtOpcode(unsigned Opcode) {
  return Opcode == ISD::ANY_EXTEND || Opcode == ISD::ZERO_EXTEND ||
         Opcode == ISD::SIGN_EXTEND;
}

inline bool isExtVecInRegOpcode(unsigned Opcode) {
  return Opcode == ISD::ANY_EXTEND_VECTOR_INREG ||
         Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
         Opcode == ISD::SIGN_EXTEND_VECTOR_INREG;
}

namespace GlobalISel {
/// Return the operation corresponding to !(X op Y), where 'op' is a valid
/// SetCC operation. The U bit of the condition code has different meanings
/// between floating point and integer comparisons and LLT's don't provide
/// this distinction. As such we need to be told whether the comparison is
/// floating point or integer-like. Pointers should use integer-like
/// comparisons.
CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike);
} // end namespace GlobalISel

/// Return the operation corresponding to (Y op X) when given the operation
/// for (X op Y).
CondCode getSetCCSwappedOperands(CondCode Operation);

/// Return the result of a logical OR between different comparisons of
/// identical values: ((X op1 Y) | (X op2 Y)). This function returns
/// SETCC_INVALID if it is not possible to represent the resultant comparison.
CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type);

/// Return the result of a logical AND between different comparisons of
/// identical values: ((X op1 Y) & (X op2 Y)). This function returns
/// SETCC_INVALID if it is not possible to represent the resultant comparison.
CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type);
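
// Informal examples of the helpers above, written as logical identities
// rather than guaranteed return values: inverting SETEQ gives SETNE, swapping
// the operands of SETLT gives SETGT (X < Y is Y > X), OR-ing SETLT with SETEQ
// corresponds to SETLE, and AND-ing SETLE with SETGE corresponds to SETEQ.
// When no single condition code can express the combination, the OR/AND
// helpers return SETCC_INVALID.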

} // namespace ISD

} // namespace llvm

#endif