//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code.  This has
/// three main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETLOWERING_H
#define LLVM_CODEGEN_TARGETLOWERING_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class AssumptionCache;
class CCState;
class CCValAssign;
enum class ComplexDeinterleavingOperation;
enum class ComplexDeinterleavingRotation;
class Constant;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class Loop;
class GISelKnownBits;
class IntrinsicInst;
class IRBuilderBase;
struct KnownBits;
class LLVMContext;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
class MachineLoop;
class MachineRegisterInfo;
class MCContext;
class MCExpr;
class Module;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetTransformInfo;
class Value;

namespace Sched {

enum Preference {
  None,        // No preference
  Source,      // Follow source order.
  RegPressure, // Scheduling for lowest register pressure.
  Hybrid,      // Scheduling for both latency and register pressure.
  ILP,         // Scheduling for ILP in low register pressure mode.
  VLIW,        // Scheduling for VLIW targets.
  Fast,        // Fast suboptimal list scheduling
  Linearize    // Linearize DAG, no scheduling
};

} // end namespace Sched

// MemOp models a memory operation, either memset or memcpy/memmove.
struct MemOp {
private:
  // Shared
  uint64_t Size;
  bool DstAlignCanChange; // true if destination alignment can satisfy any
                          // constraint.
  Align DstAlign;         // Specified alignment of the memory operation.

  bool AllowOverlap;
  // memset only
  bool IsMemset;   // If set, this memory operation is a memset.
  bool ZeroMemset; // If set, clears out memory with zeros.
  // memcpy only
  bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
                     // constant so it does not need to be loaded.
  Align SrcAlign;    // Inferred alignment of the source or default value if the
                     // memory operation does not need to load the value.
public:
  static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                    Align SrcAlign, bool IsVolatile,
                    bool MemcpyStrSrc = false) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = false;
    Op.ZeroMemset = false;
    Op.MemcpyStrSrc = MemcpyStrSrc;
    Op.SrcAlign = SrcAlign;
    return Op;
  }

  static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                   bool IsZeroMemset, bool IsVolatile) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = true;
    Op.ZeroMemset = IsZeroMemset;
    Op.MemcpyStrSrc = false;
    return Op;
  }

  uint64_t size() const { return Size; }
  Align getDstAlign() const {
    assert(!DstAlignCanChange);
    return DstAlign;
  }
  bool isFixedDstAlign() const { return !DstAlignCanChange; }
  bool allowOverlap() const { return AllowOverlap; }
  bool isMemset() const { return IsMemset; }
  bool isMemcpy() const { return !IsMemset; }
  bool isMemcpyWithFixedDstAlign() const {
    return isMemcpy() && !DstAlignCanChange;
  }
  bool isZeroMemset() const { return isMemset() && ZeroMemset; }
  bool isMemcpyStrSrc() const {
    assert(isMemcpy() && "Must be a memcpy");
    return MemcpyStrSrc;
  }
  Align getSrcAlign() const {
    assert(isMemcpy() && "Must be a memcpy");
    return SrcAlign;
  }
  bool isSrcAligned(Align AlignCheck) const {
    return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value());
  }
  bool isDstAligned(Align AlignCheck) const {
    return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value());
  }
  bool isAligned(Align AlignCheck) const {
    return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck);
  }
};
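
// A minimal usage sketch (illustrative, not part of this file): describe a
// 16-byte non-volatile memcpy whose destination alignment is fixed at 8
// bytes and whose source is known to be 4-byte aligned.
// \code
//   MemOp Op = MemOp::Copy(/*Size=*/16, /*DstAlignCanChange=*/false,
//                          /*DstAlign=*/Align(8), /*SrcAlign=*/Align(4),
//                          /*IsVolatile=*/false);
//   assert(Op.isMemcpy() && Op.allowOverlap()); // non-volatile, may overlap
//   assert(Op.isAligned(Align(4)));             // both sides 4-byte aligned
//   assert(!Op.isDstAligned(Align(16)));        // dst is only 8-byte aligned
// \endcode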

/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
public:
  /// This enum indicates whether operations are valid for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeAction : uint8_t {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand,     // Try to expand this to other ops, otherwise use a libcall.
    LibCall,    // Don't try to expand this to other ops, always use a libcall.
    Custom      // Use the LowerOperation hook to implement custom lowering.
  };

  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it legal.
  enum LegalizeTypeAction : uint8_t {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector,     // This vector should be widened into a larger vector.
    TypePromoteFloat,    // Replace this float with a larger one.
    TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic.
    TypeScalarizeScalableVector, // This action is explicitly left unimplemented.
                                 // While it is theoretically possible to
                                 // legalize operations on scalable types with a
                                 // loop that handles the vscale * #lanes of the
                                 // vector, this is non-trivial at SelectionDAG
                                 // level and these types are better to be
                                 // widened or promoted.
  };

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,        // Only bit 0 counts, the rest can hold
                                    // garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,      // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal,  // The target supports selects with a scalar condition
                          // and vector values (ex: cmov).
    VectorMaskSelect      // The target supports vector selects with a vector
                          // mask (ex: x86 blends).
  };

  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
  /// to, if at all. Exists because different targets have different levels of
  /// support for these atomic instructions, and also have different options
  /// w.r.t. what they should expand to.
  enum class AtomicExpansionKind {
    None,    // Don't expand the instruction.
    CastToInteger,    // Cast the atomic instruction to another type, e.g. from
                      // floating-point to integer type.
    LLSC,    // Expand the instruction into load-linked/store-conditional; used
             // by ARM/AArch64.
    LLOnly,  // Expand the (load) instruction into just a load-linked, which has
             // greater atomic guarantees than a normal load.
    CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
    MaskedIntrinsic,  // Use a target-specific intrinsic for the LL/SC loop.
    BitTestIntrinsic, // Use a target-specific intrinsic for special bit
                      // operations; used by X86.
    CmpArithIntrinsic,// Use a target-specific intrinsic for special compare
                      // operations; used by X86.
    Expand,           // Generic expansion in terms of other atomic operations.

    // Rewrite to a non-atomic form for use in a known non-preemptible
    // environment.
    NotAtomic
  };

  /// Enum that specifies when a multiplication should be expanded.
  enum class MulExpansionKind {
    Always,            // Always expand the instruction.
    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
                       // or custom.
  };

  /// Enum that specifies when a float negation is beneficial.
  enum class NegatibleCost {
    Cheaper = 0,    // Negated expression is cheaper.
    Neutral = 1,    // Negated expression has the same cost.
    Expensive = 2   // Negated expression is more expensive.
  };

  /// Enum of different potentially desirable ways to fold (and/or (setcc ...),
  /// (setcc ...)).
  enum AndOrSETCCFoldKind : uint8_t {
    None = 0,   // No fold is preferable.
    AddAnd = 1, // Fold with `Add` op and `And` op is preferable.
    NotAnd = 2, // Fold with `Not` op and `And` op is preferable.
    ABS = 4,    // Fold with `llvm.abs` op is preferable.
  };

  class ArgListEntry {
  public:
    Value *Val = nullptr;
    SDValue Node = SDValue();
    Type *Ty = nullptr;
    bool IsSExt : 1;
    bool IsZExt : 1;
    bool IsInReg : 1;
    bool IsSRet : 1;
    bool IsNest : 1;
    bool IsByVal : 1;
    bool IsByRef : 1;
    bool IsInAlloca : 1;
    bool IsPreallocated : 1;
    bool IsReturned : 1;
    bool IsSwiftSelf : 1;
    bool IsSwiftAsync : 1;
    bool IsSwiftError : 1;
    bool IsCFGuardTarget : 1;
    MaybeAlign Alignment = std::nullopt;
    Type *IndirectType = nullptr;

    ArgListEntry()
        : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
          IsNest(false), IsByVal(false), IsByRef(false), IsInAlloca(false),
          IsPreallocated(false), IsReturned(false), IsSwiftSelf(false),
          IsSwiftAsync(false), IsSwiftError(false), IsCFGuardTarget(false) {}

    void setAttributes(const CallBase *Call, unsigned ArgIdx);
  };
  using ArgListTy = std::vector<ArgListEntry>;

  virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                     ArgListTy &Args) const {};

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }
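
  // For example (illustrative), a target reporting
  // ZeroOrNegativeOneBooleanContent sign-extends its booleans, so a 'true'
  // SETCC result widened to i32 becomes all-ones (-1):
  // \code
  //   assert(getExtendForContent(ZeroOrNegativeOneBooleanContent) ==
  //          ISD::SIGN_EXTEND);
  //   assert(getExtendForContent(ZeroOrOneBooleanContent) == ISD::ZERO_EXTEND);
  // \endcode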

  explicit TargetLoweringBase(const TargetMachine &TM);
  TargetLoweringBase(const TargetLoweringBase &) = delete;
  TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
  virtual ~TargetLoweringBase() = default;

  /// Return true if the target supports strict float operations.
  bool isStrictFPEnabled() const {
    return IsStrictFPEnabled;
  }

protected:
  /// Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }

  virtual bool useSoftFloat() const { return false; }

  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the in-memory pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the type for frame index, which is determined by
  /// the alloca address space specified through the data layout.
  MVT getFrameIndexTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getAllocaAddrSpace());
  }

  /// Return the type for code pointers, which is determined by the program
  /// address space specified through the data layout.
  MVT getProgramPointerTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getProgramAddressSpace());
  }

  /// Return the type for operands of fence.
  /// TODO: Let fence operands be of i32 type and remove this.
  virtual MVT getFenceOperandTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Return the type to use for a scalar shift opcode, given the shifted amount
  /// type. Targets should return a legal type if the input type is legal.
  /// Targets can return a type that is too small if the input type is illegal.
  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;

  /// Returns the type for the shift amount of a shift opcode. For vectors,
  /// returns the input type. For scalars, behavior depends on \p LegalTypes. If
  /// \p LegalTypes is true, calls getScalarShiftAmountTy, otherwise uses
  /// pointer type. If getScalarShiftAmountTy or pointer type cannot represent
  /// all possible shift amounts, returns MVT::i32. In general, \p LegalTypes
  /// should be set to true for calls during type legalization and after type
  /// legalization has been completed.
  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                       bool LegalTypes = true) const;

  /// Return the preferred type to use for a shift opcode, given the shifted
  /// amount type is \p ShiftValueTy.
  LLVM_READONLY
  virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const {
    return ShiftValueTy;
  }

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Returns the type to be used for the EVL/AVL operand of VP nodes:
  /// ISD::VP_ADD, ISD::VP_SUB, etc. It must be a legal scalar integer type,
  /// and must be at least as large as i32. The EVL is implicitly zero-extended
  /// to any larger type.
  virtual MVT getVPExplicitVectorLengthTy() const { return MVT::i32; }

  /// This callback is used to inspect load/store instructions and add
  /// target-specific MachineMemOperand flags to them.  The default
  /// implementation does nothing.
  virtual MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const {
    return MachineMemOperand::MONone;
  }

  /// This callback is used to inspect load/store SDNode.
  /// The default implementation does nothing.
  virtual MachineMemOperand::Flags
  getTargetMMOFlags(const MemSDNode &Node) const {
    return MachineMemOperand::MONone;
  }

  MachineMemOperand::Flags
  getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL,
                         AssumptionCache *AC = nullptr,
                         const TargetLibraryInfo *LibInfo = nullptr) const;
  MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI,
                                                   const DataLayout &DL) const;
  MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI,
                                                    const DataLayout &DL) const;

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if the @llvm.get.active.lane.mask intrinsic should be expanded
  /// using generic code in SelectionDAGBuilder.
  virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const {
    return true;
  }

  virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF,
                                           bool IsScalable) const {
    return true;
  }

  /// Return true if the @llvm.experimental.cttz.elts intrinsic should be
  /// expanded using generic code in SelectionDAGBuilder.
  virtual bool shouldExpandCttzElements(EVT VT) const { return true; }

  // Return true if op(vecreduce(x), vecreduce(y)) should be reassociated to
  // vecreduce(op(x, y)) for the reduction opcode RedOpc.
  virtual bool shouldReassociateReduction(unsigned RedOpc, EVT VT) const {
    return true;
  }

  /// Return true if it is profitable to convert a select of FP constants into
  /// a constant pool load whose address depends on the select condition. The
  /// parameter may be used to differentiate a select with FP compare from
  /// integer compare.
  virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
    return true;
  }

  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }

  /// Return the preferred vector type legalization action.
  virtual TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const {
    // The default action for one-element vectors is to scalarize.
    if (VT.getVectorElementCount().isScalar())
      return TypeScalarizeVector;
    // The default action for an odd-width vector is to widen.
    if (!VT.isPow2VectorType())
      return TypeWidenVector;
    // The default action for other vectors is to promote.
    return TypePromoteInteger;
  }
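
  // With the defaults above, for example (illustrative):
  // \code
  //   getPreferredVectorAction(MVT::v1f32); // TypeScalarizeVector
  //   getPreferredVectorAction(MVT::v3i32); // TypeWidenVector (not pow-2)
  //   getPreferredVectorAction(MVT::v4i16); // TypePromoteInteger
  // \endcode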

  // Return true if the half type should be promoted using soft promotion rules
  // where each operation is promoted to f32 individually, then converted to
  // fp16. The default behavior is to promote chains of operations, keeping
  // intermediate results in f32 precision and range.
  virtual bool softPromoteHalfType() const { return false; }

  // Return true if, for soft-promoted half, the half type should be passed
  // to and returned from functions as f32. The default behavior is to
  // pass as i16. If soft-promoted half is not used, this function is ignored
  // and values are always passed and returned as f32.
  virtual bool useFPRegsForHalfType() const { return false; }

  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type, and the number of defined values, are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }
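
  // With the default threshold above, a BUILD_VECTOR with one or two defined
  // (non-undef) elements is expanded with SCALAR_TO_VECTOR plus shuffles
  // (method 1), while three or more defined elements go through the stack
  // (method 2), assuming the required shuffles are legal.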

  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  /// The definition of "cheaper" may depend on whether we're optimizing
  /// for speed or for size.
  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }

  /// Return true if the target can handle a standalone remainder operation.
  virtual bool hasStandaloneRem(EVT VT) const {
    return true;
  }

  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
    return false;
  }

  /// Reciprocal estimate status values used by the functions below.
  enum ReciprocalEstimate : int {
    Unspecified = -1,
    Disabled = 0,
    Enabled = 1
  };

  /// Return a ReciprocalEstimate enum value for a square root of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;

  /// Return a ReciprocalEstimate enum value for a division of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a square root of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a division of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Returns true if target has indicated at least one type should be bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns map of slow types for division or remainder with corresponding
  /// fast types.
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }
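
  // Illustrative sketch: a target with a slow 64-bit divider but a fast
  // 32-bit one would register the pair {64, 32} in BypassSlowDivWidths
  // (X86 does this for some subtargets), and the div/rem bypass pass then
  // emits a run-time check that uses the narrow divide when both operands
  // fit in 32 bits:
  // \code
  //   if (TLI.isSlowDivBypassed())
  //     for (const auto &P : TLI.getBypassSlowDivWidths())
  //       ...; // e.g. P.first == 64 (slow), P.second == 32 (fast)
  // \endcode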

  /// Return true only if vscale must be a power of two.
  virtual bool isVScaleKnownToBeAPowerOfTwo() const { return false; }

  /// Return true if Flow Control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  // Cost parameters used by
  // SelectionDAGBuilder::shouldKeepJumpConditionsTogether.
  // shouldKeepJumpConditionsTogether will use these parameter values to
  // determine if two conditions in the form `br (and/or cond1, cond2)` should
  // be split into two branches or left as one.
  //
  // BaseCost is the cost threshold (in latency). If the estimated latency of
  // computing both `cond1` and `cond2` is below the cost of just computing
  // `cond1` + BaseCost, the two conditions will be kept together. Otherwise
  // they will be split.
  //
  // LikelyBias increases BaseCost if branch probability info indicates that it
  // is likely that both `cond1` and `cond2` will be computed.
  //
  // UnlikelyBias decreases BaseCost if branch probability info indicates that
  // it is unlikely that both `cond1` and `cond2` will be computed.
  //
  // Set any field to -1 to make it ignored (setting BaseCost to -1 results in
  // `shouldKeepJumpConditionsTogether` always returning false).
  struct CondMergingParams {
    int BaseCost;
    int LikelyBias;
    int UnlikelyBias;
  };
  // Return params for deciding if we should keep two branch conditions merged
  // or split them into two separate branches.
  // Arg0: The binary op joining the two conditions (and/or).
  // Arg1: The first condition (cond1)
  // Arg2: The second condition (cond2)
  virtual CondMergingParams
  getJumpConditionMergingParams(Instruction::BinaryOps, const Value *,
                                const Value *) const {
    // -1 will always result in splitting.
    return {-1, -1, -1};
  }
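
  // Worked example (illustrative): with params {/*BaseCost=*/2,
  // /*LikelyBias=*/1, /*UnlikelyBias=*/1} and profile info saying both
  // conditions are likely to be computed, the threshold becomes 2 + 1 = 3;
  // the conditions stay merged as `br (and cond1, cond2)` only if
  // latency(cond1, cond2) <= latency(cond1) + 3, otherwise the branch is
  // split in two. The default {-1, -1, -1} always splits.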

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  virtual bool fallBackToDAGISel(const Instruction &Inst) const {
    return false;
  }

  /// Return true if the following transform is beneficial:
  /// fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient. However, this can be undone by optimizations
  /// in the DAG combiner.
  virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
                                       const SelectionDAG &DAG,
                                       const MachineMemOperand &MMO) const;

  /// Return true if the following transform is beneficial:
  /// (store (y (conv x)), y*)) -> (store x, (x*))
  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
                                        const SelectionDAG &DAG,
                                        const MachineMemOperand &MMO) const {
    // Default to the same logic as loads.
    return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
  }

  /// Return true if it is expected to be cheaper to do a store of vector
  /// constant with the given size and type for the address space than to
  /// store the individual scalar element constants.
  virtual bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,
                                            unsigned NumElem,
                                            unsigned AddrSpace) const {
    return IsZero;
  }

  /// Allow store merging for the specified type after legalization in addition
  /// to before legalization. This may transform stores that do not exist
  /// earlier (for example, stores created from intrinsics).
  virtual bool mergeStoresAfterLegalization(EVT MemVT) const {
    return true;
  }

  /// Returns true if it's reasonable to merge stores to MemVT size.
  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
                                const MachineFunction &MF) const {
    return true;
  }

  /// Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz(Type *Ty) const {
    return false;
  }

  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz(Type *Ty) const {
    return false;
  }

  /// Return true if ctlz instruction is fast.
  virtual bool isCtlzFast() const {
    return false;
  }

  /// Return true if ctpop instruction is fast.
  virtual bool isCtpopFast(EVT VT) const {
    return isOperationLegal(ISD::CTPOP, VT);
  }

  /// Return the maximum number of "x & (x - 1)" operations that can be done
  /// instead of deferring to a custom CTPOP.
  virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const {
    return 1;
  }

  /// Return true if instruction generated for equality comparison is folded
  /// with instruction generated for signed comparison.
  virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }

  /// Return true if the heuristic to prefer icmp eq zero should be used in code
  /// gen prepare.
  virtual bool preferZeroCompareBranch() const { return false; }

  /// Return true if it is cheaper to split the store of a merged int val
  /// from a pair of smaller values into multiple stores.
  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
    return false;
  }

  /// Return true if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #mask
  ///   %icmpResult = icmp %andResult, 0
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   cc = test %register, #mask
  /// \endcode
  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
    return false;
  }

  /// Return true if it is valid to merge the TargetMMOFlags in two SDNodes.
  virtual bool
  areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode &NodeX,
                                      const MemSDNode &NodeY) const {
    return true;
  }

  /// Use bitwise logic to make pairs of compares more efficient. For example:
  /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
  /// This should be true when it takes more than one instruction to lower
  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
    return false;
  }

  /// Return the preferred operand type if the target has a quick way to compare
  /// integer values of the given size. Assume that any legal integer type can
  /// be compared efficiently. Targets may override this to allow illegal wide
  /// types to return a vector type if there is support to compare that type.
  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
    MVT VT = MVT::getIntegerVT(NumBits);
    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
  }
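
  // A sketch of a target override (hypothetical subtarget query, not the
  // actual X86 code): allow 256-bit memcmp expansion to use a single vector
  // compare.
  // \code
  //   MVT hasFastEqualityCompare(unsigned NumBits) const override {
  //     if (NumBits == 256 && Subtarget.hasWideVectorCompare())
  //       return MVT::v32i8;
  //     return TargetLoweringBase::hasFastEqualityCompare(NumBits);
  //   }
  // \endcode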

  /// Return true if the target should transform:
  /// (X & Y) == Y ---> (~X & Y) == 0
  /// (X & Y) != Y ---> (~X & Y) != 0
  ///
  /// This may be profitable if the target has a bitwise and-not operation that
  /// sets comparison flags. A target may want to limit the transformation based
  /// on the type of Y or if Y is a constant.
  ///
  /// Note that the transform will not occur if Y is known to be a power-of-2
  /// because a mask and compare of a single bit can be handled by inverting the
  /// predicate, for example:
  /// (X & 8) == 8 ---> (X & 8) != 0
  virtual bool hasAndNotCompare(SDValue Y) const {
    return false;
  }

  /// Return true if the target has a bitwise and-not operation:
  /// X = ~A & B
  /// This can be used to simplify select or other instructions.
  virtual bool hasAndNot(SDValue X) const {
    // If the target has the more complex version of this operation, assume that
    // it has this operation too.
    return hasAndNotCompare(X);
  }
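
  // Targets with a native and-not instruction (e.g. AArch64 BIC, PowerPC
  // ANDC, or x86's BMI ANDN) typically return true here so DAG combines
  // keep, or form, the ~A & B pattern instead of decomposing it.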

  /// Return true if the target has a bit-test instruction:
  ///   (X & (1 << Y)) ==/!= 0
  /// This knowledge can be used to prevent breaking the pattern,
  /// or creating it if it could be recognized.
  virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }

  /// There are two ways to clear extreme bits (either low or high):
  /// Mask:    x &  (-1 << y)  (the instcombine canonical form)
  /// Shifts:  x >> y << y
  /// Return true if the variant with 2 variable shifts is preferred.
  /// Return false if there is no preference.
  virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  /// This is usually true on most targets. But some targets, like Thumb1,
  /// have immediate shift instructions, but no immediate "and" instruction;
  /// this makes the fold unprofitable.
  virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                                 CombineLevel Level) const {
    return true;
  }

  /// Should we transform the IR-optimal check for whether given truncation
  /// down into KeptBits would be truncating or not:
  ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
  /// Into its more traditional form:
  ///   ((%x << C) a>> C) dstcond %x
  /// Return true if we should transform.
  /// Return false if there is no preference.
  virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
                                                    unsigned KeptBits) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Given the pattern
  ///   (X & (C l>>/<< Y)) ==/!= 0
  /// return true if it should be transformed into:
  ///   ((X <</l>> Y) & C) ==/!= 0
  /// WARNING: if 'X' is a constant, the fold may deadlock!
  /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
  ///        here because it can end up being not linked in.
  virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const {
    if (hasBitTest(X, Y)) {
      // One interesting pattern that we'd want to form is 'bit test':
      //   ((1 << Y) & C) ==/!= 0
      // But we also need to be careful not to try to reverse that fold.

      // Is this '1 << Y' ?
      if (OldShiftOpcode == ISD::SHL && CC->isOne())
        return false; // Keep the 'bit test' pattern.

      // Will it be '1 << Y' after the transform ?
      if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
        return true; // Do form the 'bit test' pattern.
    }

    // If 'X' is a constant, and we transform, then we will immediately
    // try to undo the fold, thus causing endless combine loop.
    // So by default, let's assume everyone prefers the fold
    // iff 'X' is not a constant.
    return !XC;
  }

  // Return true if it is desirable to perform the following transform:
  // (fmul C, (uitofp Pow2))
  //     -> (bitcast_to_FP (add (bitcast_to_INT C), Log2(Pow2) << mantissa))
  // (fdiv C, (uitofp Pow2))
  //     -> (bitcast_to_FP (sub (bitcast_to_INT C), Log2(Pow2) << mantissa))
  //
  // This is only queried after we have verified the transform will be bitwise
  // equals.
  //
  // SDNode *N      : The FDiv/FMul node we want to transform.
  // SDValue FPConst: The Float constant operand in `N`.
  // SDValue IntPow2: The Integer power of 2 operand in `N`.
  virtual bool optimizeFMulOrFDivAsShiftAddBitcast(SDNode *N, SDValue FPConst,
                                                   SDValue IntPow2) const {
    // Default to avoiding fdiv which is often very expensive.
    return N->getOpcode() == ISD::FDIV;
  }

  // Given:
  //    (icmp eq/ne (and X, C0), (shift X, C1))
  // or
  //    (icmp eq/ne X, (rotate X, CPow2))

  // If C0 is a mask or shifted mask and the shift amt (C1) isolates the
  // remaining bits (i.e. something like `(x64 & UINT32_MAX) == (x64 >> 32)`),
  // do we prefer the shift to be shift-right, shift-left, or rotate?
  // Note: It is only valid to convert the rotate version to the shift version
  // iff the shift amount (`C1`) is a power of 2 (including 0).
  // If ShiftOpc (current Opcode) is returned, do nothing.
  virtual unsigned preferedOpcodeForCmpEqPiecesOfOperand(
      EVT VT, unsigned ShiftOpc, bool MayTransformRotate,
      const APInt &ShiftOrRotateAmt,
      const std::optional<APInt> &AndMask) const {
    return ShiftOpc;
  }

  /// These two forms are equivalent:
  ///   sub %y, (xor %x, -1)
  ///   add (add %x, 1), %y
  /// The variant with two add's is IR-canonical.
  /// Some targets may prefer one to the other.
  virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
    // By default, let's assume that everyone prefers the form with two add's.
    return true;
  }

  // By default prefer folding (abs (sub nsw x, y)) -> abds(x, y). Some targets
  // may want to avoid this to prevent loss of sub_nsw pattern.
  virtual bool preferABDSToABSWithNSW(EVT VT) const {
    return true;
  }

  // Return true if the target wants to transform Op(Splat(X)) -> Splat(Op(X))
  virtual bool preferScalarizeSplat(SDNode *N) const { return true; }

  // Return true if the target wants to transform:
  // (TruncVT truncate(sext_in_reg(VT X, ExtVT))
  //  -> (TruncVT sext_in_reg(truncate(VT X), ExtVT))
  // Some targets might prefer pre-sextinreg to improve truncation/saturation.
  virtual bool preferSextInRegOfTruncate(EVT TruncVT, EVT VT, EVT ExtVT) const {
    return true;
  }

  /// Return true if the target wants to use the optimization that
  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
  /// promotedInst1(...(promotedInstN(ext(load)))).
  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }

  /// Return true if the target can combine store(extractelement VectorTy,
  /// Idx).
  /// \p Cost[out] gives the cost of that transformation when this is true.
  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                         unsigned &Cost) const {
    return false;
  }

  /// Return true if the target shall perform extract vector element and store
  /// given that the vector is known to be splat of constant.
  /// \p Index[out] gives the index of the vector element to be extracted when
  /// this is true.
  virtual bool shallExtractConstSplatVectorElementToStore(
      Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const {
    return false;
  }

  /// Return true if inserting a scalar into a variable element of an undef
  /// vector is more efficiently handled by splatting the scalar instead.
  virtual bool shouldSplatInsEltVarIndex(EVT) const {
    return false;
  }

  /// Return true if target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(EVT VT) const { return false; }

  /// Return true if target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(LLT Ty) const { return false; }

  /// Return the ValueType of the result of SETCC operations.
  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                 EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls, and Ordered/Unordered check calls on
  /// floating point numbers.
  virtual
  MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1.  Some cpus
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds.  For example on X86 a scalar boolean should
  /// be zero extended from i1, while the elements of a vector of booleans
  /// should be sign extended from i1.
  ///
  /// Some cpus also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }

  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }

  /// Promote the given target boolean to a target boolean of the given type.
  /// A target boolean is an integer value, not necessarily of type i1, the bits
  /// of which conform to getBooleanContents.
  ///
  /// ValVT is the type of values that produced the boolean.
  SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool,
                               EVT ValVT) const {
    SDLoc dl(Bool);
    EVT BoolVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ValVT);
    ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(ValVT));
    return DAG.getNode(ExtendCode, dl, BoolVT, Bool);
  }

  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes. This function returns the preference
  /// (or none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *getRegClassFor(MVT VT, bool isDivergent = false) const {
    (void)isDivergent;
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Allows target to decide about the register class of the
  /// specific value that is live outside the defining block.
  /// Returns true if the value needs uniform register class.
  virtual bool requiresUniformRegister(MachineFunction &MF,
                                       const Value *) const {
    return false;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type.  For example, on
  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
  /// register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return the preferred strategy to legalize this SHIFT instruction, with
  /// \p ExpansionFactor being the recursion depth - how many expansions are
  /// needed.
  enum class ShiftLegalizationStrategy {
    ExpandToParts,
    ExpandThroughStack,
    LowerToLibcall
  };
  virtual ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const {
    if (ExpansionFactor == 1)
      return ShiftLegalizationStrategy::ExpandToParts;
    return ShiftLegalizationStrategy::ExpandThroughStack;
  }

  /// Return true if the target has native support for the specified value type.
  /// This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < std::size(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    LegalizeTypeAction ValueTypeActions[MVT::VALUETYPE_SIZE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
                TypeLegal);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      ValueTypeActions[VT.SimpleTy] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return pair that represents the legalization kind (first) that needs to
  /// happen to EVT (second) in order to type-legalize it.
  ///
  /// First: how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand').  'Custom' is not an option.
  ///
  /// Second: for types supported by the target, this is an identity function.
  /// For types that must be promoted to larger types, this returns the larger
  /// type to promote to.  For integer types that are larger than the largest
  /// integer register, this contains one step in the expansion to get to the
  /// smaller register. For illegal floating point types, this returns the
  /// integer type to transform to.
  LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;

  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand').  'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// For types supported by the target, this is an identity function.  For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to.  For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to transform to.
  virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function.  For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }
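
  // Worked example: on a target whose widest legal integer type is i64,
  // getTypeToExpandTo(Ctx, i256) takes the TypeExpandInteger step twice
  // (i256 -> i128 -> i64) and returns i64, while a legal type such as i32
  // is returned unchanged.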

  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack.  Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register.  It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;

  /// Certain targets such as MIPS require that some types such as vectors are
  /// always broken down into scalars in some contexts. This occurs even if the
  /// vector type is legal.
  virtual unsigned getVectorTypeBreakdownForCallingConv(
      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
      unsigned &NumIntermediates, MVT &RegisterVT) const {
    return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
                                  RegisterVT);
  }

  struct IntrinsicInfo {
    unsigned     opc = 0;          // target opcode
    EVT          memVT;            // memory VT

    // value representing memory location
    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;

    // Fallback address space for use if ptrVal is nullptr. std::nullopt means
    // unknown address space.
    std::optional<unsigned> fallbackAddressSpace;

    int          offset = 0;       // offset off of ptrVal
    uint64_t     size = 0;         // the size of the memory location
                                   // (taken from memVT if zero)
    MaybeAlign align = Align(1);   // alignment

    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
    IntrinsicInfo() = default;
  };
  /// Given an intrinsic, checks if on the target the intrinsic will need to map
  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
  /// true and stores the intrinsic information into the IntrinsicInfo that was
  /// passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  MachineFunction &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
                            bool ForCodeSize = false) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks.  By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
  /// legal.
  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for integer divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// How to legalize this custom operation?
  virtual LegalizeAction getCustomOperationAction(SDNode &Op) const {
    return Legal;
  }

  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= std::size(OpActions[0]))
      return Custom;
    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
  }

  /// Custom method defined by each target to indicate if an operation which
  /// may require a scale is supported natively by the target.
  /// If not, the operation is illegal.
  virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    return false;
  }

  /// Some fixed point operations may be natively supported by the target but
  /// only for specific scales. This method allows for checking
  /// if the width is supported by the target for a given operation that may
  /// depend on scale.
  LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    auto Action = getOperationAction(Op, VT);
    if (Action != Legal)
      return Action;

    // This operation is supported in this type but may only work on specific
    // scales.
    bool Supported;
    switch (Op) {
    default:
      llvm_unreachable("Unexpected fixed point operation.");
    case ISD::SMULFIX:
    case ISD::SMULFIXSAT:
    case ISD::UMULFIX:
    case ISD::UMULFIXSAT:
    case ISD::SDIVFIX:
    case ISD::SDIVFIXSAT:
    case ISD::UDIVFIX:
    case ISD::UDIVFIXSAT:
      Supported = isSupportedFixedPointOperation(Op, VT, Scale);
      break;
    }

    return Supported ? Action : Expand;
  }
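
  // Illustrative sketch (not part of the interface): a target that only
  // supports, say, saturating signed fixed-point multiply on i32 at scales
  // below 32 might override the hook above roughly as follows (hypothetical
  // target code):
  //
  //   bool MyTargetLowering::isSupportedFixedPointOperation(
  //       unsigned Op, EVT VT, unsigned Scale) const {
  //     return Op == ISD::SMULFIXSAT && VT == MVT::i32 && Scale < 32;
  //   }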

  // If Op is a strict floating-point operation, return the result
  // of getOperationAction for the equivalent non-strict operation.
  LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
    unsigned EqOpc;
    switch (Op) {
      default: llvm_unreachable("Unexpected FP pseudo-opcode");
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
      case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
      case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
    }

    return getOperationAction(EqOpc, VT);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions. LegalOnly is an optional convenience for code paths
  /// traversed pre and post legalisation.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT,
                                bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom);
  }
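
  // Illustrative sketch (not part of the interface): DAG combines typically
  // guard target-dependent transforms with these predicates, e.g. before
  // forming a min node:
  //
  //   if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
  //     return DAG.getNode(ISD::FMINNUM, DL, VT, LHS, RHS);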

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level lowering
  /// decisions. LegalOnly is an optional convenience for code paths traversed
  /// pre and post legalisation.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT,
                                 bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering or using promotion. This is used to help
  /// guide high-level lowering decisions. LegalOnly is an optional convenience
  /// for code paths traversed pre and post legalisation.
  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT,
                                         bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom ||
       getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the operation uses custom lowering, regardless of whether
  /// the type is legal or not.
  bool isOperationCustom(unsigned Op, EVT VT) const {
    return getOperationAction(Op, VT) == Custom;
  }

  /// Return true if lowering to a jump table is allowed.
  virtual bool areJTsAllowed(const Function *Fn) const {
    if (Fn->getFnAttribute("no-jump-tables").getValueAsBool())
      return false;

    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  /// Check whether the range [Low,High] fits in a machine word.
  bool rangeFitsInWord(const APInt &Low, const APInt &High,
                       const DataLayout &DL) const {
    // FIXME: Using the pointer type doesn't seem ideal.
    uint64_t BW = DL.getIndexSizeInBits(0u);
    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
    return Range <= BW;
  }
  /// Return true if lowering to a jump table is suitable for a set of case
  /// clusters which may contain \p NumCases cases and a range of \p Range
  /// values.
  virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
                                      uint64_t Range, ProfileSummaryInfo *PSI,
                                      BlockFrequencyInfo *BFI) const;

  /// Returns preferred type for switch condition.
  virtual MVT getPreferredSwitchConditionType(LLVMContext &Context,
                                              EVT ConditionVT) const;

  /// Return true if lowering to a bit test is suitable for a set of case
  /// clusters which contains \p NumDests unique destinations, \p Low and
  /// \p High as its lowest and highest case values, and expects \p NumCmps
  /// case value comparisons. Check if the number of destinations, comparison
  /// metric, and range are all suitable.
  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
                             const APInt &Low, const APInt &High,
                             const DataLayout &DL) const {
    // FIXME: I don't think NumCmps is the correct metric: a single case and a
    // range of cases both require only one branch to lower. Just looking at the
    // number of clusters and destinations should be enough to decide whether to
    // build bit tests.

    // To lower a range with bit tests, the range must fit the bitwidth of a
    // machine word.
    if (!rangeFitsInWord(Low, High, DL))
      return false;

    // Decide whether it's profitable to lower this range with bit tests. Each
    // destination requires a bit test and branch, and there is an overall range
    // check branch. For a small number of clusters, separate comparisons might
    // be cheaper, and for many destinations, splitting the range might be
    // better.
    return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
           (NumDests == 3 && NumCmps >= 6);
  }
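
  // Worked example (illustrative only): a switch with cases {0, 3, 5, 9} all
  // branching to one destination has NumDests == 1 and NumCmps == 4, and on a
  // target with 64-bit words the range [0,9] fits in a word, so the heuristic
  // above accepts it: one mask test plus branch replaces four comparisons.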

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help guide
  /// high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                  EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
           MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
  }

  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return true if the specified load with extension is legal or custom
  /// on this target.
  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
  }
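
  // Illustrative note (not part of the interface): each LoadExtActions entry
  // packs one 4-bit LegalizeAction per extension kind, selected by
  // `Shift = 4 * ExtType` above. A combiner that wants to fold an extend into
  // a load would ask, e.g.:
  //
  //   if (TLI.isLoadExtLegal(ISD::SEXTLOAD, MVT::i32, MVT::i16))
  //     ... // fold (sext (i16 load x)) into a single extending i32 load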

  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to some
  /// other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ValI < MVT::VALUETYPE_SIZE && MemI < MVT::VALUETYPE_SIZE &&
           "Table isn't big enough!");
    return TruncStoreActions[ValI][MemI];
  }

  /// Return true if the specified store with truncation is legal on this
  /// target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
  }
  /// Return true if the specified store with truncation is legal or custom on
  /// this target.
  bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) &&
      (getTruncStoreAction(ValVT, MemVT) == Legal ||
       getTruncStoreAction(ValVT, MemVT) == Custom);
  }

  virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT,
                                    bool LegalOnly) const {
    if (LegalOnly)
      return isTruncStoreLegal(ValVT, MemVT);

    return isTruncStoreLegalOrCustom(ValVT, MemVT);
  }

  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Load);
  }

  /// Return true if the specified indexed load is legal on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Store);
  }

  /// Return true if the specified indexed store is legal on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the masked indexed load should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
  }

  /// Return true if the specified masked indexed load is legal on this target.
  bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the masked indexed store should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
  }

  /// Return true if the specified masked indexed store is legal on this
  /// target.
  bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }
  /// Returns true if the index type for a masked gather/scatter requires
  /// extending.
  virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const { return false; }

  // Returns true if Extend can be folded into the index of a masked
  // gather/scatter on this target.
  virtual bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const {
    return false;
  }

  // Return true if the target supports a scatter/gather instruction with
  // indices which are scaled by the particular value.  Note that all targets
  // must by definition support scale of 1.
  virtual bool isLegalScaleForGatherScatter(uint64_t Scale,
                                            uint64_t ElemSize) const {
    // MGATHER/MSCATTER are only required to support scaling by one or by the
    // element size.
    if (Scale != ElemSize && Scale != 1)
      return false;
    return true;
  }

  /// Return how the condition code should be treated: either it is legal, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < std::size(CondCodeActions) &&
           ((unsigned)VT.SimpleTy >> 3) < std::size(CondCodeActions[0]) &&
           "Table isn't big enough!");
    // See setCondCodeAction for how this is encoded.
    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }
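
  // Illustrative note (not part of the interface): CondCodeActions packs
  // eight 4-bit LegalizeAction entries per uint32_t, so for a given CC the
  // action for value type VT lives in word `VT.SimpleTy >> 3` at nibble
  // `VT.SimpleTy & 0x7`, which is exactly what the shift/mask arithmetic
  // above extracts.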

  /// Return true if the specified condition code is legal on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal;
  }

  /// Return true if the specified condition code is legal or custom on this
  /// target.
  bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal ||
           getCondCodeAction(CC, VT) == Custom;
  }

  /// If the action for this operation is to promote, this method returns the
  /// ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    uint64_t VTBits = VT.getScalarSizeInBits();
    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (VTBits >= NVT.getScalarSizeInBits() || !isTypeLegal(NVT) ||
             getOperationAction(Op, NVT) == Promote);
    return NVT;
  }
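
  // Illustrative note (not part of the interface): when no explicit promoted
  // type was registered with AddPromotedToType, the loop above walks the MVT
  // enumeration upwards until it finds a strictly wider legal type whose
  // action for Op is not itself Promote. E.g. an i16 operation on a target
  // with only legal i32 arithmetic would typically autopromote to MVT::i32.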

  virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
                                     bool AllowUnknown = false) const {
    return getValueType(DL, Ty, AllowUnknown);
  }

  /// Return the EVT corresponding to this LLVM type.  This is fixed by the LLVM
  /// operations except for the pointer size.  If AllowUnknown is true, this
  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
  /// otherwise it will assert.
  EVT getValueType(const DataLayout &DL, Type *Ty,
                   bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (auto *PTy = dyn_cast<PointerType>(Ty))
      return getPointerTy(DL, PTy->getAddressSpace());

    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      Type *EltTy = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
        EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace()));
        EltTy = PointerTy.getTypeForEVT(Ty->getContext());
      }
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
                              VTy->getElementCount());
    }

    return EVT::getEVT(Ty, AllowUnknown);
  }
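
  // Illustrative sketch (not part of the interface): on a typical 64-bit
  // target, a `ptr` lowers to MVT::i64 and a `<4 x ptr>` lowers to v4i64,
  // because both branches above rewrite pointers through getPointerTy():
  //
  //   EVT ScalarVT = TLI.getValueType(DL, PointerType::get(Ctx, 0));
  //   // ScalarVT == MVT::i64 when DL has 64-bit pointers in AS 0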

  EVT getMemValueType(const DataLayout &DL, Type *Ty,
                      bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (auto *PTy = dyn_cast<PointerType>(Ty))
      return getPointerMemTy(DL, PTy->getAddressSpace());

    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      Type *EltTy = VTy->getElementType();
      if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
        EVT PointerTy(getPointerMemTy(DL, PTy->getAddressSpace()));
        EltTy = PointerTy.getTypeForEVT(Ty->getContext());
      }
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
                              VTy->getElementCount());
    }

    return getValueType(DL, Ty, AllowUnknown);
  }

  /// Return the MVT corresponding to this LLVM type. See getValueType.
  MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
                         bool AllowUnknown = false) const {
    return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
  }

  /// Return the desired alignment for ByVal or InAlloca aggregate function
  /// arguments in the caller parameter area.  This is the actual alignment, not
  /// its logarithm.
  virtual uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(MVT VT) const {
    assert((unsigned)VT.SimpleTy < std::size(RegisterTypeForVT));
    return RegisterTypeForVT[VT.SimpleTy];
  }

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple())
      return getRegisterType(VT.getSimpleVT());
    if (VT.isVector()) {
      EVT VT1;
      MVT RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(Context, VT, VT1,
                                   NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger()) {
      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
    }
    llvm_unreachable("Unsupported extended type!");
  }

  /// Return the number of registers that this ValueType will eventually
  /// require.
  ///
  /// This is one for any types promoted to live in larger registers, but may be
  /// more than one for types (like i64) that are split into pieces.  For types
  /// like i140, which are first promoted then expanded, it is the number of
  /// registers needed to hold all the bits of the original type.  For an i140
  /// on a 32 bit machine this means 5 registers.
  ///
  /// RegisterVT may be passed as a way to override the default settings, for
  /// instance with i128 inline assembly operands on SystemZ.
  virtual unsigned
  getNumRegisters(LLVMContext &Context, EVT VT,
                  std::optional<MVT> RegisterVT = std::nullopt) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
             std::size(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    llvm_unreachable("Unsupported extended type!");
  }
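
  // Worked example (illustrative only): for an extended integer such as i140
  // on a 32-bit target, the integer branch above computes
  // ceil(140 / 32) = (140 + 31) / 32 = 5 registers, matching the i140
  // example in the comment above.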

  /// Certain combinations of ABIs, Targets and features require that types
  /// are legal for some operations and not for other operations.
  /// For MIPS all vector types must be passed through the integer register set.
  virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
                                            CallingConv::ID CC, EVT VT) const {
    return getRegisterType(Context, VT);
  }
  /// Certain targets require unusual breakdowns of certain types. For MIPS,
  /// this occurs when a vector type is used, as vectors are passed through the
  /// integer register set.
  virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                                 CallingConv::ID CC,
                                                 EVT VT) const {
    return getNumRegisters(Context, VT);
  }

  /// Certain targets have context sensitive alignment requirements, where one
  /// type has the alignment requirement of another type.
  virtual Align getABIAlignmentForCallingConv(Type *ArgTy,
                                              const DataLayout &DL) const {
    return DL.getABITypeAlign(ArgTy);
  }

  /// If true, then instruction selection should seek to shrink the FP constant
  /// of the specified type to a smaller type in order to save space and / or
  /// reduce runtime.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
  /// Return true if it is profitable to reduce a load to a smaller type.
  /// Example: (i16 (trunc (i32 (load x)))) -> i16 load x
  virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                                     EVT NewVT) const {
    // By default, assume that it is cheaper to extract a subvector from a wide
    // vector load rather than creating multiple narrow vector loads.
    if (NewVT.isVector() && !Load->hasOneUse())
      return false;

    return true;
  }

  /// Return true (the default) if it is profitable to remove a sext_inreg(x)
  /// where the sext is redundant, and use x directly.
  virtual bool shouldRemoveRedundantExtend(SDValue Op) const { return true; }

  /// When splitting a value of the specified type into parts, does the Lo
  /// or Hi part come first?  This usually follows the endianness, except
  /// for ppcf128, where the Hi part always comes first.
  bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
    return DL.isBigEndian() || VT == MVT::ppcf128;
  }

  /// If true, the target has custom DAG combine transformations that it can
  /// perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }
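
  // Illustrative note (not part of the interface): TargetDAGCombineArray is a
  // plain bitset with one bit per ISD::NodeType, so node NT maps to byte
  // NT >> 3 and bit NT & 7, exactly the indexing performed above. Targets set
  // these bits via setTargetDAGCombine() in their TargetLowering constructor.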

  unsigned getGatherAllAliasesMaxDepth() const {
    return GatherAllAliasesMaxDepth;
  }

  /// Returns the size of the platform's va_list object.
  virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
    return getPointerTy(DL).getSizeInBits();
  }

  /// Get maximum # of store operations permitted for llvm.memset
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemset(bool OptSize) const {
    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
  }

  /// Get maximum # of store operations permitted for llvm.memcpy
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
  }
  /// Get maximum # of store operations to be glued together
  ///
  /// This function returns the maximum number of store operations permitted
  /// to glue together during lowering of llvm.memcpy. The value is set by
  /// the target at the performance threshold for such a replacement.
  virtual unsigned getMaxGluedStoresPerMemcpy() const {
    return MaxGluedStoresPerMemcpy;
  }

  /// Get maximum # of load operations permitted for memcmp
  ///
  /// This function returns the maximum number of load operations permitted
  /// to replace a call to memcmp. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
    return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
  }

  /// Get maximum # of store operations permitted for llvm.memmove
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemmove(bool OptSize) const {
    return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
  }

  /// Determine if the target supports unaligned memory accesses.
  ///
  /// This function returns true if the target allows unaligned memory accesses
  /// of the specified type in the given address space. If true, it also returns
  /// a relative speed of the unaligned memory access in the last argument by
  /// reference. The higher the speed number the faster the operation compared
  /// to a number returned by another such call. This is used, for example, in
  /// situations where an array copy/move/set is converted to a sequence of
  /// store operations. Its use helps to ensure that such replacements don't
  /// generate code that causes an alignment error (trap) on the target machine.
  virtual bool allowsMisalignedMemoryAccesses(
      EVT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned * /*Fast*/ = nullptr) const {
    return false;
  }

  /// LLT handling variant.
  virtual bool allowsMisalignedMemoryAccesses(
      LLT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned * /*Fast*/ = nullptr) const {
    return false;
  }

  /// This function returns true if the memory access is aligned or if the
  /// target allows this specific unaligned memory access. If the access is
  /// allowed, the optional final parameter returns a relative speed of the
  /// access (as defined by the target).
  bool allowsMemoryAccessForAlignment(
      LLVMContext &Context, const DataLayout &DL, EVT VT,
      unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *Fast = nullptr) const;

  /// Return true if the memory access of this type is aligned or if the target
  /// allows this specific unaligned access for the given MachineMemOperand.
  /// If the access is allowed, the optional final parameter returns a relative
  /// speed of the access (as defined by the target).
  bool allowsMemoryAccessForAlignment(LLVMContext &Context,
                                      const DataLayout &DL, EVT VT,
                                      const MachineMemOperand &MMO,
                                      unsigned *Fast = nullptr) const;

  /// Return true if the target supports a memory access of this type for the
  /// given address space and alignment. If the access is allowed, the optional
  /// final parameter returns the relative speed of the access (as defined by
  /// the target).
  virtual bool
  allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                     unsigned AddrSpace = 0, Align Alignment = Align(1),
                     MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
                     unsigned *Fast = nullptr) const;

  /// Return true if the target supports a memory access of this type for the
  /// given MachineMemOperand. If the access is allowed, the optional
  /// final parameter returns the relative access speed (as defined by the
  /// target).
  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                          const MachineMemOperand &MMO,
                          unsigned *Fast = nullptr) const;

  /// LLT handling variant.
  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, LLT Ty,
                          const MachineMemOperand &MMO,
                          unsigned *Fast = nullptr) const;

  /// Returns the target specific optimal type for load and store operations as
  /// a result of memset, memcpy, and memmove lowering.
  /// It returns EVT::Other if the type should be determined using generic
  /// target-independent logic.
  virtual EVT
  getOptimalMemOpType(const MemOp &Op,
                      const AttributeList & /*FuncAttributes*/) const {
    return MVT::Other;
  }

  /// LLT returning variant.
  virtual LLT
  getOptimalMemOpLLT(const MemOp &Op,
                     const AttributeList & /*FuncAttributes*/) const {
    return LLT();
  }

  /// Returns true if it's safe to use load / store of the specified type to
  /// expand memcpy / memset inline.
  ///
  /// This is mostly true for all types except for some special cases. For
  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
  /// fstpl which also does type conversion. Note the specified type doesn't
  /// have to be legal as the hook is used before type legalization.
  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }

  /// Return lower limit for number of blocks in a jump table.
  virtual unsigned getMinimumJumpTableEntries() const;

  /// Return lower limit of the density in a jump table.
  unsigned getMinimumJumpTableDensity(bool OptForSize) const;

  /// Return upper limit for number of entries in a jump table.
  /// Zero if no limit.
  unsigned getMaximumJumpTableSize() const;

  virtual bool isJumpTableRelative() const;

  /// If a physical register, this specifies the register that
  /// llvm.savestack/llvm.restorestack should save and restore.
  Register getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  virtual Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const {
    return Register();
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  virtual Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const {
    return Register();
  }

  virtual bool needsFixedCatchObjects() const {
    report_fatal_error("Funclet EH is not implemented for this target");
  }

  /// Return the minimum stack alignment of an argument.
  Align getMinStackArgumentAlignment() const {
    return MinStackArgumentAlignment;
  }

  /// Return the minimum function alignment.
  Align getMinFunctionAlignment() const { return MinFunctionAlignment; }

  /// Return the preferred function alignment.
  Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }

  /// Return the preferred loop alignment.
  virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const;

  /// Return the maximum amount of bytes allowed to be emitted when padding for
  /// alignment
  virtual unsigned
  getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const;

  /// Should loops be aligned even when the function is marked OptSize (but not
  /// MinSize).
  virtual bool alignLoopsWithOptSize() const { return false; }

  /// If the target has a standard location for the stack protector guard,
  /// returns the address of that location. Otherwise, returns nullptr.
  /// DEPRECATED: please override useLoadStackGuardNode and customize
  ///             LOAD_STACK_GUARD, or customize \@llvm.stackguard().
  virtual Value *getIRStackGuard(IRBuilderBase &IRB) const;

  /// Inserts necessary declarations for SSP (stack protection) purpose.
  /// Should be used only when getIRStackGuard returns nullptr.
  virtual void insertSSPDeclarations(Module &M) const;

  /// Return the variable that's previously inserted by insertSSPDeclarations,
  /// if any, otherwise return nullptr. Should be used only when
  /// getIRStackGuard returns nullptr.
  virtual Value *getSDagStackGuard(const Module &M) const;

  /// If this function returns true, stack protection checks should XOR the
  /// frame pointer (or whichever pointer is used to address locals) into the
  /// stack guard value before checking it. getIRStackGuard must return nullptr
  /// if this returns true.
  virtual bool useStackGuardXorFP() const { return false; }

  /// If the target has a standard stack protection check function that
  /// performs validation and error handling, returns the function. Otherwise,
  /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
  /// Should be used only when getIRStackGuard returns nullptr.
  virtual Function *getSSPStackGuardCheck(const Module &M) const;

protected:
  Value *getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
                                            bool UseTLS) const;

public:
  /// Returns the target-specific address of the unsafe stack pointer.
  virtual Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const;

  /// Returns the name of the symbol used to emit stack probes or the empty
  /// string if not applicable.
  virtual bool hasStackProbeSymbol(const MachineFunction &MF) const {
    return false;
  }

  virtual bool hasInlineStackProbe(const MachineFunction &MF) const {
    return false;
  }

  virtual StringRef getStackProbeSymbolName(const MachineFunction &MF) const {
    return "";
  }

  /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
  /// are happy to sink it into basic blocks. A cast may be free, but not
  /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
  virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const;

  /// Return true if the pointer arguments to CI should be aligned by aligning
  /// the object whose address is being passed. If so then MinSize is set to the
  /// minimum size the object must be to be aligned and PrefAlign is set to the
  /// preferred alignment.
  virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
                                      Align & /*PrefAlign*/) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  /// \name Helpers for TargetTransformInfo implementations
  /// @{

  /// Get the ISD node that corresponds to the Instruction class opcode.
  int InstructionOpcodeToISD(unsigned Opcode) const;

  /// @}

  //===--------------------------------------------------------------------===//
  /// \name Helpers for atomic expansion.
  /// @{

  /// Returns the maximum atomic operation size (in bits) supported by
  /// the backend. Atomic operations greater than this size (as well
  /// as ones that are not naturally aligned), will be expanded by
  /// AtomicExpandPass into an __atomic_* library call.
  unsigned getMaxAtomicSizeInBitsSupported() const {
    return MaxAtomicSizeInBitsSupported;
  }

  /// Returns the size in bits of the maximum div/rem the backend supports.
  /// Larger operations will be expanded by ExpandLargeDivRem.
  unsigned getMaxDivRemBitWidthSupported() const {
    return MaxDivRemBitWidthSupported;
  }
  /// Returns the size in bits of the largest fp convert the backend
  /// supports. Larger operations will be expanded by ExpandLargeFPConvert.
  unsigned getMaxLargeFPConvertBitWidthSupported() const {
    return MaxLargeFPConvertBitWidthSupported;
  }

  /// Returns the size of the smallest cmpxchg or ll/sc instruction
  /// the backend supports.  Any smaller operations are widened in
  /// AtomicExpandPass.
  ///
  /// Note that *unlike* operations above the maximum size, atomic ops
  /// are still natively supported below the minimum; they just
  /// require a more complex expansion.
  unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }

  /// Whether the target supports unaligned atomic operations.
  bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }

  /// Whether AtomicExpandPass should automatically insert fences and reduce
  /// ordering for this atomic. This should be true for most architectures with
  /// weak memory ordering. Defaults to false.
  virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
    return false;
  }

  /// Whether AtomicExpandPass should automatically insert a trailing fence
  /// without reducing the ordering for this atomic. Defaults to false.
  virtual bool
  shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const {
    return false;
  }

  /// Perform a load-linked operation on Addr, returning a "Value *" with the
  /// corresponding pointee type. This may entail some non-trivial operations to
  /// truncate or reconstruct types that will be illegal in the backend. See
  /// ARMISelLowering for an example implementation.
  virtual Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
                                Value *Addr, AtomicOrdering Ord) const {
    llvm_unreachable("Load linked unimplemented on this target");
  }

  /// Perform a store-conditional operation to Addr. Return the status of the
  /// store. This should be 0 if the store succeeded, non-zero otherwise.
  virtual Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val,
                                      Value *Addr, AtomicOrdering Ord) const {
    llvm_unreachable("Store conditional unimplemented on this target");
  }

  /// Perform a masked atomicrmw using a target-specific intrinsic. This
  /// represents the core LL/SC loop which will be lowered at a late stage by
  /// the backend. The target-specific intrinsic returns the loaded value and
  /// is not responsible for masking and shifting the result.
  virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder,
                                              AtomicRMWInst *AI,
                                              Value *AlignedAddr, Value *Incr,
                                              Value *Mask, Value *ShiftAmt,
                                              AtomicOrdering Ord) const {
    llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
  }

  /// Perform an atomicrmw expansion using a target-specific way. This is
  /// expected to be called when masked atomicrmw and bit test atomicrmw don't
  /// work, and the target supports another way to lower atomicrmw.
  virtual void emitExpandAtomicRMW(AtomicRMWInst *AI) const {
    llvm_unreachable(
        "Generic atomicrmw expansion unimplemented on this target");
  }

  /// Perform a bit test atomicrmw using a target-specific intrinsic. This
  /// represents the combined bit test intrinsic which will be lowered at a late
  /// stage by the backend.
  virtual void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
    llvm_unreachable(
        "Bit test atomicrmw expansion unimplemented on this target");
  }

  /// Perform an atomicrmw whose result is only used by a comparison, using a
  /// target-specific intrinsic. This represents the combined atomic and compare
  /// intrinsic which will be lowered at a late stage by the backend.
  virtual void emitCmpArithAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
    llvm_unreachable(
        "Compare arith atomicrmw expansion unimplemented on this target");
  }
2178 
2179   /// Perform a masked cmpxchg using a target-specific intrinsic. This
2180   /// represents the core LL/SC loop which will be lowered at a late stage by
2181   /// the backend. The target-specific intrinsic returns the loaded value and
2182   /// is not responsible for masking and shifting the result.
emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase & Builder,AtomicCmpXchgInst * CI,Value * AlignedAddr,Value * CmpVal,Value * NewVal,Value * Mask,AtomicOrdering Ord)2183   virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
2184       IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
2185       Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
2186     llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
2187   }
2188 
2189   //===--------------------------------------------------------------------===//
2190   /// \name KCFI check lowering.
2191   /// @{
2192 
EmitKCFICheck(MachineBasicBlock & MBB,MachineBasicBlock::instr_iterator & MBBI,const TargetInstrInfo * TII)2193   virtual MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
2194                                       MachineBasicBlock::instr_iterator &MBBI,
2195                                       const TargetInstrInfo *TII) const {
2196     llvm_unreachable("KCFI is not supported on this target");
2197   }
2198 
2199   /// @}
2200 
2201   /// Inserts in the IR a target-specific intrinsic specifying a fence.
2202   /// It is called by AtomicExpandPass before expanding an
2203   ///   AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
2204   ///   if shouldInsertFencesForAtomic returns true.
2205   ///
2206   /// Inst is the original atomic instruction, prior to other expansions that
2207   /// may be performed.
2208   ///
2209   /// This function should either return a nullptr, or a pointer to an IR-level
2210   ///   Instruction*. Even complex fence sequences can be represented by a
2211   ///   single Instruction* through an intrinsic to be lowered later.
2212   ///
2213   /// The default implementation emits an IR fence before any release (or
2214   ///   stronger) operation that stores, and after any acquire (or stronger)
2215   ///   operation. This is generally a correct implementation, but backends may
2216   ///   override if they wish to use alternative schemes (e.g. the PowerPC
2217   ///   standard ABI uses a fence before a seq_cst load instead of after a
2218   ///   seq_cst store).
2219   /// @{
2220   virtual Instruction *emitLeadingFence(IRBuilderBase &Builder,
2221                                         Instruction *Inst,
2222                                         AtomicOrdering Ord) const;
2223 
2224   virtual Instruction *emitTrailingFence(IRBuilderBase &Builder,
2225                                          Instruction *Inst,
2226                                          AtomicOrdering Ord) const;
2227   /// @}
2228 
2229   // Emits code that executes when the comparison result in the ll/sc
2230   // expansion of a cmpxchg instruction is such that the store-conditional will
2231   // not execute.  This makes it possible to balance out the load-linked with
2232   // a dedicated instruction, if desired.
2233   // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
2234   // be unnecessarily held, except if clrex, inserted by this hook, is executed.
2235   virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {}
2236 
2237   /// Returns true if arguments should be sign-extended in lib calls.
2238   virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
2239     return IsSigned;
2240   }
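
  // Illustrative sketch (not part of the original header): a hypothetical
  // target whose libcall ABI sign-extends 32-bit integer arguments regardless
  // of their IR signedness might override this hook as follows.
  //
  //   bool MyTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
  //                                                        bool IsSigned) const {
  //     if (Type == MVT::i32)
  //       return true; // assumed ABI rule: i32 libcall args are sign-extended
  //     return IsSigned;
  //   }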
2241 
2242   /// Returns true if arguments should be extended in lib calls.
2243   virtual bool shouldExtendTypeInLibCall(EVT Type) const {
2244     return true;
2245   }
2246 
2247   /// Returns how the given (atomic) load should be expanded by the
2248   /// IR-level AtomicExpand pass.
2249   virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
2250     return AtomicExpansionKind::None;
2251   }
2252 
2253   /// Returns how the given (atomic) load should be cast by the IR-level
2254   /// AtomicExpand pass.
2255   virtual AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const {
2256     if (LI->getType()->isFloatingPointTy())
2257       return AtomicExpansionKind::CastToInteger;
2258     return AtomicExpansionKind::None;
2259   }
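
  // Illustrative sketch (not part of the original header): with the default
  // CastToInteger result above, AtomicExpandPass rewrites a floating-point
  // atomic load into an integer atomic load plus a bitcast, roughly:
  //
  //   %v = load atomic float, ptr %p seq_cst, align 4
  // becomes
  //   %i = load atomic i32, ptr %p seq_cst, align 4
  //   %v = bitcast i32 %i to float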
2260 
2261   /// Returns how the given (atomic) store should be expanded by the IR-level
2262   /// AtomicExpand pass. For instance AtomicExpansionKind::Expand will try
2263   /// to use an atomicrmw xchg.
2264   virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const {
2265     return AtomicExpansionKind::None;
2266   }
2267 
2268   /// Returns how the given (atomic) store should be cast by the IR-level
2269   /// AtomicExpand pass. For instance AtomicExpansionKind::CastToInteger
2270   /// will try to cast the operands to integer values.
2271   virtual AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const {
2272     if (SI->getValueOperand()->getType()->isFloatingPointTy())
2273       return AtomicExpansionKind::CastToInteger;
2274     return AtomicExpansionKind::None;
2275   }
2276 
2277   /// Returns how the given atomic cmpxchg should be expanded by the IR-level
2278   /// AtomicExpand pass.
2279   virtual AtomicExpansionKind
2280   shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
2281     return AtomicExpansionKind::None;
2282   }
2283 
2284   /// Returns how the IR-level AtomicExpand pass should expand the given
2285   /// AtomicRMW, if at all. Default is to never expand.
2286   virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
2287     return RMW->isFloatingPointOperation() ?
2288       AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None;
2289   }
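
  // Illustrative sketch (not part of the original header): with the default
  // above, an 'atomicrmw fadd' is expanded by AtomicExpandPass into a
  // compare-exchange loop (the float values are bitcast to i32 around the
  // cmpxchg itself), roughly:
  //
  //   %old = load float, ptr %p
  // loop:
  //   %new  = fadd float %old, %val
  //   %pair = cmpxchg ptr %p, i32 %old.cast, i32 %new.cast seq_cst seq_cst
  //   %ok   = extractvalue { i32, i1 } %pair, 1
  //   br i1 %ok, label %done, label %loop  ; retry with the freshly loaded value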
2290 
2291   /// Returns how the given atomicrmw should be cast by the IR-level
2292   /// AtomicExpand pass.
2293   virtual AtomicExpansionKind
2294   shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const {
2295     if (RMWI->getOperation() == AtomicRMWInst::Xchg &&
2296         (RMWI->getValOperand()->getType()->isFloatingPointTy() ||
2297          RMWI->getValOperand()->getType()->isPointerTy()))
2298       return AtomicExpansionKind::CastToInteger;
2299 
2300     return AtomicExpansionKind::None;
2301   }
2302 
2303   /// On some platforms, an AtomicRMW that never actually modifies the value
2304   /// (such as fetch_add of 0) can be turned into a fence followed by an
2305   /// atomic load. This may sound useless, but it makes it possible for the
2306   /// processor to keep the cacheline shared, dramatically improving
2307   /// performance. And such idempotent RMWs are useful for implementing some
2308   /// kinds of locks, see for example (justification + benchmarks):
2309   /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
2310   /// This method tries doing that transformation, returning the atomic load if
2311   /// it succeeds, and nullptr otherwise.
2312   /// If shouldExpandAtomicLoadInIR requests an expansion of that load, it
2313   /// will undergo another round of expansion.
2314   virtual LoadInst *
2315   lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
2316     return nullptr;
2317   }
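
  // Illustrative sketch (not part of the original header): a target using
  // this hook could rewrite the idempotent
  //
  //   %old = atomicrmw add ptr %p, i32 0 seq_cst
  // into
  //   fence seq_cst
  //   %old = load atomic i32, ptr %p monotonic, align 4
  //
  // and return the new load so the caller can replace the RMW's uses.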
2318 
2319   /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
2320   /// SIGN_EXTEND, or ANY_EXTEND).
2321   virtual ISD::NodeType getExtendForAtomicOps() const {
2322     return ISD::ZERO_EXTEND;
2323   }
2324 
2325   /// Returns how the platform's atomic compare and swap expects its comparison
2326   /// value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). This is
2327   /// separate from getExtendForAtomicOps, which is concerned with the
2328   /// sign-extension of the instruction's output, whereas here we are concerned
2329   /// with the sign-extension of the input. For targets with compare-and-swap
2330   /// instructions (or sub-word comparisons in their LL/SC loop expansions),
2331   /// the input can be ANY_EXTEND, but the output will still have a specific
2332   /// extension.
2333   virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const {
2334     return ISD::ANY_EXTEND;
2335   }
2336 
2337   /// @}
2338 
2339   /// Returns true if we should normalize
2340   /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
2341   /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
2342   /// that it saves us from materializing N0 and N1 in an integer register.
2343   /// Targets that are able to perform and/or on flags should return false here.
2344   virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
2345                                                EVT VT) const {
2346     // If a target has multiple condition registers, then it likely has logical
2347     // operations on those registers.
2348     if (hasMultipleConditionRegisters())
2349       return false;
2350     // Only do the transform if the value won't be split into multiple
2351     // registers.
2352     LegalizeTypeAction Action = getTypeAction(Context, VT);
2353     return Action != TypeExpandInteger && Action != TypeExpandFloat &&
2354       Action != TypeSplitVector;
2355   }
2356 
  /// Return true if it is profitable to combine select and compare patterns
  /// into a single FMINNUM/FMAXNUM node of type VT.
2357   virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }
2358 
2359   /// Return true if a select of constants (select Cond, C1, C2) should be
2360   /// transformed into simple math ops with the condition value. For example:
2361   /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
2362   virtual bool convertSelectOfConstantsToMath(EVT VT) const {
2363     return false;
2364   }
2365 
2366   /// Return true if it is profitable to transform an integer
2367   /// multiplication-by-constant into simpler operations like shifts and adds.
2368   /// This may be true if the target does not directly support the
2369   /// multiplication operation for the specified type or the sequence of simpler
2370   /// ops is faster than the multiply.
2371   virtual bool decomposeMulByConstant(LLVMContext &Context,
2372                                       EVT VT, SDValue C) const {
2373     return false;
2374   }
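
  // Illustrative sketch (not part of the original header): when this hook
  // returns true for (mul x, 9), the DAG combiner may rewrite the multiply
  // into shift-and-add form,
  //
  //   (mul x, 9) -> (add (shl x, 3), x)
  //
  // which can win on targets with a slow or missing integer multiplier.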
2375 
2376   /// Return true if it may be profitable to transform
2377   /// (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
2378   /// This may not be true if c1 and c2 can be represented as immediates but
2379   /// c1*c2 cannot, for example.
2380   /// The target should check if c1, c2 and c1*c2 can be represented as
2381   /// immediates, or have to be materialized into registers. If the target is
2382   /// unsure about some cases, it can return the default of true to let the
2383   /// DAGCombiner decide.
2384   /// AddNode is (add x, c1), and ConstNode is c2.
2385   virtual bool isMulAddWithConstProfitable(SDValue AddNode,
2386                                            SDValue ConstNode) const {
2387     return true;
2388   }
2389 
2390   /// Return true if it is more correct/profitable to use strict FP_TO_INT
2391   /// conversion operations - canonicalizing the FP source value instead of
2392   /// converting all cases and then selecting based on value.
2393   /// This may be true if the target throws exceptions for out of bounds
2394   /// conversions or has fast FP CMOV.
2395   virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
2396                                         bool IsSigned) const {
2397     return false;
2398   }
2399 
2400   /// Return true if it is beneficial to expand an @llvm.powi.* intrinsic.
2401   /// If not optimizing for size, expanding @llvm.powi.* intrinsics is always
2402   /// considered beneficial.
2403   /// If optimizing for size, expansion is only considered beneficial for up to
2404   /// 5 multiplies and a divide (if the exponent is negative).
2405   bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const {
2406     if (Exponent < 0)
2407       Exponent = -Exponent;
2408     uint64_t E = static_cast<uint64_t>(Exponent);
2409     return !OptForSize || (llvm::popcount(E) + Log2_64(E) < 7);
2410   }
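
  // Worked example (not part of the original header): for @llvm.powi(x, 16),
  // E = 16, popcount(E) = 1 and Log2_64(E) = 4, so 1 + 4 = 5 < 7 and the
  // expansion (four squarings) is emitted even when optimizing for size. For
  // @llvm.powi(x, -31), E = 31, popcount(E) = 5 and Log2_64(E) = 4, so
  // 5 + 4 = 9 >= 7 and the intrinsic is left as a libcall at -Os.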
2411 
2412   //===--------------------------------------------------------------------===//
2413   // TargetLowering Configuration Methods - These methods should be invoked by
2414   // the derived class constructor to configure this object for the target.
2415   //
2416 protected:
2417   /// Specify how the target extends the result of integer and floating point
2418   /// boolean values from i1 to a wider type.  See getBooleanContents.
2419   void setBooleanContents(BooleanContent Ty) {
2420     BooleanContents = Ty;
2421     BooleanFloatContents = Ty;
2422   }
2423 
2424   /// Specify how the target extends the result of integer and floating point
2425   /// boolean values from i1 to a wider type.  See getBooleanContents.
2426   void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
2427     BooleanContents = IntTy;
2428     BooleanFloatContents = FloatTy;
2429   }
2430 
2431   /// Specify how the target extends the result of a vector boolean value from a
2432   /// vector of i1 to a wider type.  See getBooleanContents.
2433   void setBooleanVectorContents(BooleanContent Ty) {
2434     BooleanVectorContents = Ty;
2435   }
2436 
2437   /// Specify the target scheduling preference.
2438   void setSchedulingPreference(Sched::Preference Pref) {
2439     SchedPreferenceInfo = Pref;
2440   }
2441 
2442   /// Indicate the minimum number of entries needed to generate a jump table.
2443   void setMinimumJumpTableEntries(unsigned Val);
2444 
2445   /// Indicate the maximum number of entries in jump tables.
2446   /// Set to zero to generate unlimited jump tables.
2447   void setMaximumJumpTableSize(unsigned);
2448 
2449   /// If set to a physical register, this specifies the register that
2450   /// llvm.stacksave/llvm.stackrestore should save and restore.
2451   void setStackPointerRegisterToSaveRestore(Register R) {
2452     StackPointerRegisterToSaveRestore = R;
2453   }
2454 
2455   /// Tells the code generator that the target has multiple (allocatable)
2456   /// condition registers that can be used to store the results of comparisons
2457   /// for use by selects and conditional branches. With multiple condition
2458   /// registers, the code generator will not aggressively sink comparisons into
2459   /// the blocks of their users.
2460   void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
2461     HasMultipleConditionRegisters = hasManyRegs;
2462   }
2463 
2464   /// Tells the code generator that the target has BitExtract instructions.
2465   /// The code generator will aggressively sink "shift"s into the blocks of
2466   /// their users if the users will generate "and" instructions which can be
2467   /// combined with "shift" to BitExtract instructions.
2468   void setHasExtractBitsInsn(bool hasExtractInsn = true) {
2469     HasExtractBitsInsn = hasExtractInsn;
2470   }
2471 
2472   /// Tells the code generator not to expand logic operations on comparison
2473   /// predicates into separate sequences that increase the amount of flow
2474   /// control.
2475   void setJumpIsExpensive(bool isExpensive = true);
2476 
2477   /// Tells the code generator which bitwidths to bypass.
2478   void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
2479     BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
2480   }
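
  // Illustrative usage (not part of the original header): a target with a
  // slow 64-bit divider can request a runtime bypass that uses the 32-bit
  // divider whenever both operands fit in 32 bits:
  //
  //   addBypassSlowDiv(64, 32);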
2481 
2482   /// Add the specified register class as an available regclass for the
2483   /// specified value type. This indicates the selector can handle values of
2484   /// that class natively.
2485   void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
2486     assert((unsigned)VT.SimpleTy < std::size(RegClassForVT));
2487     RegClassForVT[VT.SimpleTy] = RC;
2488   }
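
  // Illustrative usage (not part of the original header), from a hypothetical
  // target's constructor; the register class names are made up:
  //
  //   addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);
  //   addRegisterClass(MVT::f64, &MyTarget::FPR64RegClass);
  //   computeRegisterProperties(Subtarget.getRegisterInfo());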
2489 
2490   /// Return the largest legal super-reg register class of the register class
2491   /// for the specified type and its associated "cost".
2492   virtual std::pair<const TargetRegisterClass *, uint8_t>
2493   findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
2494 
2495   /// Once all of the register classes are added, this allows us to compute
2496   /// derived properties we expose.
2497   void computeRegisterProperties(const TargetRegisterInfo *TRI);
2498 
2499   /// Indicate that the specified operation does not work with the specified
2500   /// type and indicate what to do about it. Note that VT may refer to either
2501   /// the type of a result or that of an operand of Op.
2502   void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action) {
2503     assert(Op < std::size(OpActions[0]) && "Table isn't big enough!");
2504     OpActions[(unsigned)VT.SimpleTy][Op] = Action;
2505   }
2506   void setOperationAction(ArrayRef<unsigned> Ops, MVT VT,
2507                           LegalizeAction Action) {
2508     for (auto Op : Ops)
2509       setOperationAction(Op, VT, Action);
2510   }
2511   void setOperationAction(ArrayRef<unsigned> Ops, ArrayRef<MVT> VTs,
2512                           LegalizeAction Action) {
2513     for (auto VT : VTs)
2514       setOperationAction(Ops, VT, Action);
2515   }
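
  // Illustrative usage (not part of the original header), from a hypothetical
  // target constructor:
  //
  //   setOperationAction(ISD::SDIV, MVT::i32, Expand);      // no divide insn
  //   setOperationAction({ISD::SMIN, ISD::SMAX}, MVT::i32, Legal);
  //   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); // see LowerOperation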
2516 
2517   /// Indicate that the specified load with extension does not work with the
2518   /// specified type and indicate what to do about it.
2519   void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
2520                         LegalizeAction Action) {
2521     assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
2522            MemVT.isValid() && "Table isn't big enough!");
2523     assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2524     unsigned Shift = 4 * ExtType;
2525     LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
2526     LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
2527   }
2528   void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT,
2529                         LegalizeAction Action) {
2530     for (auto ExtType : ExtTypes)
2531       setLoadExtAction(ExtType, ValVT, MemVT, Action);
2532   }
2533   void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT,
2534                         ArrayRef<MVT> MemVTs, LegalizeAction Action) {
2535     for (auto MemVT : MemVTs)
2536       setLoadExtAction(ExtTypes, ValVT, MemVT, Action);
2537   }
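
  // Illustrative usage (not part of the original header): promote
  // sign-extending i1 loads, and expand FP extending loads from f16 memory
  // for the common FP types:
  //
  //   setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Promote);
  //   for (MVT VT : {MVT::f32, MVT::f64})
  //     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);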
2538 
2539   /// Indicate that the specified truncating store does not work with the
2540   /// specified type and indicate what to do about it.
2541   void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action) {
2542     assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
2543     TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
2544   }
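
  // Illustrative usage (not part of the original header): a target without a
  // direct f32 -> f16 truncating store would mark it Expand so the legalizer
  // emits an FP_ROUND followed by a plain store:
  //
  //   setTruncStoreAction(MVT::f32, MVT::f16, Expand);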
2545 
2546   /// Indicate that the specified indexed load does or does not work with the
2547   /// specified type and indicate what to do about it.
2548   ///
2549   /// NOTE: All indexed mode loads are initialized to Expand in
2550   /// TargetLowering.cpp
2551   void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, MVT VT,
2552                             LegalizeAction Action) {
2553     for (auto IdxMode : IdxModes)
2554       setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
2555   }
2556 
2557   void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
2558                             LegalizeAction Action) {
2559     for (auto VT : VTs)
2560       setIndexedLoadAction(IdxModes, VT, Action);
2561   }
2562 
2563   /// Indicate that the specified indexed store does or does not work with the
2564   /// specified type and indicate what to do about it.
2565   ///
2566   /// NOTE: All indexed mode stores are initialized to Expand in
2567   /// TargetLowering.cpp
2568   void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, MVT VT,
2569                              LegalizeAction Action) {
2570     for (auto IdxMode : IdxModes)
2571       setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
2572   }
2573 
2574   void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
2575                              LegalizeAction Action) {
2576     for (auto VT : VTs)
2577       setIndexedStoreAction(IdxModes, VT, Action);
2578   }
2579 
2580   /// Indicate that the specified indexed masked load does or does not work with
2581   /// the specified type and indicate what to do about it.
2582   ///
2583   /// NOTE: All indexed mode masked loads are initialized to Expand in
2584   /// TargetLowering.cpp
2585   void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT,
2586                                   LegalizeAction Action) {
2587     setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
2588   }
2589 
2590   /// Indicate that the specified indexed masked store does or does not work
2591   /// with the specified type and indicate what to do about it.
2592   ///
2593   /// NOTE: All indexed mode masked stores are initialized to Expand in
2594   /// TargetLowering.cpp
2595   void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT,
2596                                    LegalizeAction Action) {
2597     setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
2598   }
2599 
2600   /// Indicate that the specified condition code is or isn't supported on the
2601   /// target and indicate what to do about it.
2602   void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, MVT VT,
2603                          LegalizeAction Action) {
2604     for (auto CC : CCs) {
2605       assert(VT.isValid() && (unsigned)CC < std::size(CondCodeActions) &&
2606              "Table isn't big enough!");
2607       assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2608       /// The lower 3 bits of SimpleTy select the Nth 4-bit chunk within the
2609       /// 32-bit value, and the upper 29 bits index into the second dimension
2610       /// of the array to select which 32-bit value to use.
2611       uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
2612       CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
2613       CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
2614     }
2615   }
2616   void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, ArrayRef<MVT> VTs,
2617                          LegalizeAction Action) {
2618     for (auto VT : VTs)
2619       setCondCodeAction(CCs, VT, Action);
2620   }
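
  // Illustrative usage (not part of the original header): a target whose FP
  // compares cannot test the ordered/unordered predicates directly might mark
  // them for expansion into combinations of supported compares:
  //
  //   setCondCodeAction({ISD::SETO, ISD::SETUO}, {MVT::f32, MVT::f64}, Expand);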
2621 
2622   /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
2623   /// to trying a larger integer/fp until it can find one that works. If that
2624   /// default is insufficient, this method can be used by the target to override
2625   /// the default.
2626   void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2627     PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
2628   }
2629 
2630   /// Convenience method to set an operation to Promote and specify the type
2631   /// in a single call.
2632   void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2633     setOperationAction(Opc, OrigVT, Promote);
2634     AddPromotedToType(Opc, OrigVT, DestVT);
2635   }
2636   void setOperationPromotedToType(ArrayRef<unsigned> Ops, MVT OrigVT,
2637                                   MVT DestVT) {
2638     for (auto Op : Ops) {
2639       setOperationAction(Op, OrigVT, Promote);
2640       AddPromotedToType(Op, OrigVT, DestVT);
2641     }
2642   }
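
  // Illustrative usage (not part of the original header): perform i8/i16
  // bitwise operations in i32 registers on a hypothetical 32-bit target:
  //
  //   setOperationPromotedToType({ISD::AND, ISD::OR, ISD::XOR}, MVT::i8,
  //                              MVT::i32);
  //   setOperationPromotedToType({ISD::AND, ISD::OR, ISD::XOR}, MVT::i16,
  //                              MVT::i32);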
2643 
2644   /// Targets should invoke this method for each target independent node that
2645   /// they want to provide a custom DAG combiner for by implementing the
2646   /// PerformDAGCombine virtual method.
2647   void setTargetDAGCombine(ArrayRef<ISD::NodeType> NTs) {
2648     for (auto NT : NTs) {
2649       assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
2650       TargetDAGCombineArray[NT >> 3] |= 1 << (NT & 7);
2651     }
2652   }
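
  // Illustrative usage (not part of the original header): request callbacks
  // for ADD and MUL nodes, which the target then pattern-matches in its
  // PerformDAGCombine override:
  //
  //   setTargetDAGCombine({ISD::ADD, ISD::MUL});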
2653 
2654   /// Set the target's minimum function alignment.
2655   void setMinFunctionAlignment(Align Alignment) {
2656     MinFunctionAlignment = Alignment;
2657   }
2658 
2659   /// Set the target's preferred function alignment.  This should be set if
2660   /// there is a performance benefit to higher-than-minimum alignment.
2661   void setPrefFunctionAlignment(Align Alignment) {
2662     PrefFunctionAlignment = Alignment;
2663   }
2664 
2665   /// Set the target's preferred loop alignment. The default alignment of one
2666   /// means the target does not care about loop alignment. The target may also
2667   /// override getPrefLoopAlignment to provide per-loop values.
2668   void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
2669   void setMaxBytesForAlignment(unsigned MaxBytes) {
2670     MaxBytesForAlignment = MaxBytes;
2671   }
2672 
2673   /// Set the minimum stack alignment of an argument.
2674   void setMinStackArgumentAlignment(Align Alignment) {
2675     MinStackArgumentAlignment = Alignment;
2676   }
2677 
2678   /// Set the maximum atomic operation size supported by the
2679   /// backend. Atomic operations greater than this size (as well as
2680   /// ones that are not naturally aligned), will be expanded by
2681   /// AtomicExpandPass into an __atomic_* library call.
2682   void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
2683     MaxAtomicSizeInBitsSupported = SizeInBits;
2684   }
2685 
2686   /// Set the size in bits of the maximum div/rem the backend supports.
2687   /// Larger operations will be expanded by ExpandLargeDivRem.
2688   void setMaxDivRemBitWidthSupported(unsigned SizeInBits) {
2689     MaxDivRemBitWidthSupported = SizeInBits;
2690   }
2691 
2692   /// Set the size in bits of the maximum fp convert the backend supports.
2693   /// Larger operations will be expanded by ExpandLargeFPConvert.
2694   void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits) {
2695     MaxLargeFPConvertBitWidthSupported = SizeInBits;
2696   }
2697 
2698   /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
2699   void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
2700     MinCmpXchgSizeInBits = SizeInBits;
2701   }
2702 
2703   /// Sets whether unaligned atomic operations are supported.
2704   void setSupportsUnalignedAtomics(bool UnalignedSupported) {
2705     SupportsUnalignedAtomics = UnalignedSupported;
2706   }
2707 
2708 public:
2709   //===--------------------------------------------------------------------===//
2710   // Addressing mode description hooks (used by LSR etc).
2711   //
2712 
2713   /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
2714   /// instructions reading the address. This allows as much computation as
2715   /// possible to be done in the address mode for that operand. This hook also
2716   /// lets targets indicate when this sinking should be done for intrinsics
2717   /// that load or store.
2718   virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
2719                                     SmallVectorImpl<Value*> &/*Ops*/,
2720                                     Type *&/*AccessTy*/) const {
2721     return false;
2722   }
2723 
2724   /// This represents an addressing mode of:
2725   ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
2726   /// If BaseGV is null,  there is no BaseGV.
2727   /// If BaseOffs is zero, there is no base offset.
2728   /// If HasBaseReg is false, there is no base register.
2729   /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
2730   /// no scale.
2731   struct AddrMode {
2732     GlobalValue *BaseGV = nullptr;
2733     int64_t      BaseOffs = 0;
2734     bool         HasBaseReg = false;
2735     int64_t      Scale = 0;
2736     AddrMode() = default;
2737   };
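
  // Illustrative sketch (not part of the original header): an x86-style
  // address such as [gv + 16 + rbx + 4*rcx] would be described as:
  //
  //   AddrMode AM;
  //   AM.BaseGV = GV;       // the global 'gv'
  //   AM.BaseOffs = 16;
  //   AM.HasBaseReg = true; // rbx
  //   AM.Scale = 4;         // 4 * rcx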
2738 
2739   /// Return true if the addressing mode represented by AM is legal for this
2740   /// target, for a load/store of the specified type.
2741   ///
2742   /// The type may be VoidTy, in which case only return true if the addressing
2743   /// mode is legal for a load/store of any legal type.  TODO: Handle
2744   /// pre/postinc as well.
2745   ///
2746   /// If the address space cannot be determined, it will be -1.
2747   ///
2748   /// TODO: Remove default argument
2749   virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
2750                                      Type *Ty, unsigned AddrSpace,
2751                                      Instruction *I = nullptr) const;
2752 
2753   /// Return the preferred common base offset.
2754   virtual int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset,
2755                                                  int64_t MaxOffset) const {
2756     return 0;
2757   }
2758 
2759   /// Return true if the specified immediate is a legal icmp immediate, that is
2760   /// the target has icmp instructions which can compare a register against the
2761   /// immediate without having to materialize the immediate into a register.
2762   virtual bool isLegalICmpImmediate(int64_t) const {
2763     return true;
2764   }
2765 
2766   /// Return true if the specified immediate is a legal add immediate, that is
2767   /// the target has add instructions which can add a register with the
2768   /// immediate without having to materialize the immediate into a register.
2769   virtual bool isLegalAddImmediate(int64_t) const {
2770     return true;
2771   }
2772 
2773   /// Return true if the specified immediate is legal for the value input of a
2774   /// store instruction.
2775   virtual bool isLegalStoreImmediate(int64_t Value) const {
2776     // Default implementation assumes that at least 0 works since it is likely
2777     // that a zero register exists or a zero immediate is allowed.
2778     return Value == 0;
2779   }
2780 
2781   /// Return true if it's significantly cheaper to shift a vector by a uniform
2782   /// scalar than by an amount which will vary across each lane. On x86 before
2783   /// AVX2 for example, there is a "psllw" instruction for the former case, but
2784   /// no simple instruction for a general "a << b" operation on vectors.
2785   /// This should also apply to lowering for vector funnel shifts (rotates).
2786   virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
2787     return false;
2788   }
2789 
2790   /// Given a shuffle vector SVI representing a vector splat, return a new
2791   /// scalar type of size equal to SVI's scalar type if the new type is more
2792   /// profitable. Returns nullptr otherwise. For example under MVE float splats
2793   /// are converted to integer to prevent the need to move from SPR to GPR
2794   /// registers.
2795   virtual Type* shouldConvertSplatType(ShuffleVectorInst* SVI) const {
2796     return nullptr;
2797   }
2798 
2799   /// Given a set of interconnected phis of type 'From' that are loaded/stored
2800   /// or bitcast to type 'To', return true if the set should be converted to
2801   /// 'To'.
2802   virtual bool shouldConvertPhiType(Type *From, Type *To) const {
2803     return (From->isIntegerTy() || From->isFloatingPointTy()) &&
2804            (To->isIntegerTy() || To->isFloatingPointTy());
2805   }
2806 
2807   /// Returns true if the opcode is a commutative binary operation.
2808   virtual bool isCommutativeBinOp(unsigned Opcode) const {
2809     // FIXME: This should get its info from the td file.
2810     switch (Opcode) {
2811     case ISD::ADD:
2812     case ISD::SMIN:
2813     case ISD::SMAX:
2814     case ISD::UMIN:
2815     case ISD::UMAX:
2816     case ISD::MUL:
2817     case ISD::MULHU:
2818     case ISD::MULHS:
2819     case ISD::SMUL_LOHI:
2820     case ISD::UMUL_LOHI:
2821     case ISD::FADD:
2822     case ISD::FMUL:
2823     case ISD::AND:
2824     case ISD::OR:
2825     case ISD::XOR:
2826     case ISD::SADDO:
2827     case ISD::UADDO:
2828     case ISD::ADDC:
2829     case ISD::ADDE:
2830     case ISD::SADDSAT:
2831     case ISD::UADDSAT:
2832     case ISD::FMINNUM:
2833     case ISD::FMAXNUM:
2834     case ISD::FMINNUM_IEEE:
2835     case ISD::FMAXNUM_IEEE:
2836     case ISD::FMINIMUM:
2837     case ISD::FMAXIMUM:
2838     case ISD::AVGFLOORS:
2839     case ISD::AVGFLOORU:
2840     case ISD::AVGCEILS:
2841     case ISD::AVGCEILU:
2842     case ISD::ABDS:
2843     case ISD::ABDU:
2844       return true;
2845     default: return false;
2846     }
2847   }
2848 
2849   /// Return true if the node is a math/logic binary operator.
2850   virtual bool isBinOp(unsigned Opcode) const {
2851     // A commutative binop must be a binop.
2852     if (isCommutativeBinOp(Opcode))
2853       return true;
2854     // These are non-commutative binops.
2855     switch (Opcode) {
2856     case ISD::SUB:
2857     case ISD::SHL:
2858     case ISD::SRL:
2859     case ISD::SRA:
2860     case ISD::ROTL:
2861     case ISD::ROTR:
2862     case ISD::SDIV:
2863     case ISD::UDIV:
2864     case ISD::SREM:
2865     case ISD::UREM:
2866     case ISD::SSUBSAT:
2867     case ISD::USUBSAT:
2868     case ISD::FSUB:
2869     case ISD::FDIV:
2870     case ISD::FREM:
2871       return true;
2872     default:
2873       return false;
2874     }
2875   }
2876 
2877   /// Return true if it's free to truncate a value of type FromTy to type
2878   /// ToTy. e.g. on x86 it's free to truncate an i32 value in register EAX to
2879   /// i16 by referencing its sub-register AX.
2880   /// Targets must return false when FromTy <= ToTy.
2881   virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
2882     return false;
2883   }
2884 
2885   /// Return true if a truncation from FromTy to ToTy is permitted when deciding
2886   /// whether a call is in tail position. Typically this means that both results
2887   /// would be assigned to the same register or stack slot, but it could mean
2888   /// the target performs adequate checks of its own before proceeding with the
2889   /// tail call.  Targets must return false when FromTy <= ToTy.
2890   virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
2891     return false;
2892   }
2893 
2894   virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const { return false; }
2895   virtual bool isTruncateFree(LLT FromTy, LLT ToTy, const DataLayout &DL,
2896                               LLVMContext &Ctx) const {
2897     return isTruncateFree(getApproximateEVTForLLT(FromTy, DL, Ctx),
2898                           getApproximateEVTForLLT(ToTy, DL, Ctx));
2899   }
2900 
2901   /// Return true if truncating the specific node Val to type VT2 is free.
2902   virtual bool isTruncateFree(SDValue Val, EVT VT2) const {
2903     // Fallback to type matching.
2904     return isTruncateFree(Val.getValueType(), VT2);
2905   }
2906 
2907   virtual bool isProfitableToHoist(Instruction *I) const { return true; }
2908 
2909   /// Return true if the extension represented by \p I is free.
2910   /// Unlike the is[Z|FP]ExtFree family, which is based on types,
2911   /// this method can use the context provided by \p I to decide
2912   /// whether or not \p I is free.
2913   /// This method extends the behavior of the is[Z|FP]ExtFree family.
2914   /// In other words, if is[Z|FP]ExtFree returns true, then this method
2915   /// returns true as well. The converse is not true.
2916   /// The target can perform the adequate checks by overriding isExtFreeImpl.
2917   /// \pre \p I must be a sign, zero, or fp extension.
2918   bool isExtFree(const Instruction *I) const {
2919     switch (I->getOpcode()) {
2920     case Instruction::FPExt:
2921       if (isFPExtFree(EVT::getEVT(I->getType()),
2922                       EVT::getEVT(I->getOperand(0)->getType())))
2923         return true;
2924       break;
2925     case Instruction::ZExt:
2926       if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
2927         return true;
2928       break;
2929     case Instruction::SExt:
2930       break;
2931     default:
2932       llvm_unreachable("Instruction is not an extension");
2933     }
2934     return isExtFreeImpl(I);
2935   }
2936 
2937   /// Return true if \p Load and \p Ext can form an ExtLoad.
2938   /// For example, in AArch64
2939   ///   %L = load i8, i8* %ptr
2940   ///   %E = zext i8 %L to i32
2941   /// can be lowered into one load instruction
2942   ///   ldrb w0, [x0]
2943   bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
2944                  const DataLayout &DL) const {
2945     EVT VT = getValueType(DL, Ext->getType());
2946     EVT LoadVT = getValueType(DL, Load->getType());
2947 
2948     // If the load has other users and the truncate is not free, the ext
2949     // probably isn't free.
2950     if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
2951         !isTruncateFree(Ext->getType(), Load->getType()))
2952       return false;
2953 
2954     // Check whether the target supports casts folded into loads.
2955     unsigned LType;
2956     if (isa<ZExtInst>(Ext))
2957       LType = ISD::ZEXTLOAD;
2958     else {
2959       assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
2960       LType = ISD::SEXTLOAD;
2961     }
2962 
2963     return isLoadExtLegal(LType, VT, LoadVT);
2964   }
2965 
2966   /// Return true if any actual instruction that defines a value of type FromTy
2967   /// implicitly zero-extends the value to ToTy in the result register.
2968   ///
2969   /// The function should return true when it is likely that the truncate can
2970   /// be freely folded with an instruction defining a value of FromTy. If
2971   /// the defining instruction is unknown (because you're looking at a
2972   /// function argument, PHI, etc.) then the target may require an
2973   /// explicit truncate, which is not necessarily free, but this function
2974   /// does not deal with those cases.
2975   /// Targets must return false when FromTy >= ToTy.
2976   virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
2977     return false;
2978   }
2979 
2980   virtual bool isZExtFree(EVT FromTy, EVT ToTy) const { return false; }
2981   virtual bool isZExtFree(LLT FromTy, LLT ToTy, const DataLayout &DL,
2982                           LLVMContext &Ctx) const {
2983     return isZExtFree(getApproximateEVTForLLT(FromTy, DL, Ctx),
2984                       getApproximateEVTForLLT(ToTy, DL, Ctx));
2985   }
2986 
2987   /// Return true if zero-extending the specific node Val to type VT2 is free
2988   /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
2989   /// because it's folded such as X86 zero-extending loads).
2990   virtual bool isZExtFree(SDValue Val, EVT VT2) const {
2991     return isZExtFree(Val.getValueType(), VT2);
2992   }
2993 
2994   /// Return true if sign-extension from FromTy to ToTy is cheaper than
2995   /// zero-extension.
2996   virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
2997     return false;
2998   }
2999 
3000   /// Return true if this constant should be sign extended when promoting to
3001   /// a larger type.
3002   virtual bool signExtendConstant(const ConstantInt *C) const { return false; }
3003 
3004   /// Return true if sinking I's operands to the same basic block as I is
3005   /// profitable, e.g. because the operands can be folded into a target
3006   /// instruction during instruction selection. After calling the function
3007   /// \p Ops contains the Uses to sink ordered by dominance (dominating users
3008   /// come first).
3009   virtual bool shouldSinkOperands(Instruction *I,
3010                                   SmallVectorImpl<Use *> &Ops) const {
3011     return false;
3012   }
3013 
3014   /// Try to optimize extending or truncating conversion instructions (like
3015   /// zext, trunc, fptoui, uitofp) for the target.
3016   virtual bool
3017   optimizeExtendOrTruncateConversion(Instruction *I, Loop *L,
3018                                      const TargetTransformInfo &TTI) const {
3019     return false;
3020   }
3021 
3022   /// Return true if the target supplies and combines to a paired load
3023   /// two loaded values of type LoadedType next to each other in memory.
3024   /// RequiredAlignment gives the minimal alignment constraints that must be met
3025   /// to be able to select this paired load.
3026   ///
3027   /// This information is *not* used to generate actual paired loads, but it is
3028   /// used to generate a sequence of loads that is easier to combine into a
3029   /// paired load.
3030   /// For instance, something like this:
3031   /// a = load i64* addr
3032   /// b = trunc i64 a to i32
3033   /// c = lshr i64 a, 32
3034   /// d = trunc i64 c to i32
3035   /// will be optimized into:
3036   /// b = load i32* addr1
3037   /// d = load i32* addr2
3038   /// Where addr1 = addr2 +/- sizeof(i32).
3039   ///
3040   /// In other words, unless the target performs a post-isel load combining,
3041   /// this information should not be provided because it will generate more
3042   /// loads.
3043   virtual bool hasPairedLoad(EVT /*LoadedType*/,
3044                              Align & /*RequiredAlignment*/) const {
3045     return false;
3046   }
3047 
3048   /// Return true if the target has a vector blend instruction.
3049   virtual bool hasVectorBlend() const { return false; }
3050 
3051   /// Get the maximum supported factor for interleaved memory accesses.
3052   /// Default to be the minimum interleave factor: 2.
3053   virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
3054 
3055   /// Lower an interleaved load to target specific intrinsics. Return
3056   /// true on success.
3057   ///
3058   /// \p LI is the vector load instruction.
3059   /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
3060   /// \p Indices is the corresponding indices for each shufflevector.
3061   /// \p Factor is the interleave factor.
3062   virtual bool lowerInterleavedLoad(LoadInst *LI,
3063                                     ArrayRef<ShuffleVectorInst *> Shuffles,
3064                                     ArrayRef<unsigned> Indices,
3065                                     unsigned Factor) const {
3066     return false;
3067   }
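
  // Illustrative sketch (not part of the original header): for a factor-2
  // interleaved load, the vectorizer emits a wide load plus de-interleaving
  // shuffles, which this hook may replace with a single target intrinsic:
  //
  //   %wide = load <8 x i32>, ptr %p
  //   %even = shufflevector <8 x i32> %wide, <8 x i32> poison,
  //                         <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  //   %odd  = shufflevector <8 x i32> %wide, <8 x i32> poison,
  //                         <4 x i32> <i32 1, i32 3, i32 5, i32 7>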
3068 
3069   /// Lower an interleaved store to target specific intrinsics. Return
3070   /// true on success.
3071   ///
3072   /// \p SI is the vector store instruction.
3073   /// \p SVI is the shufflevector to RE-interleave the stored vector.
3074   /// \p Factor is the interleave factor.
3075   virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
3076                                      unsigned Factor) const {
3077     return false;
3078   }
3079 
3080   /// Lower a deinterleave intrinsic to a target specific load intrinsic.
3081   /// Return true on success. Currently only supports
3082   /// llvm.experimental.vector.deinterleave2
3083   ///
3084   /// \p DI is the deinterleave intrinsic.
3085   /// \p LI is the accompanying load instruction
3086   virtual bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
3087                                                 LoadInst *LI) const {
3088     return false;
3089   }
3090 
3091   /// Lower an interleave intrinsic to a target specific store intrinsic.
3092   /// Return true on success. Currently only supports
3093   /// llvm.experimental.vector.interleave2
3094   ///
3095   /// \p II is the interleave intrinsic.
3096   /// \p SI is the accompanying store instruction
3097   virtual bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
3098                                                StoreInst *SI) const {
3099     return false;
3100   }
3101 
3102   /// Return true if an fpext operation is free (for instance, because
3103   /// single-precision floating-point numbers are implicitly extended to
3104   /// double-precision).
3105   virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
3106     assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
3107            "invalid fpext types");
3108     return false;
3109   }
3110 
3111   /// Return true if an fpext operation input to an \p Opcode operation is free
3112   /// (for instance, because half-precision floating-point numbers are
3113   /// implicitly extended to float-precision) for an FMA instruction.
3114   virtual bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode,
3115                                LLT DestTy, LLT SrcTy) const {
3116     return false;
3117   }
3118 
3119   /// Return true if an fpext operation input to an \p Opcode operation is free
3120   /// (for instance, because half-precision floating-point numbers are
3121   /// implicitly extended to float-precision) for an FMA instruction.
3122   virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
3123                                EVT DestVT, EVT SrcVT) const {
3124     assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
3125            "invalid fpext types");
3126     return isFPExtFree(DestVT, SrcVT);
3127   }
3128 
3129   /// Return true if folding a vector load into ExtVal (a sign, zero, or any
3130   /// extend node) is profitable.
3131   virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
3132 
3133   /// Return true if an fneg operation is free to the point where it is never
3134   /// worthwhile to replace it with a bitwise operation.
3135   virtual bool isFNegFree(EVT VT) const {
3136     assert(VT.isFloatingPoint());
3137     return false;
3138   }
3139 
3140   /// Return true if an fabs operation is free to the point where it is never
3141   /// worthwhile to replace it with a bitwise operation.
3142   virtual bool isFAbsFree(EVT VT) const {
3143     assert(VT.isFloatingPoint());
3144     return false;
3145   }
3146 
3147   /// Return true if an FMA operation is faster than a pair of fmul and fadd
3148   /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
3149   /// returns true, otherwise fmuladd is expanded to fmul + fadd.
3150   ///
3151   /// NOTE: This may be called before legalization on types for which FMAs are
3152   /// not legal, but should return true if those types will eventually legalize
3153   /// to types that support FMAs. After legalization, it will only be called on
3154   /// types that support FMAs (via Legal or Custom actions)
3155   virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3156                                           EVT) const {
3157     return false;
3158   }
3159 
3160   /// Return true if an FMA operation is faster than a pair of fmul and fadd
3161   /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
3162   /// returns true, otherwise fmuladd is expanded to fmul + fadd.
3163   ///
3164   /// NOTE: This may be called before legalization on types for which FMAs are
3165   /// not legal, but should return true if those types will eventually legalize
3166   /// to types that support FMAs. After legalization, it will only be called on
3167   /// types that support FMAs (via Legal or Custom actions)
3168   virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3169                                           LLT) const {
3170     return false;
3171   }
3172 
3173   /// IR version
3174   virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const {
3175     return false;
3176   }
3177 
3178   /// Returns true if \p MI can be combined with another instruction to
3179   /// form TargetOpcode::G_FMAD. \p MI may be a TargetOpcode::G_FADD,
3180   /// TargetOpcode::G_FSUB, or a TargetOpcode::G_FMUL which will be
3181   /// distributed into an fadd/fsub.
3182   virtual bool isFMADLegal(const MachineInstr &MI, LLT Ty) const {
3183     assert((MI.getOpcode() == TargetOpcode::G_FADD ||
3184             MI.getOpcode() == TargetOpcode::G_FSUB ||
3185             MI.getOpcode() == TargetOpcode::G_FMUL) &&
3186            "unexpected node in FMAD forming combine");
3187     switch (Ty.getScalarSizeInBits()) {
3188     case 16:
3189       return isOperationLegal(TargetOpcode::G_FMAD, MVT::f16);
3190     case 32:
3191       return isOperationLegal(TargetOpcode::G_FMAD, MVT::f32);
3192     case 64:
3193       return isOperationLegal(TargetOpcode::G_FMAD, MVT::f64);
3194     default:
3195       break;
3196     }
3197 
3198     return false;
3199   }
3200 
3201   /// Returns true if \p N can be combined with another node to form an
3202   /// ISD::FMAD. \p N may be an ISD::FADD, ISD::FSUB, or an ISD::FMUL which
3203   /// will be distributed into an fadd/fsub.
3204   virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const {
3205     assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB ||
3206             N->getOpcode() == ISD::FMUL) &&
3207            "unexpected node in FMAD forming combine");
3208     return isOperationLegal(ISD::FMAD, N->getValueType(0));
3209   }
3210 
3211   // Return true when the decision to generate FMA's (or FMS, FMLA etc) rather
3212   // than FMUL and ADD is delegated to the machine combiner.
3213   virtual bool generateFMAsInMachineCombiner(EVT VT,
3214                                              CodeGenOptLevel OptLevel) const {
3215     return false;
3216   }
3217 
3218   /// Return true if it's profitable to narrow operations of type SrcVT to
3219   /// DestVT. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
3220   /// i32 to i16.
3221   virtual bool isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
3222     return false;
3223   }
3224 
3225   /// Return true if pulling a binary operation into a select with an identity
3226   /// constant is profitable. This is the inverse of an IR transform.
3227   /// Example: X + (Cond ? Y : 0) --> Cond ? (X + Y) : X
3228   virtual bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
3229                                                     EVT VT) const {
3230     return false;
3231   }
3232 
3233   /// Return true if it is beneficial to convert a load of a constant to
3234   /// just the constant itself.
3235   /// On some targets it might be more efficient to use a combination of
3236   /// arithmetic instructions to materialize the constant instead of loading it
3237   /// from a constant pool.
3238   virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
3239                                                  Type *Ty) const {
3240     return false;
3241   }
3242 
3243   /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
3244   /// from this source type with this index. This is needed because
3245   /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
3246   /// the first element, and only the target knows which lowering is cheap.
3247   virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
3248                                        unsigned Index) const {
3249     return false;
3250   }
3251 
3252   /// Try to convert an extract element of a vector binary operation into an
3253   /// extract element followed by a scalar operation.
3254   virtual bool shouldScalarizeBinop(SDValue VecOp) const {
3255     return false;
3256   }
3257 
3258   /// Return true if extraction of a scalar element from the given vector type
3259   /// at the given index is cheap. For example, if scalar operations occur on
3260   /// the same register file as vector operations, then an extract element may
3261   /// be a sub-register rename rather than an actual instruction.
3262   virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
3263     return false;
3264   }
3265 
3266   /// Try to convert math with an overflow comparison into the corresponding DAG
3267   /// node operation. Targets may want to override this independently of whether
3268   /// the operation is legal/custom for the given type because it may obscure
3269   /// matching of other patterns.
3270   virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
3271                                     bool MathUsed) const {
3272     // TODO: The default logic is inherited from code in CodeGenPrepare.
3273     // The opcode should not make a difference by default?
3274     if (Opcode != ISD::UADDO)
3275       return false;
3276 
3277     // Allow the transform as long as we have an integer type that is not
3278     // obviously illegal and unsupported and if the math result is used
3279     // besides the overflow check. On some targets (e.g. SPARC), it is
3280     // not profitable to form an overflow op if the math result has no
3281     // concrete users.
3282     if (VT.isVector())
3283       return false;
3284     return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
3285   }
3286 
3287   // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
3288   // even if the vector itself has multiple uses.
3289   virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
3290     return false;
3291   }
3292 
3293   // Return true if CodeGenPrepare should consider splitting the large offset
3294   // of a GEP so that the GEP fits into the addressing mode and can be sunk
3295   // into the same blocks as its users.
3296   virtual bool shouldConsiderGEPOffsetSplit() const { return false; }
3297 
3298   /// Return true if creating a shift of the type by the given
3299   /// amount is not profitable.
3300   virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
3301     return false;
3302   }
3303 
3304   // Should we fold (select_cc seteq (and x, y), 0, 0, A) -> (and (sra (shl x))
3305   // A) where y has a single bit set?
3306   virtual bool shouldFoldSelectWithSingleBitTest(EVT VT,
3307                                                  const APInt &AndMask) const {
3308     unsigned ShCt = AndMask.getBitWidth() - 1;
3309     return !shouldAvoidTransformToShift(VT, ShCt);
3310   }
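
  // Worked example (not part of the original header): with i32 %x and
  // AndMask = 4 (bit 2 set), the fold rewrites
  //
  //   (select_cc seteq (and x, 4), 0, 0, A)
  // into
  //   (and (sra (shl x, 29), 31), A)
  //
  // Bit 2 is shifted up to the sign bit, the arithmetic shift broadcasts it
  // into an all-zeros/all-ones mask, and the AND then yields 0 or A.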
3311 
3312   /// Does this target require the clearing of high-order bits in a register
3313   /// passed to the fp16 to fp conversion library function?
3314   virtual bool shouldKeepZExtForFP16Conv() const { return false; }
3315 
3316   /// Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT
3317   /// from min(max(fptoi)) saturation patterns.
shouldConvertFpToSat(unsigned Op,EVT FPVT,EVT VT)3318   virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const {
3319     return isOperationLegalOrCustom(Op, VT);
3320   }
3321 
3322   /// Does this target support complex deinterleaving
isComplexDeinterleavingSupported()3323   virtual bool isComplexDeinterleavingSupported() const { return false; }
3324 
3325   /// Does this target support complex deinterleaving with the given operation
3326   /// and type
isComplexDeinterleavingOperationSupported(ComplexDeinterleavingOperation Operation,Type * Ty)3327   virtual bool isComplexDeinterleavingOperationSupported(
3328       ComplexDeinterleavingOperation Operation, Type *Ty) const {
3329     return false;
3330   }
3331 
3332   /// Create the IR node for the given complex deinterleaving operation.
3333   /// If one cannot be created using all the given inputs, nullptr should be
3334   /// returned.
3335   virtual Value *createComplexDeinterleavingIR(
3336       IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
3337       ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
3338       Value *Accumulator = nullptr) const {
3339     return nullptr;
3340   }
3341 
3342   //===--------------------------------------------------------------------===//
3343   // Runtime Library hooks
3344   //
3345 
  /// Rename the default libcall routine name for the specified libcall.
  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
    LibcallRoutineNames[Call] = Name;
  }
  void setLibcallName(ArrayRef<RTLIB::Libcall> Calls, const char *Name) {
    for (auto Call : Calls)
      setLibcallName(Call, Name);
  }
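
  // Illustrative use (a sketch, not taken from any in-tree target): a target
  // whose runtime provides its own single-precision float addition routine
  // might do, in its TargetLowering constructor:
  //
  //   setLibcallName(RTLIB::ADD_F32, "__mytarget_addsf3");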

  /// Get the libcall routine name for the specified libcall.
  const char *getLibcallName(RTLIB::Libcall Call) const {
    return LibcallRoutineNames[Call];
  }

  /// Override the default CondCode to be used to test the result of the
  /// comparison libcall against zero.
  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
    CmpLibcallCCs[Call] = CC;
  }

  /// Get the CondCode that's to be used to test the result of the comparison
  /// libcall against zero.
  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
    return CmpLibcallCCs[Call];
  }

  /// Set the CallingConv that should be used for the specified libcall.
  void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
    LibcallCallingConvs[Call] = CC;
  }

  /// Get the CallingConv that should be used for the specified libcall.
  CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
    return LibcallCallingConvs[Call];
  }

  /// Execute target-specific actions to finalize target lowering.
  /// This is used to set extra flags in MachineFrameInformation and to freeze
  /// the set of reserved registers.
  /// The default implementation just freezes the set of reserved registers.
  virtual void finalizeLowering(MachineFunction &MF) const;

  //===----------------------------------------------------------------------===//
  //  GlobalISel Hooks
  //===----------------------------------------------------------------------===//
  /// Check whether or not \p MI needs to be moved close to its uses.
  virtual bool shouldLocalize(const MachineInstr &MI,
                              const TargetTransformInfo *TTI) const;

private:
  const TargetMachine &TM;

  /// Tells the code generator that the target has multiple (allocatable)
  /// condition registers that can be used to store the results of comparisons
  /// for use by selects and conditional branches. With multiple condition
  /// registers, the code generator will not aggressively sink comparisons into
  /// the blocks of their users.
  bool HasMultipleConditionRegisters;

  /// Tells the code generator that the target has BitExtract instructions.
  /// The code generator will aggressively sink "shift"s into the blocks of
  /// their users if the users will generate "and" instructions which can be
  /// combined with "shift" to BitExtract instructions.
  bool HasExtractBitsInsn;

  /// Tells the code generator to bypass slow divide or remainder
  /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
  /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
  /// div/rem when the operands are positive and less than 256.
  DenseMap<unsigned int, unsigned int> BypassSlowDivWidths;
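
  // For example (illustrative): a target whose 32-bit divider is slow can
  // request an 8-bit bypass from its constructor via the addBypassSlowDiv()
  // setter, which records the entry here as BypassSlowDivWidths[32] == 8.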

  /// Tells the code generator that it shouldn't generate extra flow control
  /// instructions and should attempt to combine flow control instructions via
  /// predication.
  bool JumpIsExpensive;

  /// Information about the contents of the high-bits in boolean values held in
  /// a type wider than i1. See getBooleanContents.
  BooleanContent BooleanContents;

  /// Information about the contents of the high-bits in boolean values held in
  /// a type wider than i1. See getBooleanContents.
  BooleanContent BooleanFloatContents;

  /// Information about the contents of the high-bits in boolean vector values
  /// when the element type is wider than i1. See getBooleanContents.
  BooleanContent BooleanVectorContents;

  /// The target scheduling preference: shortest possible total cycles or lowest
  /// register usage.
  Sched::Preference SchedPreferenceInfo;

  /// The minimum alignment that any argument on the stack needs to have.
  Align MinStackArgumentAlignment;

  /// The minimum function alignment (used when optimizing for size, and to
  /// prevent explicitly provided alignment from leading to incorrect code).
  Align MinFunctionAlignment;

  /// The preferred function alignment (used when alignment unspecified and
  /// optimizing for speed).
  Align PrefFunctionAlignment;

  /// The preferred loop alignment (in log2, not in bytes).
  Align PrefLoopAlignment;
  /// The maximum number of bytes permitted to be emitted for alignment.
  unsigned MaxBytesForAlignment;

  /// Size in bits of the maximum atomics size the backend supports.
  /// Accesses larger than this will be expanded by AtomicExpandPass.
  unsigned MaxAtomicSizeInBitsSupported;

  /// Size in bits of the maximum div/rem size the backend supports.
  /// Larger operations will be expanded by ExpandLargeDivRem.
  unsigned MaxDivRemBitWidthSupported;

  /// Size in bits of the largest fp convert the backend supports.
  /// Larger operations will be expanded by ExpandLargeFPConvert.
  unsigned MaxLargeFPConvertBitWidthSupported;

  /// Size in bits of the minimum cmpxchg or ll/sc operation the
  /// backend supports.
  unsigned MinCmpXchgSizeInBits;

  /// This indicates if the target supports unaligned atomic operations.
  bool SupportsUnalignedAtomics;

  /// If set to a physical register, this specifies the register that
  /// llvm.savestack/llvm.restorestack should save and restore.
  Register StackPointerRegisterToSaveRestore;

  /// This indicates the default register class to use for each ValueType the
  /// target supports natively.
  const TargetRegisterClass *RegClassForVT[MVT::VALUETYPE_SIZE];
  uint16_t NumRegistersForVT[MVT::VALUETYPE_SIZE];
  MVT RegisterTypeForVT[MVT::VALUETYPE_SIZE];

  /// This indicates the "representative" register class to use for each
  /// ValueType the target supports natively. This information is used by the
  /// scheduler to track register pressure. By default, the representative
  /// register class is the largest legal super-reg register class of the
  /// register class of the specified type. e.g. On x86, i8, i16, and i32's
  /// representative class would be GR32.
  const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE] = {0};

  /// This indicates the "cost" of the "representative" register class for each
  /// ValueType. The cost is used by the scheduler to approximate register
  /// pressure.
  uint8_t RepRegClassCostForVT[MVT::VALUETYPE_SIZE];

  /// For any value types we are promoting or expanding, this contains the value
  /// type that we are changing to.  For Expanded types, this contains one step
  /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
  /// (e.g. i64 -> i16).  For types natively supported by the system, this holds
  /// the same type (e.g. i32 -> i32).
  MVT TransformToType[MVT::VALUETYPE_SIZE];

  /// For each operation and each value type, keep a LegalizeAction that
  /// indicates how instruction selection should deal with the operation.  Most
  /// operations are Legal (aka, supported natively by the target), but
  /// operations that are not should be described.  Note that operations on
  /// non-legal value types are not described here.
  LegalizeAction OpActions[MVT::VALUETYPE_SIZE][ISD::BUILTIN_OP_END];

  /// For each load extension type and each value type, keep a LegalizeAction
  /// that indicates how instruction selection should deal with a load of a
  /// specific value type and extension type. Uses 4-bits to store the action
  /// for each of the 4 load ext types.
  uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];

  /// For each value type pair keep a LegalizeAction that indicates whether a
  /// truncating store of a specific value type and truncating type is legal.
  LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];

  /// For each indexed mode and each value type, keep a quad of LegalizeAction
  /// that indicates how instruction selection should deal with the load /
  /// store / maskedload / maskedstore.
  ///
  /// The first dimension is the value_type for the reference. The second
  /// dimension represents the various modes for load store.
  uint16_t IndexedModeActions[MVT::VALUETYPE_SIZE][ISD::LAST_INDEXED_MODE];

  /// For each condition code (ISD::CondCode) keep a LegalizeAction that
  /// indicates how instruction selection should deal with the condition code.
  ///
  /// Because each CC action takes up 4 bits, we need to have the array size be
  /// large enough to fit all of the value types. This can be done by rounding
  /// up the MVT::VALUETYPE_SIZE value to the next multiple of 8.
  uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8];
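
  // With this packing, each uint32_t holds the 4-bit actions for eight value
  // types, so a lookup amounts to (illustrative sketch):
  //
  //   uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
  //   LegalizeAction Action = (LegalizeAction)(
  //       (CondCodeActions[CC][VT.SimpleTy >> 3] >> Shift) & 0xF);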

  ValueTypeActionImpl ValueTypeActions;

private:
  /// Targets can specify ISD nodes that they would like PerformDAGCombine
  /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
  /// array.
  unsigned char
  TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];

  /// For operations that must be promoted to a specific type, this holds the
  /// destination type.  This map should be sparse, so don't hold it as an
  /// array.
  ///
  /// Targets add entries to this map with AddPromotedToType(..), clients access
  /// this with getTypeToPromoteTo(..).
  std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
    PromoteToType;

  /// Stores the name of each libcall.
  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];

  /// The ISD::CondCode that should be used to test the result of each
  /// comparison libcall against zero.
  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];

  /// Stores the CallingConv that should be used for each libcall.
  CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];

  /// Set default libcall names and calling conventions.
  void InitLibcalls(const Triple &TT);

  /// The bits of IndexedModeActions used to store the legalisation actions.
  /// We store the data as   | ML | MS |  L |  S | each taking 4 bits.
  enum IndexedModeActionsBits {
    IMAB_Store = 0,
    IMAB_Load = 4,
    IMAB_MaskedStore = 8,
    IMAB_MaskedLoad = 12
  };


  void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
                            LegalizeAction Action) {
    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
    IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift;
  }

  LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT,
                                      unsigned Shift) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf);
  }

protected:
  /// Return true if the extension represented by \p I is free.
  /// \pre \p I is a sign, zero, or fp extension and
  ///      is[Z|FP]ExtFree of the related types is not true.
  virtual bool isExtFreeImpl(const Instruction *I) const { return false; }

  /// Depth that GatherAllAliases should continue looking for chain
  /// dependencies when trying to find a more preferable chain. As an
  /// approximation, this should be more than the number of consecutive stores
  /// expected to be merged.
  unsigned GatherAllAliasesMaxDepth;

  /// \brief Specify maximum number of store instructions per memset call.
  ///
  /// When lowering \@llvm.memset this field specifies the maximum number of
  /// store operations that may be substituted for the call to memset. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memset will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
  /// store.  This only applies to setting a constant array of a constant size.
  unsigned MaxStoresPerMemset;
  /// Likewise for functions with the OptSize attribute.
  unsigned MaxStoresPerMemsetOptSize;

  /// \brief Specify maximum number of store instructions per memcpy call.
  ///
  /// When lowering \@llvm.memcpy this field specifies the maximum number of
  /// store operations that may be substituted for a call to memcpy. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memcpy will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
  /// and one 1-byte store. This only applies to copying a constant array of
  /// constant size.
  unsigned MaxStoresPerMemcpy;
  /// Likewise for functions with the OptSize attribute.
  unsigned MaxStoresPerMemcpyOptSize;
  /// \brief Specify max number of store instructions to glue in inlined memcpy.
  ///
  /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number
  /// of store instructions to keep together. This helps in pairing and
  /// vectorization later on.
  unsigned MaxGluedStoresPerMemcpy = 0;

  /// \brief Specify maximum number of load instructions per memcmp call.
  ///
  /// When lowering \@llvm.memcmp this field specifies the maximum number of
  /// pairs of load operations that may be substituted for a call to memcmp.
  /// Targets must set this value based on the cost threshold for that target.
  /// Targets should assume that the memcmp will be done using as many of the
  /// largest load operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, loading 7 bytes on a 32-bit machine
  /// with 32-bit alignment would result in one 4-byte load, one 2-byte load
  /// and one 1-byte load. This only applies to comparing a constant array of
  /// constant size.
  unsigned MaxLoadsPerMemcmp;
  /// Likewise for functions with the OptSize attribute.
  unsigned MaxLoadsPerMemcmpOptSize;

  /// \brief Specify maximum number of store instructions per memmove call.
  ///
  /// When lowering \@llvm.memmove this field specifies the maximum number of
  /// store instructions that may be substituted for a call to memmove. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memmove will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
  /// with 8-bit alignment would result in nine 1-byte stores.  This only
  /// applies to copying a constant array of constant size.
  unsigned MaxStoresPerMemmove;
  /// Likewise for functions with the OptSize attribute.
  unsigned MaxStoresPerMemmoveOptSize;

  /// Tells the code generator that select is more expensive than a branch if
  /// the branch is usually predicted right.
  bool PredictableSelectIsExpensive;

  /// \see enableExtLdPromotion.
  bool EnableExtLdPromotion;

  /// Return true if the value types that can be represented by the specified
  /// register class are all legal.
  bool isLegalRC(const TargetRegisterInfo &TRI,
                 const TargetRegisterClass &RC) const;

  /// Replace/modify any TargetFrameIndex operands with a target-dependent
  /// sequence of memory operands that is recognized by PrologEpilogInserter.
  MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const;

  bool IsStrictFPEnabled;
};

/// This class defines information used to lower LLVM code to legal SelectionDAG
/// operators that the target instruction selector can accept natively.
///
/// This class also defines callbacks that targets must implement to lower
/// target-specific constructs to SelectionDAG operators.
class TargetLowering : public TargetLoweringBase {
public:
  struct DAGCombinerInfo;
  struct MakeLibCallOptions;

  TargetLowering(const TargetLowering &) = delete;
  TargetLowering &operator=(const TargetLowering &) = delete;

  explicit TargetLowering(const TargetMachine &TM);

  bool isPositionIndependent() const;

  virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
                                          FunctionLoweringInfo *FLI,
                                          UniformityInfo *UA) const {
    return false;
  }

  // Lets the target control the following reassociation of operands:
  // (op (op x, c1), y) -> (op (op x, y), c1), where N0 is (op x, c1) and N1 is
  // y. By default, any case where N0 has a single use is considered
  // profitable. This behavior reflects the condition replaced by this target
  // hook call in the DAGCombiner. Any particular target can implement its own
  // heuristic to restrict the common combiner.
  virtual bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
                                   SDValue N1) const {
    return N0.hasOneUse();
  }

  // Lets the target control the following reassociation of operands:
  // (op (op x, c1), y) -> (op (op x, y), c1), where N0 is (op x, c1) and N1 is
  // y. By default, any case where N0 has a single use is considered
  // profitable. This behavior reflects the condition replaced by this target
  // hook call in the combiner. Any particular target can implement its own
  // heuristic to restrict the common combiner.
  virtual bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0,
                                   Register N1) const {
    return MRI.hasOneNonDBGUse(N0);
  }

  virtual bool isSDNodeAlwaysUniform(const SDNode * N) const {
    return false;
  }

  /// Returns true by value, base pointer and offset pointer and addressing mode
  /// by reference if the node's address can be legally represented as
  /// pre-indexed load / store address.
  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
                                         SDValue &/*Offset*/,
                                         ISD::MemIndexedMode &/*AM*/,
                                         SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// Returns true by value, base pointer and offset pointer and addressing mode
  /// by reference if this node can be combined with a load / store to form a
  /// post-indexed load / store.
  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
                                          SDValue &/*Base*/,
                                          SDValue &/*Offset*/,
                                          ISD::MemIndexedMode &/*AM*/,
                                          SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// Returns true if the specified base+offset is a legal indexed addressing
  /// mode for this target. \p MI is the load or store instruction that is being
  /// considered for transformation.
  virtual bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
                               bool IsPre, MachineRegisterInfo &MRI) const {
    return false;
  }

  /// Return the entry encoding for a jump table in the current function.  The
  /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
  virtual unsigned getJumpTableEncoding() const;

  virtual const MCExpr *
  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
                            const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
                            MCContext &/*Ctx*/) const {
    llvm_unreachable("Need to implement this hook if target has custom JTIs");
  }

  /// Returns relocation base for the given PIC jumptable.
  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                           SelectionDAG &DAG) const;

  /// This returns the relocation base for the given PIC jumptable, the same as
  /// getPICJumpTableRelocBase, but as an MCExpr.
  virtual const MCExpr *
  getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                               unsigned JTI, MCContext &Ctx) const;

  /// Return true if folding a constant offset with the given GlobalAddress is
  /// legal.  It is frequently not legal in PIC relocation models.
  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

  /// On x86, return true if the operand with index OpNo is a CALL or JUMP
  /// instruction, which can use either a memory constraint or an address
  /// constraint. -fasm-blocks "__asm call foo" lowers to
  /// call void asm sideeffect inteldialect "call ${0:P}", "*m..."
  ///
  /// This function is used by a hack to choose the address constraint,
  /// lowering to a direct call.
  virtual bool
  isInlineAsmTargetBranch(const SmallVectorImpl<StringRef> &AsmStrs,
                          unsigned OpNo) const {
    return false;
  }

  bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                            SDValue &Chain) const;

  void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
                           SDValue &NewRHS, ISD::CondCode &CCCode,
                           const SDLoc &DL, const SDValue OldLHS,
                           const SDValue OldRHS) const;

  void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
                           SDValue &NewRHS, ISD::CondCode &CCCode,
                           const SDLoc &DL, const SDValue OldLHS,
                           const SDValue OldRHS, SDValue &Chain,
                           bool IsSignaling = false) const;

  /// Returns a pair of (return value, chain).
  /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
  std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
                                          EVT RetVT, ArrayRef<SDValue> Ops,
                                          MakeLibCallOptions CallOptions,
                                          const SDLoc &dl,
                                          SDValue Chain = SDValue()) const;

  /// Check whether parameters to a call that are passed in callee saved
  /// registers are the same as from the calling function.  This needs to be
  /// checked for tail call eligibility.
  bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
      const uint32_t *CallerPreservedMask,
      const SmallVectorImpl<CCValAssign> &ArgLocs,
      const SmallVectorImpl<SDValue> &OutVals) const;

  //===--------------------------------------------------------------------===//
  // TargetLowering Optimization Methods
  //

  /// A convenience struct that encapsulates a DAG, and two SDValues for
  /// returning information from TargetLowering to its clients that want to
  /// combine.
  struct TargetLoweringOpt {
    SelectionDAG &DAG;
    bool LegalTys;
    bool LegalOps;
    SDValue Old;
    SDValue New;

    explicit TargetLoweringOpt(SelectionDAG &InDAG,
                               bool LT, bool LO) :
      DAG(InDAG), LegalTys(LT), LegalOps(LO) {}

    bool LegalTypes() const { return LegalTys; }
    bool LegalOperations() const { return LegalOps; }

    bool CombineTo(SDValue O, SDValue N) {
      Old = O;
      New = N;
      return true;
    }
  };
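
  // Sketch of typical use inside a demanded-bits style hook (illustrative
  // only): record the old and new values via CombineTo and let the caller
  // commit the replacement.
  //
  //   if (/* Op is provably equal to a simpler value Simplified */)
  //     return TLO.CombineTo(Op, Simplified);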

  /// Determines the optimal series of memory ops to replace the memset / memcpy.
  /// Return true if the number of memory ops is below the threshold (Limit).
  /// Note that this is always the case when Limit is ~0.
  /// It returns the types of the sequence of memory ops to perform
  /// memset / memcpy by reference.
  virtual bool
  findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
                           const MemOp &Op, unsigned DstAS, unsigned SrcAS,
                           const AttributeList &FuncAttributes) const;

  /// Check to see if the specified operand of the specified instruction is a
  /// constant integer.  If so, check to see if there are any bits set in the
  /// constant that are not demanded.  If so, shrink the constant and return
  /// true.
  bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                              const APInt &DemandedElts,
                              TargetLoweringOpt &TLO) const;

  /// Helper wrapper around ShrinkDemandedConstant, demanding all elements.
  bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                              TargetLoweringOpt &TLO) const;

  // Target hook to do target-specific const optimization, which is called by
  // ShrinkDemandedConstant. This function should return true if the target
  // doesn't want ShrinkDemandedConstant to further optimize the constant.
  virtual bool targetShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            const APInt &DemandedElts,
                                            TargetLoweringOpt &TLO) const {
    return false;
  }

  /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
  /// This uses isTruncateFree/isZExtFree and ANY_EXTEND for the widening cast,
  /// but it could be generalized for targets with other types of implicit
  /// widening casts.
  bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                        const APInt &DemandedBits,
                        TargetLoweringOpt &TLO) const;

  /// Look at Op.  At this point, we know that only the DemandedBits bits of the
  /// result of Op are ever used downstream.  If we can use this information to
  /// simplify Op, create a new simplified DAG node and return true, returning
  /// the original and new nodes in Old and New.  Otherwise, analyze the
  /// expression and return a mask of KnownOne and KnownZero bits for the
  /// expression (used to simplify the caller).  The KnownZero/One bits may only
  /// be accurate for those bits in the Demanded masks.
  /// \p AssumeSingleUse When this parameter is true, this function will
  ///    attempt to simplify \p Op even if there are multiple uses.
  ///    Callers are responsible for correctly updating the DAG based on the
  ///    results of this function, because simply replacing TLO.Old
  ///    with TLO.New will be incorrect when this parameter is true and TLO.Old
  ///    has multiple uses.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                            const APInt &DemandedElts, KnownBits &Known,
                            TargetLoweringOpt &TLO, unsigned Depth = 0,
                            bool AssumeSingleUse = false) const;
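
  // For instance (an illustrative case, not an exhaustive description):
  // if Op is (or X, 0xFF00) and DemandedBits is 0x00FF, the OR contributes
  // nothing to the demanded low byte, so the call can replace Op with X and
  // return true.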

  /// Helper wrapper around SimplifyDemandedBits, demanding all elements.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                            KnownBits &Known, TargetLoweringOpt &TLO,
                            unsigned Depth = 0,
                            bool AssumeSingleUse = false) const;

  /// Helper wrapper around SimplifyDemandedBits.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                            DAGCombinerInfo &DCI) const;

  /// Helper wrapper around SimplifyDemandedBits.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                            const APInt &DemandedElts,
                            DAGCombinerInfo &DCI) const;

  /// More limited version of SimplifyDemandedBits that can be used to "look
  /// through" ops that don't contribute to the DemandedBits/DemandedElts -
  /// bitwise ops etc.
  SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          const APInt &DemandedElts,
                                          SelectionDAG &DAG,
                                          unsigned Depth = 0) const;

  /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
  /// elements.
  SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          SelectionDAG &DAG,
                                          unsigned Depth = 0) const;

  /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
  /// bits from only some vector elements.
  SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op,
                                                const APInt &DemandedElts,
                                                SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

  /// Look at Vector Op. At this point, we know that only the DemandedElts
  /// elements of the result of Op are ever used downstream.  If we can use
  /// this information to simplify Op, create a new simplified DAG node and
  /// return true, storing the original and new nodes in TLO.
  /// Otherwise, analyze the expression and return a mask of KnownUndef and
  /// KnownZero elements for the expression (used to simplify the caller).
  /// The KnownUndef/Zero elements may only be accurate for those bits
  /// in the DemandedMask.
  /// \p AssumeSingleUse When this parameter is true, this function will
  ///    attempt to simplify \p Op even if there are multiple uses.
  ///    Callers are responsible for correctly updating the DAG based on the
  ///    results of this function, because simply replacing TLO.Old
  ///    with TLO.New will be incorrect when this parameter is true and TLO.Old
  ///    has multiple uses.
  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
                                  APInt &KnownUndef, APInt &KnownZero,
                                  TargetLoweringOpt &TLO, unsigned Depth = 0,
                                  bool AssumeSingleUse = false) const;

  /// Helper wrapper around SimplifyDemandedVectorElts.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
                                  DAGCombinerInfo &DCI) const;

  /// Return true if the target supports simplifying demanded vector elements by
  /// converting them to undefs.
  virtual bool
  shouldSimplifyDemandedVectorElts(SDValue Op,
                                   const TargetLoweringOpt &TLO) const {
    return true;
  }

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
  /// argument allows us to only collect the known bits that are shared by the
  /// requested vector elements.
  virtual void computeKnownBitsForTargetNode(const SDValue Op,
                                             KnownBits &Known,
                                             const APInt &DemandedElts,
                                             const SelectionDAG &DAG,
                                             unsigned Depth = 0) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
  /// argument allows us to only collect the known bits that are shared by the
  /// requested vector elements. This is for GISel.
  virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis,
                                              Register R, KnownBits &Known,
                                              const APInt &DemandedElts,
                                              const MachineRegisterInfo &MRI,
                                              unsigned Depth = 0) const;

  /// Determine the known alignment for the pointer value \p R. This can
  /// typically be inferred from the number of low known 0 bits. However, for a
  /// pointer with a non-integral address space, the alignment value may be
  /// independent from the known low bits.
  virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis,
                                                Register R,
                                                const MachineRegisterInfo &MRI,
                                                unsigned Depth = 0) const;

  /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
  /// Default implementation computes low bits based on alignment
  /// information. This should preserve known bits passed into it.
  virtual void computeKnownBitsForFrameIndex(int FIOp,
                                             KnownBits &Known,
                                             const MachineFunction &MF) const;

  /// This method can be implemented by targets that want to expose additional
  /// information about sign bits to the DAG Combiner. The DemandedElts
  /// argument allows us to only collect the minimum sign bits that are shared
  /// by the requested vector elements.
  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                   const APInt &DemandedElts,
                                                   const SelectionDAG &DAG,
                                                   unsigned Depth = 0) const;

  /// This method can be implemented by targets that want to expose additional
  /// information about sign bits to GlobalISel combiners. The DemandedElts
  /// argument allows us to only collect the minimum sign bits that are shared
  /// by the requested vector elements.
  virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
                                                    Register R,
                                                    const APInt &DemandedElts,
                                                    const MachineRegisterInfo &MRI,
                                                    unsigned Depth = 0) const;

  /// Attempt to simplify any target nodes based on the demanded vector
  /// elements, returning true on success. Otherwise, analyze the expression and
  /// return a mask of KnownUndef and KnownZero elements for the expression
  /// (used to simplify the caller). The KnownUndef/Zero elements may only be
  /// accurate for those bits in the DemandedMask.
  virtual bool SimplifyDemandedVectorEltsForTargetNode(
      SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
      APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;

  /// Attempt to simplify any target nodes based on the demanded bits/elts,
  /// returning true on success. Otherwise, analyze the
  /// expression and return a mask of KnownOne and KnownZero bits for the
  /// expression (used to simplify the caller).  The KnownZero/One bits may only
  /// be accurate for those bits in the Demanded masks.
  virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                                 const APInt &DemandedBits,
                                                 const APInt &DemandedElts,
                                                 KnownBits &Known,
                                                 TargetLoweringOpt &TLO,
                                                 unsigned Depth = 0) const;

  /// More limited version of SimplifyDemandedBits that can be used to "look
  /// through" ops that don't contribute to the DemandedBits/DemandedElts -
  /// bitwise ops etc.
  virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
      SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
      SelectionDAG &DAG, unsigned Depth) const;

  /// Return true if this function can prove that \p Op is never poison
  /// and, if \p PoisonOnly is false, does not have undef bits. The DemandedElts
  /// argument limits the check to the requested vector elements.
  virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
      SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
      bool PoisonOnly, unsigned Depth) const;

  /// Return true if Op can create undef or poison from non-undef & non-poison
  /// operands. The DemandedElts argument limits the check to the requested
  /// vector elements.
  virtual bool
  canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts,
                                      const SelectionDAG &DAG, bool PoisonOnly,
                                      bool ConsiderFlags, unsigned Depth) const;

  /// Tries to build a legal vector shuffle using the provided parameters
  /// or equivalent variations. The Mask argument may be modified as the
  /// function tries different variations.
  /// Returns an empty SDValue if the operation fails.
  SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
                                  SDValue N1, MutableArrayRef<int> Mask,
                                  SelectionDAG &DAG) const;

  /// This method returns the constant pool value that will be loaded by LD.
  /// NOTE: You must check for implicit extensions of the constant by LD.
  virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const;

  /// If \p SNaN is false, \returns true if \p Op is known to never be any
  /// NaN. If \p SNaN is true, returns if \p Op is known to never be a signaling
  /// NaN.
  virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
                                            const SelectionDAG &DAG,
                                            bool SNaN = false,
                                            unsigned Depth = 0) const;

  /// Return true if vector \p Op has the same value across all \p DemandedElts,
  /// indicating any elements which may be undef in the output \p UndefElts.
  virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts,
                                         APInt &UndefElts,
                                         const SelectionDAG &DAG,
                                         unsigned Depth = 0) const;

  /// Returns true if the given Opc is considered a canonical constant for the
  /// target, which should not be transformed back into a BUILD_VECTOR.
  virtual bool isTargetCanonicalConstantNode(SDValue Op) const {
    return Op.getOpcode() == ISD::SPLAT_VECTOR ||
           Op.getOpcode() == ISD::SPLAT_VECTOR_PARTS;
  }

  struct DAGCombinerInfo {
    void *DC;  // The DAG Combiner object.
    CombineLevel Level;
    bool CalledByLegalizer;

  public:
    SelectionDAG &DAG;

    DAGCombinerInfo(SelectionDAG &dag, CombineLevel level,  bool cl, void *dc)
      : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}

    bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
    bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
    bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; }
    CombineLevel getDAGCombineLevel() { return Level; }
    bool isCalledByLegalizer() const { return CalledByLegalizer; }

    void AddToWorklist(SDNode *N);
    SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);

    bool recursivelyDeleteUnusedNodes(SDNode *N);

    void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
  };

  /// Return if the N is a constant or constant vector equal to the true value
  /// from getBooleanContents().
  bool isConstTrueVal(SDValue N) const;

  /// Return if the N is a constant or constant vector equal to the false value
  /// from getBooleanContents().
  bool isConstFalseVal(SDValue N) const;

  /// Return if \p N is a True value when extended to \p VT.
  bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;

  /// Try to simplify a setcc built with the specified operands and cc. If it is
  /// unable to simplify it, return a null SDValue.
  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                        bool foldBooleans, DAGCombinerInfo &DCI,
                        const SDLoc &dl) const;

  // For targets which wrap address, unwrap for analysis.
  virtual SDValue unwrapAddress(SDValue N) const { return N; }

  /// Returns true (and the GlobalValue and the offset) if the node is a
  /// GlobalAddress + offset.
  virtual bool
  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

  /// This method will be invoked for all target nodes and for any
  /// target-independent nodes that the target has registered to be invoked
  /// for.
  ///
  /// The semantics are as follows:
  /// Return Value:
  ///   SDValue.Val == 0   - No change was made
  ///   SDValue.Val == N   - N was replaced, is dead, and is already handled.
  ///   otherwise          - N should be replaced by the returned Operand.
  ///
  /// In addition, methods provided by DAGCombinerInfo may be used to perform
  /// more complex transformations.
  ///
  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
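
  // A minimal sketch of an override (hypothetical target, illustrative only):
  //
  //   SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
  //                                               DAGCombinerInfo &DCI) const {
  //     switch (N->getOpcode()) {
  //     case ISD::ADD:
  //       // ... return a replacement SDValue, or SDValue() for no change.
  //       break;
  //     }
  //     return SDValue();
  //   }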

  /// Return true if it is profitable to move this shift by a constant amount
  /// through its operand, adjusting any immediate operands as necessary to
  /// preserve semantics. This transformation may not be desirable if it
  /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
  /// extraction in AArch64). By default, it returns true.
  ///
  /// @param N the shift node
  /// @param Level the current DAGCombine legalization level.
  virtual bool isDesirableToCommuteWithShift(const SDNode *N,
                                             CombineLevel Level) const {
    return true;
  }
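
  // The commuted form referred to above is, for example (illustrative):
  //   (shl (add x, c1), c2)  ->  (add (shl x, c2), c1 << c2)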

  /// GlobalISel - return true if it is profitable to move this shift by a
  /// constant amount through its operand, adjusting any immediate operands as
  /// necessary to preserve semantics. This transformation may not be desirable
  /// if it disrupts a particularly auspicious target-specific tree (e.g.
  /// bitfield extraction in AArch64). By default, it returns true.
  ///
  /// @param MI the shift instruction
  /// @param IsAfterLegal true if running after legalization.
  virtual bool isDesirableToCommuteWithShift(const MachineInstr &MI,
                                             bool IsAfterLegal) const {
    return true;
  }

  /// GlobalISel - return true if it's profitable to perform the combine:
  /// shl ([sza]ext x), y => zext (shl x, y)
  virtual bool isDesirableToPullExtFromShl(const MachineInstr &MI) const {
    return true;
  }

  // Return AndOrSETCCFoldKind::{AddAnd, ABS} if it is desirable to try and
  // optimize LogicOp(SETCC0, SETCC1). An example (what is implemented as of
  // writing this) is:
  //    With C as a power of 2 and C != 0 and C != INT_MIN:
  //    AddAnd:
  //     (icmp eq A, C) | (icmp eq A, -C)
  //            -> (icmp eq and(add(A, C), ~(C + C)), 0)
  //     (icmp ne A, C) & (icmp ne A, -C)
  //            -> (icmp ne and(add(A, C), ~(C + C)), 0)
  //    ABS:
  //     (icmp eq A, C) | (icmp eq A, -C)
  //            -> (icmp eq Abs(A), C)
  //     (icmp ne A, C) & (icmp ne A, -C)
  //            -> (icmp ne Abs(A), C)
  //
  // @param LogicOp the logic op
  // @param SETCC0 the first of the SETCC nodes
  // @param SETCC1 the second of the SETCC nodes
  virtual AndOrSETCCFoldKind isDesirableToCombineLogicOpOfSETCC(
      const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const {
    return AndOrSETCCFoldKind::None;
  }

  /// Return true if it is profitable to combine an XOR of a logical shift
  /// to create a logical shift of NOT. This transformation may not be desirable
  /// if it disrupts a particularly auspicious target-specific tree (e.g.
  /// BIC on ARM/AArch64). By default, it returns true.
  virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const {
    return true;
  }

  /// Return true if the target has native support for the specified value type
  /// and it is 'desirable' to use the type for the given node type. e.g. On x86
  /// i16 is legal, but undesirable since i16 instruction encodings are longer
  /// and some i16 instructions are slow.
  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
    // By default, assume all legal types are desirable.
    return isTypeLegal(VT);
  }

  /// Return true if it is profitable for the dag combiner to transform a
  /// floating point op of the specified opcode to an equivalent op of an
  /// integer type. e.g. f32 load -> i32 load can be profitable on ARM.
  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
                                                 EVT /*VT*/) const {
    return false;
  }

  /// This method queries the target whether it is beneficial for the dag
  /// combiner to promote the specified node. If true, it should return the
  /// desired promotion type by reference.
  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
    return false;
  }

  /// Return true if the target supports the swifterror attribute. It optimizes
  /// loads and stores to reading and writing a specific register.
  virtual bool supportSwiftError() const {
    return false;
  }

  /// Return true if the target supports that a subset of CSRs for the given
  /// machine function is handled explicitly via copies.
  virtual bool supportSplitCSR(MachineFunction *MF) const {
    return false;
  }

  /// Return true if the target supports kcfi operand bundles.
  virtual bool supportKCFIBundles() const { return false; }

  /// Perform necessary initialization to handle a subset of CSRs explicitly
  /// via copies. This function is called at the beginning of instruction
  /// selection.
  virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
    llvm_unreachable("Not Implemented");
  }

  /// Insert explicit copies in entry and exit blocks. We copy a subset of
  /// CSRs to virtual registers in the entry block, and copy them back to
  /// physical registers in the exit blocks. This function is called at the end
  /// of instruction selection.
  virtual void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
    llvm_unreachable("Not Implemented");
  }

  /// Return the newly negated expression if the cost is not expensive, and
  /// set the cost in \p Cost to indicate whether it is cheaper or neutral to
  /// do the negation.
  virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                       bool LegalOps, bool OptForSize,
                                       NegatibleCost &Cost,
                                       unsigned Depth = 0) const;

  SDValue getCheaperOrNeutralNegatedExpression(
      SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize,
      const NegatibleCost CostThreshold = NegatibleCost::Neutral,
      unsigned Depth = 0) const {
    NegatibleCost Cost = NegatibleCost::Expensive;
    SDValue Neg =
        getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
    if (!Neg)
      return SDValue();

    if (Cost <= CostThreshold)
      return Neg;

    // Remove the newly created node to avoid the side effect on the DAG.
    if (Neg->use_empty())
      DAG.RemoveDeadNode(Neg.getNode());
    return SDValue();
  }
4336 
4337   /// This is the helper function to return the newly negated expression only
4338   /// when the cost is cheaper.
4339   SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG,
4340                                       bool LegalOps, bool OptForSize,
4341                                       unsigned Depth = 0) const {
4342     return getCheaperOrNeutralNegatedExpression(Op, DAG, LegalOps, OptForSize,
4343                                                 NegatibleCost::Cheaper, Depth);
4344   }
4345 
4346   /// This is the helper function to return the newly negated expression if
4347   /// the cost is not expensive.
4348   SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps,
4349                                bool OptForSize, unsigned Depth = 0) const {
4350     NegatibleCost Cost = NegatibleCost::Expensive;
4351     return getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
4352   }
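
  // For illustration, a DAG combine that folds (fsub X, Y) into
  // (fadd X, (fneg Y)) only when the negation is known to be strictly cheaper
  // could use the helper like this sketch (X, Y, DL, VT and LegalOperations
  // are assumptions taken from the surrounding combine):
  //
  //   SDValue NegY = TLI.getCheaperNegatedExpression(
  //       Y, DAG, LegalOperations, DAG.shouldOptForSize());
  //   if (NegY)
  //     return DAG.getNode(ISD::FADD, DL, VT, X, NegY);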

  //===--------------------------------------------------------------------===//
  // Lowering methods - These methods must be implemented by targets so that
  // the SelectionDAGBuilder code knows how to lower these.
  //

  /// Target-specific splitting of values into parts that fit a register
  /// storing a legal type.
  virtual bool splitValueIntoRegisterParts(
      SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
      unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
    return false;
  }

  /// Allows the target to handle physreg-carried dependencies in a
  /// target-specific way. Used by ScheduleDAGSDNodes to decide whether to add
  /// an edge to the dependency graph.
  /// Def - input: Selection DAG node defining the physical register
  /// User - input: Selection DAG node using the physical register
  /// Op - input: operand number of User
  /// PhysReg - inout: set to the physical register if the edge is
  /// necessary, unchanged otherwise
  /// Cost - inout: physical register copy cost.
  /// Returns 'true' if the edge is necessary, 'false' otherwise.
  virtual bool checkForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                         const TargetRegisterInfo *TRI,
                                         const TargetInstrInfo *TII,
                                         unsigned &PhysReg, int &Cost) const {
    return false;
  }

  /// Target-specific combining of register parts into their original value.
  virtual SDValue
  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
                             const SDValue *Parts, unsigned NumParts,
                             MVT PartVT, EVT ValueVT,
                             std::optional<CallingConv::ID> CC) const {
    return SDValue();
  }

  /// This hook must be implemented to lower the incoming (formal) arguments,
  /// described by the Ins array, into the specified DAG. The implementation
  /// should fill in the InVals array with legal-type argument values, and
  /// return the resulting token chain value.
  virtual SDValue LowerFormalArguments(
      SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
      const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
      SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// This structure contains all information that is necessary for lowering
  /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
  /// needs to lower a call, and targets will see this struct in their LowerCall
  /// implementation.
  struct CallLoweringInfo {
    SDValue Chain;
    Type *RetTy = nullptr;
    bool RetSExt           : 1;
    bool RetZExt           : 1;
    bool IsVarArg          : 1;
    bool IsInReg           : 1;
    bool DoesNotReturn     : 1;
    bool IsReturnValueUsed : 1;
    bool IsConvergent      : 1;
    bool IsPatchPoint      : 1;
    bool IsPreallocated    : 1;
    bool NoMerge           : 1;

    // IsTailCall should be modified by implementations of
    // TargetLowering::LowerCall that perform tail call conversions.
    bool IsTailCall = false;

    // Is call lowering done post SelectionDAG type legalization?
    bool IsPostTypeLegalization = false;

    unsigned NumFixedArgs = -1;
    CallingConv::ID CallConv = CallingConv::C;
    SDValue Callee;
    ArgListTy Args;
    SelectionDAG &DAG;
    SDLoc DL;
    const CallBase *CB = nullptr;
    SmallVector<ISD::OutputArg, 32> Outs;
    SmallVector<SDValue, 32> OutVals;
    SmallVector<ISD::InputArg, 32> Ins;
    SmallVector<SDValue, 4> InVals;
    const ConstantInt *CFIType = nullptr;
    SDValue ConvergenceControlToken;

    CallLoweringInfo(SelectionDAG &DAG)
        : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
          DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
          IsPatchPoint(false), IsPreallocated(false), NoMerge(false),
          DAG(DAG) {}

    CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
      DL = dl;
      return *this;
    }

    CallLoweringInfo &setChain(SDValue InChain) {
      Chain = InChain;
      return *this;
    }

    // setCallee with target/module-specific attributes.
    CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
                                   SDValue Target, ArgListTy &&ArgsList) {
      RetTy = ResultType;
      Callee = Target;
      CallConv = CC;
      NumFixedArgs = ArgsList.size();
      Args = std::move(ArgsList);

      DAG.getTargetLoweringInfo().markLibCallAttributes(
          &(DAG.getMachineFunction()), CC, Args);
      return *this;
    }

    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
                                SDValue Target, ArgListTy &&ArgsList,
                                AttributeSet ResultAttrs = {}) {
      RetTy = ResultType;
      IsInReg = ResultAttrs.hasAttribute(Attribute::InReg);
      RetSExt = ResultAttrs.hasAttribute(Attribute::SExt);
      RetZExt = ResultAttrs.hasAttribute(Attribute::ZExt);
      NoMerge = ResultAttrs.hasAttribute(Attribute::NoMerge);

      Callee = Target;
      CallConv = CC;
      NumFixedArgs = ArgsList.size();
      Args = std::move(ArgsList);
      return *this;
    }

    CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
                                SDValue Target, ArgListTy &&ArgsList,
                                const CallBase &Call) {
      RetTy = ResultType;

      IsInReg = Call.hasRetAttr(Attribute::InReg);
      DoesNotReturn =
          Call.doesNotReturn() ||
          (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode()));
      IsVarArg = FTy->isVarArg();
      IsReturnValueUsed = !Call.use_empty();
      RetSExt = Call.hasRetAttr(Attribute::SExt);
      RetZExt = Call.hasRetAttr(Attribute::ZExt);
      NoMerge = Call.hasFnAttr(Attribute::NoMerge);

      Callee = Target;

      CallConv = Call.getCallingConv();
      NumFixedArgs = FTy->getNumParams();
      Args = std::move(ArgsList);

      CB = &Call;

      return *this;
    }

    CallLoweringInfo &setInRegister(bool Value = true) {
      IsInReg = Value;
      return *this;
    }

    CallLoweringInfo &setNoReturn(bool Value = true) {
      DoesNotReturn = Value;
      return *this;
    }

    CallLoweringInfo &setVarArg(bool Value = true) {
      IsVarArg = Value;
      return *this;
    }

    CallLoweringInfo &setTailCall(bool Value = true) {
      IsTailCall = Value;
      return *this;
    }

    CallLoweringInfo &setDiscardResult(bool Value = true) {
      IsReturnValueUsed = !Value;
      return *this;
    }

    CallLoweringInfo &setConvergent(bool Value = true) {
      IsConvergent = Value;
      return *this;
    }

    CallLoweringInfo &setSExtResult(bool Value = true) {
      RetSExt = Value;
      return *this;
    }

    CallLoweringInfo &setZExtResult(bool Value = true) {
      RetZExt = Value;
      return *this;
    }

    CallLoweringInfo &setIsPatchPoint(bool Value = true) {
      IsPatchPoint = Value;
      return *this;
    }

    CallLoweringInfo &setIsPreallocated(bool Value = true) {
      IsPreallocated = Value;
      return *this;
    }

    CallLoweringInfo &setIsPostTypeLegalization(bool Value = true) {
      IsPostTypeLegalization = Value;
      return *this;
    }

    CallLoweringInfo &setCFIType(const ConstantInt *Type) {
      CFIType = Type;
      return *this;
    }

    CallLoweringInfo &setConvergenceControlToken(SDValue Token) {
      ConvergenceControlToken = Token;
      return *this;
    }

    ArgListTy &getArgs() {
      return Args;
    }
  };

  /// This structure is used to pass arguments to the makeLibCall function.
  struct MakeLibCallOptions {
    // By passing the list of types before softening to makeLibCall, the
    // target hook shouldExtendTypeInLibCall can query the original types
    // before softening.
    ArrayRef<EVT> OpsVTBeforeSoften;
    EVT RetVTBeforeSoften;
    bool IsSExt : 1;
    bool DoesNotReturn : 1;
    bool IsReturnValueUsed : 1;
    bool IsPostTypeLegalization : 1;
    bool IsSoften : 1;

    MakeLibCallOptions()
        : IsSExt(false), DoesNotReturn(false), IsReturnValueUsed(true),
          IsPostTypeLegalization(false), IsSoften(false) {}

    MakeLibCallOptions &setSExt(bool Value = true) {
      IsSExt = Value;
      return *this;
    }

    MakeLibCallOptions &setNoReturn(bool Value = true) {
      DoesNotReturn = Value;
      return *this;
    }

    MakeLibCallOptions &setDiscardResult(bool Value = true) {
      IsReturnValueUsed = !Value;
      return *this;
    }

    MakeLibCallOptions &setIsPostTypeLegalization(bool Value = true) {
      IsPostTypeLegalization = Value;
      return *this;
    }

    MakeLibCallOptions &setTypeListBeforeSoften(ArrayRef<EVT> OpsVT, EVT RetVT,
                                                bool Value = true) {
      OpsVTBeforeSoften = OpsVT;
      RetVTBeforeSoften = RetVT;
      IsSoften = Value;
      return *this;
    }
  };
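
  // A typical configuration when a floating-point operation is softened to a
  // libcall might look like the sketch below (LC, Ops, NVT, Chain and the
  // surrounding lowering context are assumptions for illustration):
  //
  //   TargetLowering::MakeLibCallOptions CallOptions;
  //   EVT OpVT = Op.getOperand(0).getValueType();
  //   CallOptions.setTypeListBeforeSoften(OpVT, Op.getValueType(), true);
  //   std::pair<SDValue, SDValue> Tmp =
  //       TLI.makeLibCall(DAG, LC, NVT, Ops, CallOptions, SDLoc(Op), Chain);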

  /// This function lowers an abstract call to a function into an actual call.
  /// This returns a pair of operands.  The first element is the return value
  /// for the function (if RetTy is not VoidTy).  The second element is the
  /// outgoing token chain. It calls LowerCall to do the actual lowering.
  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
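
  // A minimal usage sketch: build a CallLoweringInfo with the chained setters
  // and hand it to LowerCallTo (the callee, return type and empty argument
  // list here are assumptions for illustration):
  //
  //   TargetLowering::ArgListTy Args;
  //   TargetLowering::CallLoweringInfo CLI(DAG);
  //   CLI.setDebugLoc(DL)
  //       .setChain(Chain)
  //       .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
  //   std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
  //   // CallResult.first: return value; CallResult.second: token chain.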

  /// This hook must be implemented to lower calls into the specified
  /// DAG. The outgoing arguments to the call are described by the Outs array,
  /// and the values to be returned by the call are described by the Ins
  /// array. The implementation should fill in the InVals array with legal-type
  /// return values from the call, and return the resulting token chain value.
  virtual SDValue
  LowerCall(CallLoweringInfo &/*CLI*/,
            SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Target-specific cleanup for formal ByVal parameters.
  virtual void HandleByVal(CCState *, unsigned &, Align) const {}

  /// This hook should be implemented to check whether the return values
  /// described by the Outs array can fit into the return registers.  If false
  /// is returned, an sret-demotion is performed.
  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
                              MachineFunction &/*MF*/, bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                              LLVMContext &/*Context*/) const {
    // Return true by default to get preexisting behavior.
    return true;
  }

  /// This hook must be implemented to lower outgoing return values, described
  /// by the Outs array, into the specified DAG. The implementation should
  /// return the resulting token chain value.
  virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                              bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
                              const SmallVectorImpl<SDValue> & /*OutVals*/,
                              const SDLoc & /*dl*/,
                              SelectionDAG & /*DAG*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Return true if the result of the specified node is used by a return node
  /// only. It also computes and returns the input chain for the tail call.
  ///
  /// This is used to determine whether it is possible to codegen a libcall as
  /// a tail call at legalization time.
  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
    return false;
  }

  /// Return true if the target may be able to emit the call instruction as a
  /// tail call. This is used by optimization passes to determine if it's
  /// profitable to duplicate return instructions to enable tail-call
  /// optimization.
  virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
    return false;
  }

  /// Return the builtin name for the __builtin___clear_cache intrinsic.
  /// Default is to invoke the clear cache library call.
  virtual const char *getClearCacheBuiltinName() const {
    return "__clear_cache";
  }

  /// Return the register ID of the name passed in. Used by named register
  /// global variables extension. There is no target-independent behaviour
  /// so the default action is to bail.
  virtual Register getRegisterByName(const char *RegName, LLT Ty,
                                     const MachineFunction &MF) const {
    report_fatal_error("Named registers not implemented for this target");
  }
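
  // A target exposing, say, its stack pointer to named-register globals might
  // override this roughly as follows (MyTarget::SP and the recognized name
  // are assumptions for illustration):
  //
  //   Register MyTargetLowering::getRegisterByName(
  //       const char *RegName, LLT Ty, const MachineFunction &MF) const {
  //     if (StringRef(RegName) == "sp")
  //       return MyTarget::SP;
  //     report_fatal_error("Invalid register name global variable");
  //   }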

  /// Return the type that should be used to zero or sign extend a
  /// zeroext/signext integer return value.  FIXME: Some C calling conventions
  /// require the return type to be promoted, but this is not true all the time,
  /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
  /// conventions. The frontend should handle this and include all of the
  /// necessary information.
  virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
                                  ISD::NodeType /*ExtendKind*/) const {
    EVT MinVT = getRegisterType(MVT::i32);
    return VT.bitsLT(MinVT) ? MinVT : VT;
  }

  /// For some targets, an LLVM struct type must be broken down into multiple
  /// simple types, but the calling convention specifies that the entire struct
  /// must be passed in a block of consecutive registers.
  virtual bool
  functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
                                            bool isVarArg,
                                            const DataLayout &DL) const {
    return false;
  }

  /// For most targets, an LLVM type must be broken down into multiple
  /// smaller types. Usually the halves are ordered according to the endianness
  /// but for some platforms that would break. So this method will default to
  /// matching the endianness but can be overridden.
  virtual bool
  shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const {
    return DL.isLittleEndian();
  }

  /// Returns a 0 terminated array of registers that can be safely used as
  /// scratch registers.
  virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
    return nullptr;
  }

  /// Returns a 0 terminated array of rounding control registers that can be
  /// attached into strict FP call.
  virtual ArrayRef<MCPhysReg> getRoundingControlRegisters() const {
    return ArrayRef<MCPhysReg>();
  }

  /// This callback is used to prepare for a volatile or atomic load.
  /// It takes a chain node as input and returns the chain for the load itself.
  ///
  /// Having a callback like this is necessary for targets like SystemZ,
  /// which allows a CPU to reuse the result of a previous load indefinitely,
  /// even if a cache-coherent store is performed by another CPU.  The default
  /// implementation does nothing.
  virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
                                              SelectionDAG &DAG) const {
    return Chain;
  }

  /// This callback is invoked by the type legalizer to legalize nodes with an
  /// illegal operand type but legal result types.  It replaces the
  /// LowerOperation callback in the type Legalizer.  The reason we can not do
  /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
  /// use this callback.
  ///
  /// TODO: Consider merging with ReplaceNodeResults.
  ///
  /// The target places new result values for the node in Results (their number
  /// and types must exactly match those of the original return values of
  /// the node), or leaves Results empty, which indicates that the node is not
  /// to be custom lowered after all.
  /// The default implementation calls LowerOperation.
  virtual void LowerOperationWrapper(SDNode *N,
                                     SmallVectorImpl<SDValue> &Results,
                                     SelectionDAG &DAG) const;

  /// This callback is invoked for operations that are unsupported by the
  /// target, which are registered to use 'custom' lowering, and whose defined
  /// values are all legal.  If the target has no operations that require custom
  /// lowering, it need not implement this.  The default implementation of this
  /// aborts.
  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

  /// This callback is invoked when a node result type is illegal for the
  /// target, and the operation was registered to use 'custom' lowering for that
  /// result type.  The target places new result values for the node in Results
  /// (their number and types must exactly match those of the original return
  /// values of the node), or leaves Results empty, which indicates that the
  /// node is not to be custom lowered after all.
  ///
  /// If the target has no operations that require custom lowering, it need not
  /// implement this.  The default implementation aborts.
  virtual void ReplaceNodeResults(SDNode * /*N*/,
                                  SmallVectorImpl<SDValue> &/*Results*/,
                                  SelectionDAG &/*DAG*/) const {
    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
  }

  /// This method returns the name of a target specific DAG node.
  virtual const char *getTargetNodeName(unsigned Opcode) const;

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  virtual FastISel *createFastISel(FunctionLoweringInfo &,
                                   const TargetLibraryInfo *) const {
    return nullptr;
  }

  bool verifyReturnAddressArgumentIsConstant(SDValue Op,
                                             SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Inline Asm Support hooks
  //

  /// This hook allows the target to expand an inline asm call to be explicit
  /// llvm code if it wants to.  This is useful for turning simple inline asms
  /// into LLVM intrinsics, which gives the compiler more information about the
  /// behavior of the code.
  virtual bool ExpandInlineAsm(CallInst *) const {
    return false;
  }

  enum ConstraintType {
    C_Register,            // Constraint represents specific register(s).
    C_RegisterClass,       // Constraint represents any of register(s) in class.
    C_Memory,              // Memory constraint.
    C_Address,             // Address constraint.
    C_Immediate,           // Requires an immediate.
    C_Other,               // Something else.
    C_Unknown              // Unsupported constraint.
  };

  enum ConstraintWeight {
    // Generic weights.
    CW_Invalid  = -1,     // No match.
    CW_Okay     = 0,      // Acceptable.
    CW_Good     = 1,      // Good weight.
    CW_Better   = 2,      // Better weight.
    CW_Best     = 3,      // Best weight.

    // Well-known weights.
    CW_SpecificReg  = CW_Okay,    // Specific register operands.
    CW_Register     = CW_Good,    // Register operands.
    CW_Memory       = CW_Better,  // Memory operands.
    CW_Constant     = CW_Best,    // Constant operand.
    CW_Default      = CW_Okay     // Default or don't know type.
  };

  /// This contains information for each constraint that we are lowering.
  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
    /// This contains the actual string for the code, like "m".  TargetLowering
    /// picks the 'best' code from ConstraintInfo::Codes that most closely
    /// matches the operand.
    std::string ConstraintCode;

    /// Information about the constraint code, e.g. Register, RegisterClass,
    /// Memory, Other, Unknown.
    TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown;

    /// If this is the result output operand or a clobber, this is null,
    /// otherwise it is the incoming operand to the CallInst.  This gets
    /// modified as the asm is processed.
    Value *CallOperandVal = nullptr;

    /// The ValueType for the operand value.
    MVT ConstraintVT = MVT::Other;

    /// Copy constructor for copying from a ConstraintInfo.
    AsmOperandInfo(InlineAsm::ConstraintInfo Info)
        : InlineAsm::ConstraintInfo(std::move(Info)) {}

    /// Return true if this is an input operand that is a matching constraint
    /// like "4".
    bool isMatchingInputConstraint() const;

    /// If this is an input matching constraint, this method returns the output
    /// operand it matches.
    unsigned getMatchedOperand() const;
  };

  using AsmOperandInfoVector = std::vector<AsmOperandInfo>;

  /// Split up the constraint string from the inline assembly value into the
  /// specific constraints and their prefixes, and also tie in the associated
  /// operand values.  If this returns an empty vector, and if the constraint
  /// string itself isn't empty, there was an error parsing.
  virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
                                                const TargetRegisterInfo *TRI,
                                                const CallBase &Call) const;

  /// Examine constraint type and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getMultipleConstraintMatchWeight(
      AsmOperandInfo &info, int maIndex) const;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

  /// Determines the constraint code and constraint type to use for the specific
  /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
  /// If the actual operand being passed in is available, it can be passed in as
  /// Op, otherwise an empty SDValue can be passed.
  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                      SDValue Op,
                                      SelectionDAG *DAG = nullptr) const;

  /// Given a constraint, return the type of constraint it is for this target.
  virtual ConstraintType getConstraintType(StringRef Constraint) const;

  using ConstraintPair = std::pair<StringRef, TargetLowering::ConstraintType>;
  using ConstraintGroup = SmallVector<ConstraintPair>;
  /// Given an OpInfo with a list of constraint codes as strings, return a
  /// sorted vector of pairs of constraint codes and their types in priority of
  /// what we'd prefer to lower them as. This may contain immediates that
  /// cannot be lowered, but it is meant to be a machine agnostic order of
  /// preferences.
  ConstraintGroup getConstraintPreferences(AsmOperandInfo &OpInfo) const;

  /// Given a physical register constraint (e.g. {edx}), return the register
  /// number and the register class for the register.
  ///
  /// Given a register class constraint, like 'r', if this corresponds directly
  /// to an LLVM register class, return a register of 0 and the register class
  /// pointer.
  ///
  /// This should only be used for C_Register constraints.  On error, this
  /// returns a register number of 0 and a null register class pointer.
  virtual std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const;

  virtual InlineAsm::ConstraintCode
  getInlineAsmMemConstraint(StringRef ConstraintCode) const {
    if (ConstraintCode == "m")
      return InlineAsm::ConstraintCode::m;
    if (ConstraintCode == "o")
      return InlineAsm::ConstraintCode::o;
    if (ConstraintCode == "X")
      return InlineAsm::ConstraintCode::X;
    if (ConstraintCode == "p")
      return InlineAsm::ConstraintCode::p;
    return InlineAsm::ConstraintCode::Unknown;
  }
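
  // A target with an additional machine-specific memory constraint code (for
  // example a "Q" code; the mapping below is an assumption for illustration)
  // would extend the default mapping and defer to the base class otherwise:
  //
  //   InlineAsm::ConstraintCode
  //   MyTargetLowering::getInlineAsmMemConstraint(StringRef Code) const {
  //     if (Code == "Q")
  //       return InlineAsm::ConstraintCode::Q;
  //     return TargetLowering::getInlineAsmMemConstraint(Code);
  //   }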

  /// Try to replace an X constraint, which matches anything, with another that
  /// has more specific requirements based on the type of the corresponding
  /// operand.  This returns null if there is no replacement to make.
  virtual const char *LowerXConstraint(EVT ConstraintVT) const;

  /// Lower the specified operand into the Ops vector.  If it is invalid, don't
  /// add anything to Ops.
  virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                            std::vector<SDValue> &Ops,
                                            SelectionDAG &DAG) const;

  // Lower custom output constraints. If invalid, return SDValue().
  virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue,
                                              const SDLoc &DL,
                                              const AsmOperandInfo &OpInfo,
                                              SelectionDAG &DAG) const;

  // Targets may override this function to collect operands from the CallInst
  // and for example, lower them into the SelectionDAG operands.
  virtual void CollectTargetIntrinsicOperands(const CallInst &I,
                                              SmallVectorImpl<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Div utility functions
  //

  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    SmallVectorImpl<SDNode *> &Created) const;
  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    SmallVectorImpl<SDNode *> &Created) const;
  // Build sdiv by power-of-2 with conditional move instructions
  SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor,
                                SelectionDAG &DAG,
                                SmallVectorImpl<SDNode *> &Created) const;

  /// Targets may override this function to provide custom SDIV lowering for
  /// power-of-2 denominators.  If the target returns an empty SDValue, LLVM
  /// assumes SDIV is expensive and replaces it with a series of other integer
  /// operations.
  virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                SelectionDAG &DAG,
                                SmallVectorImpl<SDNode *> &Created) const;

  /// Targets may override this function to provide custom SREM lowering for
  /// power-of-2 denominators.  If the target returns an empty SDValue, LLVM
  /// assumes SREM is expensive and replaces it with a series of other integer
  /// operations.
  virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor,
                                SelectionDAG &DAG,
                                SmallVectorImpl<SDNode *> &Created) const;

  /// Indicate whether this target prefers to combine FDIVs with the same
  /// divisor. If the transform should never be done, return zero. If the
  /// transform should be done, return the minimum number of divisor uses
  /// that must exist.
  virtual unsigned combineRepeatedFPDivisors() const {
    return 0;
  }

  /// Hooks for building estimates in place of slower divisions and square
  /// roots.

  /// Return either a square root or its reciprocal estimate value for the input
  /// operand.
  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
  /// 'Enabled' as set by a potential default override attribute.
  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
  /// refinement iterations required to generate a sufficient (though not
  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
  /// The boolean UseOneConstNR output is used to select a Newton-Raphson
  /// algorithm implementation that uses either one or two constants.
  /// The boolean Reciprocal is used to select whether the estimate is for the
  /// square root of the input operand or the reciprocal of its square root.
  /// A target may choose to implement its own refinement within this function.
  /// If that's true, then return '0' as the number of RefinementSteps to avoid
  /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                  int Enabled, int &RefinementSteps,
                                  bool &UseOneConstNR, bool Reciprocal) const {
    return SDValue();
  }
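
  // For illustration, a target with a hardware reciprocal square-root estimate
  // could override this roughly as follows (MyISD::FRSQRTE, the Subtarget
  // query, and the iteration count are all assumptions, not a fixed recipe):
  //
  //   SDValue MyTargetLowering::getSqrtEstimate(SDValue Operand,
  //                                             SelectionDAG &DAG, int Enabled,
  //                                             int &RefinementSteps,
  //                                             bool &UseOneConstNR,
  //                                             bool Reciprocal) const {
  //     EVT VT = Operand.getValueType();
  //     if (VT != MVT::f32 || !Subtarget.hasFRSQRTE())
  //       return SDValue();
  //     if (RefinementSteps == ReciprocalEstimate::Unspecified)
  //       RefinementSteps = 2; // two Newton-Raphson refinement iterations
  //     UseOneConstNR = true;
  //     return DAG.getNode(MyISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  //   }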

  /// Try to convert the fminnum/fmaxnum to a compare/select sequence. This is
  /// required for correctness since InstCombine might have canonicalized a
  /// fcmp+select sequence to a FMINNUM/FMAXNUM intrinsic.  If we were to fall
  /// through to the default expansion/soften to libcall, we might introduce a
  /// link-time dependency on libm into a file that originally did not have one.
  SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const;

  /// Return a reciprocal estimate value for the input operand.
  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
  /// 'Enabled' as set by a potential default override attribute.
  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
  /// refinement iterations required to generate a sufficient (though not
  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
  /// A target may choose to implement its own refinement within this function.
  /// If that's true, then return '0' as the number of RefinementSteps to avoid
  /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                   int Enabled, int &RefinementSteps) const {
    return SDValue();
  }

  /// Return a target-dependent comparison result if the input operand is
  /// suitable for use with a square root estimate calculation. For example, the
  /// comparison may check if the operand is NAN, INF, zero, normal, etc. The
  /// result should be used as the condition operand for a select or branch.
  virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                                   const DenormalMode &Mode) const;

  /// Return a target-dependent result if the input operand is not suitable for
  /// use with a square root estimate calculation.
  virtual SDValue getSqrtResultForDenormInput(SDValue Operand,
                                              SelectionDAG &DAG) const {
    return DAG.getConstantFP(0.0, SDLoc(Operand), Operand.getValueType());
  }

  //===--------------------------------------------------------------------===//
  // Legalization utility functions
  //

  /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
  /// respectively, each computing an n/2-bit part of the result.
  /// \param Result A vector that will be filled with the parts of the result
  ///        in little-endian order.
  /// \param LL Low bits of the LHS of the MUL.  You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL.  See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL.  See LL for meaning.
  /// \param RH High bits of the RHS of the MUL.  See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not
  bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS,
                      SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
                      SelectionDAG &DAG, MulExpansionKind Kind,
                      SDValue LL = SDValue(), SDValue LH = SDValue(),
                      SDValue RL = SDValue(), SDValue RH = SDValue()) const;

  /// Expand a MUL into two nodes.  One that computes the high bits of
  /// the result and one that computes the low bits.
  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
  /// \param LL Low bits of the LHS of the MUL.  You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL.  See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL.  See LL for meaning.
  /// \param RH High bits of the RHS of the MUL.  See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not
  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
                 SelectionDAG &DAG, MulExpansionKind Kind,
                 SDValue LL = SDValue(), SDValue LH = SDValue(),
                 SDValue RL = SDValue(), SDValue RH = SDValue()) const;

  /// Attempt to expand an n-bit div/rem/divrem by constant using a n/2-bit
  /// urem by constant and other arithmetic ops. The n/2-bit urem by constant
  /// will be expanded by DAGCombiner. This is not possible for all constant
  /// divisors.
  /// \param N Node to expand
  /// \param Result A vector that will be filled with the lo and high parts of
  ///        the results. For *DIVREM, this will be the quotient parts followed
  ///        by the remainder parts.
  /// \param HiLoVT The value type to use for the Lo and Hi parts. Should be
  ///        half of VT.
  /// \param LL Low bits of the LHS of the operation. You can use this
  ///        parameter if you want to control how low bits are extracted from
  ///        the LHS.
  /// \param LH High bits of the LHS of the operation. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
  bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl<SDValue> &Result,
                              EVT HiLoVT, SelectionDAG &DAG,
                              SDValue LL = SDValue(),
                              SDValue LH = SDValue()) const;

  /// Expand funnel shift.
  /// \param N Node to expand
  /// \returns The expansion if successful, SDValue() otherwise
  SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const;

  /// Expand rotations.
  /// \param N Node to expand
  /// \param AllowVectorOps expand vector rotate, this should only be performed
  ///        if the legalization is happening outside of LegalizeVectorOps
  /// \returns The expansion if successful, SDValue() otherwise
  SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const;

  /// Expand shift-by-parts.
  /// \param N Node to expand
  /// \param Lo lower-output-part after conversion
  /// \param Hi upper-output-part after conversion
  void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi,
                        SelectionDAG &DAG) const;

  /// Expand float(f32) to SINT(i64) conversion
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  /// Expand float to UINT conversion
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \param Chain output chain after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain,
                        SelectionDAG &DAG) const;

  /// Expand UINT(i64) to double(f64) conversion
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \param Chain output chain after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain,
                        SelectionDAG &DAG) const;

  /// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
  SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const;

  /// Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
  /// \param N Node to expand
  /// \returns The expansion result
  SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const;

  /// Truncate Op to ResultVT. If the result is exact, leave it alone. If it is
  /// not exact, force the result to be odd.
  /// \param ResultVT The type of result.
  /// \param Op The value to round.
  /// \returns The expansion result
  SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL,
                                  SelectionDAG &DAG) const;

  /// Expand round(fp) to fp conversion
  /// \param N Node to expand
  /// \returns The expansion result
  SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const;

  /// Expand check for floating point class.
  /// \param ResultVT The type of intrinsic call result.
  /// \param Op The tested value.
  /// \param Test The test to perform.
  /// \param Flags The optimization flags.
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test,
                           SDNodeFlags Flags, const SDLoc &DL,
                           SelectionDAG &DAG) const;

  /// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes,
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_CTPOP nodes.
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand CTLZ/CTLZ_ZERO_UNDEF nodes. Expands vector/scalar CTLZ nodes,
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand CTTZ via Table Lookup.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                          SDValue Op, unsigned NumBitsPerElt) const;

  /// Expand CTTZ/CTTZ_ZERO_UNDEF nodes. Expands vector/scalar CTTZ nodes,
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand ABS nodes. Expands vector/scalar ABS nodes,
  /// vector nodes can only succeed if all operations are legal/custom.
  /// (ABS x) -> (XOR (ADD x, (SRA x, type_size)), (SRA x, type_size))
  /// \param N Node to expand
  /// \param IsNegative indicate negated abs
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandABS(SDNode *N, SelectionDAG &DAG,
                    bool IsNegative = false) const;

  /// Expand ABDS/ABDU nodes. Expands vector/scalar ABDS/ABDU nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandABD(SDNode *N, SelectionDAG &DAG) const;

  /// Expand BSWAP nodes. Expands scalar/vector BSWAP nodes with i16/i32/i64
  /// scalar types. Returns SDValue() if expand fails.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_BSWAP nodes. Expands VP_BSWAP nodes with i16/i32/i64 scalar
  /// types. Returns SDValue() if expand fails.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand BITREVERSE nodes. Expands scalar/vector BITREVERSE nodes.
  /// Returns SDValue() if expand fails.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_BITREVERSE nodes. Expands VP_BITREVERSE nodes with
  /// i8/i16/i32/i64 scalar types.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const;

  /// Turn load of vector type into a load of the individual elements.
  /// \param LD load to expand
  /// \returns BUILD_VECTOR and TokenFactor nodes.
  std::pair<SDValue, SDValue> scalarizeVectorLoad(LoadSDNode *LD,
                                                  SelectionDAG &DAG) const;

  /// Turn a store of a vector type into stores of the individual elements.
  /// \param ST Store with a vector value type
  /// \returns TokenFactor of the individual store chains.
  SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;

  /// Expands an unaligned load to 2 half-size loads for an integer, and
  /// possibly more for vectors.
  std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
                                                  SelectionDAG &DAG) const;

  /// Expands an unaligned store to 2 half-size stores for integer values, and
  /// possibly more for vectors.
  SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;

  /// Increments memory address \p Addr according to the type of the value
  /// \p DataVT that should be stored. If the data is stored in compressed
  /// form, the memory address should be incremented according to the number of
  /// the stored elements. This number is equal to the number of '1' bits
  /// in the \p Mask.
  /// \p DataVT is a vector type. \p Mask is a vector value.
  /// \p DataVT and \p Mask have the same number of vector elements.
  SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
                                 EVT DataVT, SelectionDAG &DAG,
                                 bool IsCompressedMemory) const;

  /// Get a pointer to vector element \p Idx located in memory for a vector of
  /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
  /// bounds the returned pointer is unspecified, but will be within the vector
  /// bounds.
  SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                  SDValue Index) const;

  /// Get a pointer to a sub-vector of type \p SubVecVT at index \p Idx located
  /// in memory for a vector of type \p VecVT starting at a base address of
  /// \p VecPtr. If \p Idx plus the size of \p SubVecVT is out of bounds the
  /// returned pointer is unspecified, but the value returned will be such that
  /// the entire subvector would be within the vector bounds.
  SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                 EVT SubVecVT, SDValue Index) const;

  /// Method for building the DAG expansion of ISD::[US][MIN|MAX]. This
  /// method accepts integers as its arguments.
  SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
  /// method accepts integers as its arguments.
  SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]SHLSAT. This
  /// method accepts integers as its arguments.
  SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT]. This
  /// method accepts integers as its arguments.
  SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]DIVFIX[SAT]. This
  /// method accepts integers as its arguments.
  /// Note: This method may fail if the division could not be performed
  /// within the type. Clients must retry with a wider type if this happens.
  SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
                              SDValue LHS, SDValue RHS,
                              unsigned Scale, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::U(ADD|SUB)O. Expansion
  /// always succeeds and populates the Result and Overflow arguments.
  void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                      SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::S(ADD|SUB)O. Expansion
  /// always succeeds and populates the Result and Overflow arguments.
  void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                      SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]MULO. Returns whether
  /// expansion was successful and populates the Result and Overflow arguments.
  bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                  SelectionDAG &DAG) const;

  /// forceExpandWideMUL - Unconditionally expand a MUL into either a libcall or
  /// brute force via a wide multiplication. The expansion works by
  /// attempting to do a multiplication on a wider type twice the size of the
  /// original operands. LL and LH represent the lower and upper halves of the
  /// first operand. RL and RH represent the lower and upper halves of the
  /// second operand. The upper and lower halves of the result are stored in Lo
  /// and Hi.
  void forceExpandWideMUL(SelectionDAG &DAG, const SDLoc &dl, bool Signed,
                          EVT WideVT, const SDValue LL, const SDValue LH,
                          const SDValue RL, const SDValue RH, SDValue &Lo,
                          SDValue &Hi) const;

  /// Same as above, but creates the upper halves of each operand by
  /// sign/zero-extending the operands.
  void forceExpandWideMUL(SelectionDAG &DAG, const SDLoc &dl, bool Signed,
                          const SDValue LHS, const SDValue RHS, SDValue &Lo,
                          SDValue &Hi) const;
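
  // Both overloads rest on the schoolbook decomposition: with h equal to half
  // the bit width of the original type,
  //   (LL + LH*2^h) * (RL + RH*2^h)
  //     = LL*RL + (LL*RH + LH*RL)*2^h + LH*RH*2^(2h),
  // evaluated on the wider type; Lo and Hi receive the low and high halves of
  // that product.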

  /// Expand a VECREDUCE_* into an explicit calculation. If Count is specified,
  /// only the first Count elements of the vector are used.
  SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const;

  /// Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
  SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const;

  /// Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
  /// Returns true if the expansion was successful.
  bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::VECTOR_SPLICE. This
  /// method accepts vectors as its arguments.
  SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const;

  /// Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC
  /// on the current target. A VP_SETCC will additionally be given a Mask
  /// and/or EVL not equal to SDValue().
  ///
  /// If the SETCC has been legalized using AND / OR, then the legalized node
  /// will be stored in LHS. RHS and CC will be set to SDValue(). NeedInvert
  /// will be set to false. This will also hold if the VP_SETCC has been
  /// legalized using VP_AND / VP_OR.
  ///
  /// If the SETCC / VP_SETCC has been legalized by using
  /// getSetCCSwappedOperands(), then the values of LHS and RHS will be
  /// swapped, CC will be set to the new condition, and NeedInvert will be set
  /// to false.
  ///
  /// If the SETCC / VP_SETCC has been legalized using the inverse condcode,
  /// then LHS and RHS will be unchanged, CC will be set to the inverted
  /// condcode, and NeedInvert will be set to true. The caller must invert the
  /// result of the SETCC with SelectionDAG::getLogicalNOT() or take equivalent
  /// action to swap the effect of a true/false result.
  ///
  /// \returns true if the SETCC / VP_SETCC has been legalized, false if it
  /// hasn't.
  bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS,
                             SDValue &RHS, SDValue &CC, SDValue Mask,
                             SDValue EVL, bool &NeedInvert, const SDLoc &dl,
                             SDValue &Chain, bool IsSignaling = false) const;
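
  // A caller typically consumes the out-parameters like this sketch (VT, the
  // operand names and the surrounding context are assumptions; for a plain
  // SETCC the Mask and EVL arguments are passed as empty SDValues):
  //
  //   bool NeedInvert = false;
  //   SDValue Chain;
  //   if (TLI.LegalizeSetCCCondCode(DAG, VT, LHS, RHS, CC, SDValue(),
  //                                 SDValue(), NeedInvert, DL, Chain)) {
  //     SDValue SetCC = RHS ? DAG.getNode(ISD::SETCC, DL, VT, LHS, RHS, CC)
  //                         : LHS; // legalized via AND/OR into LHS
  //     if (NeedInvert)
  //       SetCC = DAG.getLogicalNOT(DL, SetCC, VT);
  //     return SetCC;
  //   }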
5411 
5412   //===--------------------------------------------------------------------===//
5413   // Instruction Emitting Hooks
5414   //
5415 
5416   /// This method should be implemented by targets that mark instructions with
5417   /// the 'usesCustomInserter' flag.  These instructions are special in various
5418   /// ways, which require special support to insert.  The specified MachineInstr
5419   /// is created but not inserted into any basic blocks, and this method is
5420   /// called to expand it into a sequence of instructions, potentially also
5421   /// creating new basic blocks and control flow.
5422   /// As long as the returned basic block is different (i.e., we created a new
5423   /// one), the custom inserter is free to modify the rest of \p MBB.
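  ///
  /// A hedged sketch of a target override (all target names are
  /// hypothetical):
  /// \code
  ///   MachineBasicBlock *
  ///   XXXTargetLowering::EmitInstrWithCustomInserter(
  ///       MachineInstr &MI, MachineBasicBlock *MBB) const {
  ///     switch (MI.getOpcode()) {
  ///     case XXX::SELECT_PSEUDO:
  ///       // Split MBB and emit an explicit branch diamond, inserting MI's
  ///       // replacement instructions into the new blocks.
  ///       return emitSelectPseudo(MI, MBB);
  ///     default:
  ///       llvm_unreachable("Unexpected instr type to insert");
  ///     }
  ///   }
  /// \endcode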
  virtual MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;

  /// This method should be implemented by targets that mark instructions with
  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
  /// instruction selection by target hooks, e.g., to fill in optional defs for
  /// ARM 's'-setting instructions.
  virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                             SDNode *Node) const;

  /// If this function returns true, SelectionDAGBuilder emits a
  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
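  ///
  /// A target opts in simply by returning true (hypothetical target name):
  /// \code
  ///   bool XXXTargetLowering::useLoadStackGuardNode() const { return true; }
  /// \endcode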
  virtual bool useLoadStackGuardNode() const {
    return false;
  }

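  /// Emit a target-specific transformation of the stack guard value \p Val,
  /// e.g. XOR'ing it with a value derived from the frame pointer, as part of
  /// stack-protector lowering. Only meaningful for targets that override this
  /// hook; the default implementation is unreachable.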
  virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
                                      const SDLoc &DL) const {
    llvm_unreachable("not implemented for this target");
  }

  /// Lower TLS global address SDNode for the target-independent emulated TLS
  /// model.
  virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;

  /// Expands a target-specific indirect branch for the case of JumpTable
  /// expansion.
  virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value,
                                         SDValue Addr, int JTI,
                                         SelectionDAG &DAG) const;

  // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
  // If we're comparing for equality to zero and isCtlzFast is true, expose the
  // fact that this can be implemented as a ctlz/srl pair, so that the dag
  // combiner can fold the new nodes.
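  // For example, with a 32-bit x (an illustrative instance of the identity
  // above): ctlz(x) == 32 exactly when x == 0, so "x == 0" becomes
  // "truncate(ctlz(x) >> 5)", which is 1 iff x == 0 and 0 otherwise.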
  SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;

  // Return true if `X & Y eq/ne 0` is preferable to `X & Y ne/eq Y`.
  virtual bool isXAndYEqZeroPreferableToXAndYEqY(ISD::CondCode, EVT) const {
    return true;
  }

private:
  SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                           const SDLoc &DL, DAGCombinerInfo &DCI) const;
  SDValue foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                             const SDLoc &DL, DAGCombinerInfo &DCI) const;

  SDValue optimizeSetCCOfSignedTruncationCheck(EVT SCCVT, SDValue N0,
                                               SDValue N1, ISD::CondCode Cond,
                                               DAGCombinerInfo &DCI,
                                               const SDLoc &DL) const;

  // (X & (C l>>/<< Y)) ==/!= 0  -->  ((X <</l>> Y) & C) ==/!= 0
  SDValue optimizeSetCCByHoistingAndByConstFromLogicalShift(
      EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
      DAGCombinerInfo &DCI, const SDLoc &DL) const;

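  // The UREM-by-constant equality fold below rests on a known identity
  // (Hacker's Delight, section 10-17): for odd C and an N-bit unsigned X,
  //   X u% C == 0  <=>  X * C^-1 (mod 2^N)  u<=  (2^N - 1) / C.
  // For example, with N = 32 and C = 3:
  //   X u% 3 == 0  <=>  X * 0xAAAAAAAB u<= 0x55555555,
  // since 3 * 0xAAAAAAAB == 1 (mod 2^32). Even divisors additionally use a
  // rotate to account for the power-of-two factor.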
  SDValue prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
                            SDValue CompTargetNode, ISD::CondCode Cond,
                            DAGCombinerInfo &DCI, const SDLoc &DL,
                            SmallVectorImpl<SDNode *> &Created) const;
  SDValue buildUREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
                          ISD::CondCode Cond, DAGCombinerInfo &DCI,
                          const SDLoc &DL) const;

  SDValue prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
                            SDValue CompTargetNode, ISD::CondCode Cond,
                            DAGCombinerInfo &DCI, const SDLoc &DL,
                            SmallVectorImpl<SDNode *> &Created) const;
  SDValue buildSREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
                          ISD::CondCode Cond, DAGCombinerInfo &DCI,
                          const SDLoc &DL) const;
};

/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
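///
/// A hedged usage sketch (mirroring how instruction selection computes a
/// function F's return lowering; TLI is the target's TargetLowering):
/// \code
///   SmallVector<ISD::OutputArg, 4> Outs;
///   GetReturnInfo(F.getCallingConv(), F.getReturnType(), F.getAttributes(),
///                 Outs, TLI, F.getParent()->getDataLayout());
/// \endcode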
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr,
                   SmallVectorImpl<ISD::OutputArg> &Outs,
                   const TargetLowering &TLI, const DataLayout &DL);

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETLOWERING_H