//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code. This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETLOWERING_H
#define LLVM_CODEGEN_TARGETLOWERING_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class AssumptionCache;
class CCState;
class CCValAssign;
enum class ComplexDeinterleavingOperation;
enum class ComplexDeinterleavingRotation;
class Constant;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class Loop;
class GISelKnownBits;
class IntrinsicInst;
class IRBuilderBase;
struct KnownBits;
class LLVMContext;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
class MachineLoop;
class MachineRegisterInfo;
class MCContext;
class MCExpr;
class Module;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetTransformInfo;
class Value;

namespace Sched {

enum Preference {
  None,        // No preference
  Source,      // Follow source order.
  RegPressure, // Scheduling for lowest register pressure.
  Hybrid,      // Scheduling for both latency and register pressure.
  ILP,         // Scheduling for ILP in low register pressure mode.
  VLIW,        // Scheduling for VLIW targets.
  Fast,        // Fast suboptimal list scheduling
  Linearize    // Linearize DAG, no scheduling
};

} // end namespace Sched

// MemOp models a memory operation, either memset or memcpy/memmove.
struct MemOp {
private:
  // Shared
  uint64_t Size;
  bool DstAlignCanChange; // true if destination alignment can satisfy any
                          // constraint.
  Align DstAlign;         // Specified alignment of the memory operation.

  bool AllowOverlap;
  // memset only
  bool IsMemset;   // If set, this memory operation is a memset.
  bool ZeroMemset; // If set, clears out memory with zeros.
  // memcpy only
  bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
                     // constant so it does not need to be loaded.
  Align SrcAlign;    // Inferred alignment of the source or default value if the
                     // memory operation does not need to load the value.
public:
  static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                    Align SrcAlign, bool IsVolatile,
                    bool MemcpyStrSrc = false) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = false;
    Op.ZeroMemset = false;
    Op.MemcpyStrSrc = MemcpyStrSrc;
    Op.SrcAlign = SrcAlign;
    return Op;
  }

  static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                   bool IsZeroMemset, bool IsVolatile) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = true;
    Op.ZeroMemset = IsZeroMemset;
    Op.MemcpyStrSrc = false;
    return Op;
  }

  uint64_t size() const { return Size; }
  Align getDstAlign() const {
    assert(!DstAlignCanChange);
    return DstAlign;
  }
  bool isFixedDstAlign() const { return !DstAlignCanChange; }
  bool allowOverlap() const { return AllowOverlap; }
  bool isMemset() const { return IsMemset; }
  bool isMemcpy() const { return !IsMemset; }
  bool isMemcpyWithFixedDstAlign() const {
    return isMemcpy() && !DstAlignCanChange;
  }
  bool isZeroMemset() const { return isMemset() && ZeroMemset; }
  bool isMemcpyStrSrc() const {
    assert(isMemcpy() && "Must be a memcpy");
    return MemcpyStrSrc;
  }
  Align getSrcAlign() const {
    assert(isMemcpy() && "Must be a memcpy");
    return SrcAlign;
  }
  bool isSrcAligned(Align AlignCheck) const {
    return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value());
  }
  bool isDstAligned(Align AlignCheck) const {
    return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value());
  }
  bool isAligned(Align AlignCheck) const {
    return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck);
  }
};

/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
public:
  /// This enum indicates whether operations are valid for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeAction : uint8_t {
    Legal,   // The target natively supports this operation.
    Promote, // This operation should be executed in a larger type.
    Expand,  // Try to expand this to other ops, otherwise use a libcall.
    LibCall, // Don't try to expand this to other ops, always use a libcall.
    Custom   // Use the LowerOperation hook to implement custom lowering.
  };

  /// This enum indicates whether types are legal for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeTypeAction : uint8_t {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector,     // This vector should be widened into a larger vector.
    TypePromoteFloat,    // Replace this float with a larger one.
    TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic.
    TypeScalarizeScalableVector, // This action is explicitly left unimplemented.
                                 // While it is theoretically possible to
                                 // legalize operations on scalable types with a
                                 // loop that handles the vscale * #lanes of the
                                 // vector, this is non-trivial at SelectionDAG
                                 // level and these types are better to be
                                 // widened or promoted.
  };

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,        // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,     // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal, // The target supports selects with a scalar condition
                         // and vector values (ex: cmov).
    VectorMaskSelect     // The target supports vector selects with a vector
                         // mask (ex: x86 blends).
  };

  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
  /// to, if at all. Exists because different targets have different levels of
  /// support for these atomic instructions, and also have different options
  /// w.r.t. what they should expand to.
  enum class AtomicExpansionKind {
    None,          // Don't expand the instruction.
    CastToInteger, // Cast the atomic instruction to another type, e.g. from
                   // floating-point to integer type.
    LLSC,          // Expand the instruction into load-linked/store-conditional;
                   // used by ARM/AArch64.
    LLOnly,        // Expand the (load) instruction into just a load-linked,
                   // which has greater atomic guarantees than a normal load.
    CmpXChg,       // Expand the instruction into cmpxchg; used by at least X86.
    MaskedIntrinsic,   // Use a target-specific intrinsic for the LL/SC loop.
    BitTestIntrinsic,  // Use a target-specific intrinsic for special bit
                       // operations; used by X86.
    CmpArithIntrinsic, // Use a target-specific intrinsic for special compare
                       // operations; used by X86.
    Expand,        // Generic expansion in terms of other atomic operations.
266 267 // Rewrite to a non-atomic form for use in a known non-preemptible 268 // environment. 269 NotAtomic 270 }; 271 272 /// Enum that specifies when a multiplication should be expanded. 273 enum class MulExpansionKind { 274 Always, // Always expand the instruction. 275 OnlyLegalOrCustom, // Only expand when the resulting instructions are legal 276 // or custom. 277 }; 278 279 /// Enum that specifies when a float negation is beneficial. 280 enum class NegatibleCost { 281 Cheaper = 0, // Negated expression is cheaper. 282 Neutral = 1, // Negated expression has the same cost. 283 Expensive = 2 // Negated expression is more expensive. 284 }; 285 286 /// Enum of different potentially desirable ways to fold (and/or (setcc ...), 287 /// (setcc ...)). 288 enum AndOrSETCCFoldKind : uint8_t { 289 None = 0, // No fold is preferable. 290 AddAnd = 1, // Fold with `Add` op and `And` op is preferable. 291 NotAnd = 2, // Fold with `Not` op and `And` op is preferable. 292 ABS = 4, // Fold with `llvm.abs` op is preferable. 293 }; 294 295 class ArgListEntry { 296 public: 297 Value *Val = nullptr; 298 SDValue Node = SDValue(); 299 Type *Ty = nullptr; 300 bool IsSExt : 1; 301 bool IsZExt : 1; 302 bool IsInReg : 1; 303 bool IsSRet : 1; 304 bool IsNest : 1; 305 bool IsByVal : 1; 306 bool IsByRef : 1; 307 bool IsInAlloca : 1; 308 bool IsPreallocated : 1; 309 bool IsReturned : 1; 310 bool IsSwiftSelf : 1; 311 bool IsSwiftAsync : 1; 312 bool IsSwiftError : 1; 313 bool IsCFGuardTarget : 1; 314 MaybeAlign Alignment = std::nullopt; 315 Type *IndirectType = nullptr; 316 ArgListEntry()317 ArgListEntry() 318 : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false), 319 IsNest(false), IsByVal(false), IsByRef(false), IsInAlloca(false), 320 IsPreallocated(false), IsReturned(false), IsSwiftSelf(false), 321 IsSwiftAsync(false), IsSwiftError(false), IsCFGuardTarget(false) {} 322 323 void setAttributes(const CallBase *Call, unsigned ArgIdx); 324 }; 325 using ArgListTy = std::vector<ArgListEntry>; 326 markLibCallAttributes(MachineFunction * MF,unsigned CC,ArgListTy & Args)327 virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC, 328 ArgListTy &Args) const {}; 329 getExtendForContent(BooleanContent Content)330 static ISD::NodeType getExtendForContent(BooleanContent Content) { 331 switch (Content) { 332 case UndefinedBooleanContent: 333 // Extend by adding rubbish bits. 334 return ISD::ANY_EXTEND; 335 case ZeroOrOneBooleanContent: 336 // Extend by adding zero bits. 337 return ISD::ZERO_EXTEND; 338 case ZeroOrNegativeOneBooleanContent: 339 // Extend by copying the sign bit. 340 return ISD::SIGN_EXTEND; 341 } 342 llvm_unreachable("Invalid content kind"); 343 } 344 345 explicit TargetLoweringBase(const TargetMachine &TM); 346 TargetLoweringBase(const TargetLoweringBase &) = delete; 347 TargetLoweringBase &operator=(const TargetLoweringBase &) = delete; 348 virtual ~TargetLoweringBase() = default; 349 350 /// Return true if the target support strict float operation isStrictFPEnabled()351 bool isStrictFPEnabled() const { 352 return IsStrictFPEnabled; 353 } 354 355 protected: 356 /// Initialize all of the actions to default values. 357 void initActions(); 358 359 public: getTargetMachine()360 const TargetMachine &getTargetMachine() const { return TM; } 361 useSoftFloat()362 virtual bool useSoftFloat() const { return false; } 363 364 /// Return the pointer type for the given address space, defaults to 365 /// the pointer type from the data layout. 
366 /// FIXME: The default needs to be removed once all the code is updated. 367 virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const { 368 return MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); 369 } 370 371 /// Return the in-memory pointer type for the given address space, defaults to 372 /// the pointer type from the data layout. 373 /// FIXME: The default needs to be removed once all the code is updated. 374 virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const { 375 return MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); 376 } 377 378 /// Return the type for frame index, which is determined by 379 /// the alloca address space specified through the data layout. getFrameIndexTy(const DataLayout & DL)380 MVT getFrameIndexTy(const DataLayout &DL) const { 381 return getPointerTy(DL, DL.getAllocaAddrSpace()); 382 } 383 384 /// Return the type for code pointers, which is determined by the program 385 /// address space specified through the data layout. getProgramPointerTy(const DataLayout & DL)386 MVT getProgramPointerTy(const DataLayout &DL) const { 387 return getPointerTy(DL, DL.getProgramAddressSpace()); 388 } 389 390 /// Return the type for operands of fence. 391 /// TODO: Let fence operands be of i32 type and remove this. getFenceOperandTy(const DataLayout & DL)392 virtual MVT getFenceOperandTy(const DataLayout &DL) const { 393 return getPointerTy(DL); 394 } 395 396 /// Return the type to use for a scalar shift opcode, given the shifted amount 397 /// type. Targets should return a legal type if the input type is legal. 398 /// Targets can return a type that is too small if the input type is illegal. 399 virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const; 400 401 /// Returns the type for the shift amount of a shift opcode. For vectors, 402 /// returns the input type. For scalars, behavior depends on \p LegalTypes. If 403 /// \p LegalTypes is true, calls getScalarShiftAmountTy, otherwise uses 404 /// pointer type. If getScalarShiftAmountTy or pointer type cannot represent 405 /// all possible shift amounts, returns MVT::i32. In general, \p LegalTypes 406 /// should be set to true for calls during type legalization and after type 407 /// legalization has been completed. 408 EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, 409 bool LegalTypes = true) const; 410 411 /// Return the preferred type to use for a shift opcode, given the shifted 412 /// amount type is \p ShiftValueTy. 413 LLVM_READONLY getPreferredShiftAmountTy(LLT ShiftValueTy)414 virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const { 415 return ShiftValueTy; 416 } 417 418 /// Returns the type to be used for the index operand of: 419 /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, 420 /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR getVectorIdxTy(const DataLayout & DL)421 virtual MVT getVectorIdxTy(const DataLayout &DL) const { 422 return getPointerTy(DL); 423 } 424 425 /// Returns the type to be used for the EVL/AVL operand of VP nodes: 426 /// ISD::VP_ADD, ISD::VP_SUB, etc. It must be a legal scalar integer type, 427 /// and must be at least as large as i32. The EVL is implicitly zero-extended 428 /// to any larger type. getVPExplicitVectorLengthTy()429 virtual MVT getVPExplicitVectorLengthTy() const { return MVT::i32; } 430 431 /// This callback is used to inspect load/store instructions and add 432 /// target-specific MachineMemOperand flags to them. The default 433 /// implementation does nothing. 
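  ///
  /// A minimal sketch of an override (the target and the metadata kind below
  /// are hypothetical, not part of this interface; MOTargetFlag1 is one of the
  /// target-reserved MachineMemOperand flags):
  /// \code
  ///   MachineMemOperand::Flags
  ///   MyTargetLowering::getTargetMMOFlags(const Instruction &I) const {
  ///     // Tag loads/stores carrying a (made-up) hint so later passes that
  ///     // inspect the MachineMemOperand can recognize them.
  ///     if (I.getMetadata("mytarget.nontemporal.hint"))
  ///       return MachineMemOperand::MOTargetFlag1;
  ///     return MachineMemOperand::MONone;
  ///   }
  /// \endcode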
getTargetMMOFlags(const Instruction & I)434 virtual MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const { 435 return MachineMemOperand::MONone; 436 } 437 438 /// This callback is used to inspect load/store SDNode. 439 /// The default implementation does nothing. 440 virtual MachineMemOperand::Flags getTargetMMOFlags(const MemSDNode & Node)441 getTargetMMOFlags(const MemSDNode &Node) const { 442 return MachineMemOperand::MONone; 443 } 444 445 MachineMemOperand::Flags 446 getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, 447 AssumptionCache *AC = nullptr, 448 const TargetLibraryInfo *LibInfo = nullptr) const; 449 MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, 450 const DataLayout &DL) const; 451 MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, 452 const DataLayout &DL) const; 453 isSelectSupported(SelectSupportKind)454 virtual bool isSelectSupported(SelectSupportKind /*kind*/) const { 455 return true; 456 } 457 458 /// Return true if the @llvm.get.active.lane.mask intrinsic should be expanded 459 /// using generic code in SelectionDAGBuilder. shouldExpandGetActiveLaneMask(EVT VT,EVT OpVT)460 virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const { 461 return true; 462 } 463 shouldExpandGetVectorLength(EVT CountVT,unsigned VF,bool IsScalable)464 virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, 465 bool IsScalable) const { 466 return true; 467 } 468 469 /// Return true if the @llvm.experimental.cttz.elts intrinsic should be 470 /// expanded using generic code in SelectionDAGBuilder. shouldExpandCttzElements(EVT VT)471 virtual bool shouldExpandCttzElements(EVT VT) const { return true; } 472 473 // Return true if op(vecreduce(x), vecreduce(y)) should be reassociated to 474 // vecreduce(op(x, y)) for the reduction opcode RedOpc. shouldReassociateReduction(unsigned RedOpc,EVT VT)475 virtual bool shouldReassociateReduction(unsigned RedOpc, EVT VT) const { 476 return true; 477 } 478 479 /// Return true if it is profitable to convert a select of FP constants into 480 /// a constant pool load whose address depends on the select condition. The 481 /// parameter may be used to differentiate a select with FP compare from 482 /// integer compare. reduceSelectOfFPConstantLoads(EVT CmpOpVT)483 virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const { 484 return true; 485 } 486 487 /// Return true if multiple condition registers are available. hasMultipleConditionRegisters()488 bool hasMultipleConditionRegisters() const { 489 return HasMultipleConditionRegisters; 490 } 491 492 /// Return true if the target has BitExtract instructions. hasExtractBitsInsn()493 bool hasExtractBitsInsn() const { return HasExtractBitsInsn; } 494 495 /// Return the preferred vector type legalization action. 496 virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)497 getPreferredVectorAction(MVT VT) const { 498 // The default action for one element vectors is to scalarize 499 if (VT.getVectorElementCount().isScalar()) 500 return TypeScalarizeVector; 501 // The default action for an odd-width vector is to widen. 502 if (!VT.isPow2VectorType()) 503 return TypeWidenVector; 504 // The default action for other vectors is to promote 505 return TypePromoteInteger; 506 } 507 508 // Return true if the half type should be passed around as i16, but promoted 509 // to float around arithmetic. 
The default behavior is to pass around as 510 // float and convert around loads/stores/bitcasts and other places where 511 // the size matters. softPromoteHalfType()512 virtual bool softPromoteHalfType() const { return false; } 513 514 // There are two general methods for expanding a BUILD_VECTOR node: 515 // 1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle 516 // them together. 517 // 2. Build the vector on the stack and then load it. 518 // If this function returns true, then method (1) will be used, subject to 519 // the constraint that all of the necessary shuffles are legal (as determined 520 // by isShuffleMaskLegal). If this function returns false, then method (2) is 521 // always used. The vector type, and the number of defined values, are 522 // provided. 523 virtual bool shouldExpandBuildVectorWithShuffles(EVT,unsigned DefinedValues)524 shouldExpandBuildVectorWithShuffles(EVT /* VT */, 525 unsigned DefinedValues) const { 526 return DefinedValues < 3; 527 } 528 529 /// Return true if integer divide is usually cheaper than a sequence of 530 /// several shifts, adds, and multiplies for this target. 531 /// The definition of "cheaper" may depend on whether we're optimizing 532 /// for speed or for size. isIntDivCheap(EVT VT,AttributeList Attr)533 virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; } 534 535 /// Return true if the target can handle a standalone remainder operation. hasStandaloneRem(EVT VT)536 virtual bool hasStandaloneRem(EVT VT) const { 537 return true; 538 } 539 540 /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X). isFsqrtCheap(SDValue X,SelectionDAG & DAG)541 virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const { 542 // Default behavior is to replace SQRT(X) with X*RSQRT(X). 543 return false; 544 } 545 546 /// Reciprocal estimate status values used by the functions below. 547 enum ReciprocalEstimate : int { 548 Unspecified = -1, 549 Disabled = 0, 550 Enabled = 1 551 }; 552 553 /// Return a ReciprocalEstimate enum value for a square root of the given type 554 /// based on the function's attributes. If the operation is not overridden by 555 /// the function's attributes, "Unspecified" is returned and target defaults 556 /// are expected to be used for instruction selection. 557 int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const; 558 559 /// Return a ReciprocalEstimate enum value for a division of the given type 560 /// based on the function's attributes. If the operation is not overridden by 561 /// the function's attributes, "Unspecified" is returned and target defaults 562 /// are expected to be used for instruction selection. 563 int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const; 564 565 /// Return the refinement step count for a square root of the given type based 566 /// on the function's attributes. If the operation is not overridden by 567 /// the function's attributes, "Unspecified" is returned and target defaults 568 /// are expected to be used for instruction selection. 569 int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const; 570 571 /// Return the refinement step count for a division of the given type based 572 /// on the function's attributes. If the operation is not overridden by 573 /// the function's attributes, "Unspecified" is returned and target defaults 574 /// are expected to be used for instruction selection. 
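  ///
  /// Illustrative sketch only (the surrounding lowering code and the chosen
  /// default of 2 iterations are hypothetical): a target's reciprocal-estimate
  /// lowering would typically consult these hooks before emitting a refinement
  /// sequence.
  /// \code
  ///   if (getRecipEstimateDivEnabled(VT, MF) == TargetLoweringBase::Disabled)
  ///     return SDValue();                 // use a real divide instead
  ///   int Iterations = getDivRefinementSteps(VT, MF);
  ///   if (Iterations == TargetLoweringBase::Unspecified)
  ///     Iterations = 2;                   // fall back to a target default
  /// \endcode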
575 int getDivRefinementSteps(EVT VT, MachineFunction &MF) const; 576 577 /// Returns true if target has indicated at least one type should be bypassed. isSlowDivBypassed()578 bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); } 579 580 /// Returns map of slow types for division or remainder with corresponding 581 /// fast types getBypassSlowDivWidths()582 const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const { 583 return BypassSlowDivWidths; 584 } 585 586 /// Return true only if vscale must be a power of two. isVScaleKnownToBeAPowerOfTwo()587 virtual bool isVScaleKnownToBeAPowerOfTwo() const { return false; } 588 589 /// Return true if Flow Control is an expensive operation that should be 590 /// avoided. isJumpExpensive()591 bool isJumpExpensive() const { return JumpIsExpensive; } 592 593 /// Return true if selects are only cheaper than branches if the branch is 594 /// unlikely to be predicted right. isPredictableSelectExpensive()595 bool isPredictableSelectExpensive() const { 596 return PredictableSelectIsExpensive; 597 } 598 fallBackToDAGISel(const Instruction & Inst)599 virtual bool fallBackToDAGISel(const Instruction &Inst) const { 600 return false; 601 } 602 603 /// Return true if the following transform is beneficial: 604 /// fold (conv (load x)) -> (load (conv*)x) 605 /// On architectures that don't natively support some vector loads 606 /// efficiently, casting the load to a smaller vector of larger types and 607 /// loading is more efficient, however, this can be undone by optimizations in 608 /// dag combiner. 609 virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT, 610 const SelectionDAG &DAG, 611 const MachineMemOperand &MMO) const; 612 613 /// Return true if the following transform is beneficial: 614 /// (store (y (conv x)), y*)) -> (store x, (x*)) isStoreBitCastBeneficial(EVT StoreVT,EVT BitcastVT,const SelectionDAG & DAG,const MachineMemOperand & MMO)615 virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT, 616 const SelectionDAG &DAG, 617 const MachineMemOperand &MMO) const { 618 // Default to the same logic as loads. 619 return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO); 620 } 621 622 /// Return true if it is expected to be cheaper to do a store of vector 623 /// constant with the given size and type for the address space than to 624 /// store the individual scalar element constants. storeOfVectorConstantIsCheap(bool IsZero,EVT MemVT,unsigned NumElem,unsigned AddrSpace)625 virtual bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT, 626 unsigned NumElem, 627 unsigned AddrSpace) const { 628 return IsZero; 629 } 630 631 /// Allow store merging for the specified type after legalization in addition 632 /// to before legalization. This may transform stores that do not exist 633 /// earlier (for example, stores created from intrinsics). mergeStoresAfterLegalization(EVT MemVT)634 virtual bool mergeStoresAfterLegalization(EVT MemVT) const { 635 return true; 636 } 637 638 /// Returns if it's reasonable to merge stores to MemVT size. canMergeStoresTo(unsigned AS,EVT MemVT,const MachineFunction & MF)639 virtual bool canMergeStoresTo(unsigned AS, EVT MemVT, 640 const MachineFunction &MF) const { 641 return true; 642 } 643 644 /// Return true if it is cheap to speculate a call to intrinsic cttz. isCheapToSpeculateCttz(Type * Ty)645 virtual bool isCheapToSpeculateCttz(Type *Ty) const { 646 return false; 647 } 648 649 /// Return true if it is cheap to speculate a call to intrinsic ctlz. 
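  ///
  /// For example (hypothetical target), a target whose CTLZ instruction is as
  /// cheap as a compare might simply return true for its native integer widths:
  /// \code
  ///   bool MyTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
  ///     return Ty->isIntegerTy(32) || Ty->isIntegerTy(64);
  ///   }
  /// \endcode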
isCheapToSpeculateCtlz(Type * Ty)650 virtual bool isCheapToSpeculateCtlz(Type *Ty) const { 651 return false; 652 } 653 654 /// Return true if ctlz instruction is fast. isCtlzFast()655 virtual bool isCtlzFast() const { 656 return false; 657 } 658 659 /// Return true if ctpop instruction is fast. isCtpopFast(EVT VT)660 virtual bool isCtpopFast(EVT VT) const { 661 return isOperationLegal(ISD::CTPOP, VT); 662 } 663 664 /// Return the maximum number of "x & (x - 1)" operations that can be done 665 /// instead of deferring to a custom CTPOP. getCustomCtpopCost(EVT VT,ISD::CondCode Cond)666 virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const { 667 return 1; 668 } 669 670 /// Return true if instruction generated for equality comparison is folded 671 /// with instruction generated for signed comparison. isEqualityCmpFoldedWithSignedCmp()672 virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; } 673 674 /// Return true if the heuristic to prefer icmp eq zero should be used in code 675 /// gen prepare. preferZeroCompareBranch()676 virtual bool preferZeroCompareBranch() const { return false; } 677 678 /// Return true if it is cheaper to split the store of a merged int val 679 /// from a pair of smaller values into multiple stores. isMultiStoresCheaperThanBitsMerge(EVT LTy,EVT HTy)680 virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const { 681 return false; 682 } 683 684 /// Return if the target supports combining a 685 /// chain like: 686 /// \code 687 /// %andResult = and %val1, #mask 688 /// %icmpResult = icmp %andResult, 0 689 /// \endcode 690 /// into a single machine instruction of a form like: 691 /// \code 692 /// cc = test %register, #mask 693 /// \endcode isMaskAndCmp0FoldingBeneficial(const Instruction & AndI)694 virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const { 695 return false; 696 } 697 698 /// Return true if it is valid to merge the TargetMMOFlags in two SDNodes. 699 virtual bool areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode & NodeX,const MemSDNode & NodeY)700 areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode &NodeX, 701 const MemSDNode &NodeY) const { 702 return true; 703 } 704 705 /// Use bitwise logic to make pairs of compares more efficient. For example: 706 /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0 707 /// This should be true when it takes more than one instruction to lower 708 /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on 709 /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win. convertSetCCLogicToBitwiseLogic(EVT VT)710 virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const { 711 return false; 712 } 713 714 /// Return the preferred operand type if the target has a quick way to compare 715 /// integer values of the given size. Assume that any legal integer type can 716 /// be compared efficiently. Targets may override this to allow illegal wide 717 /// types to return a vector type if there is support to compare that type. hasFastEqualityCompare(unsigned NumBits)718 virtual MVT hasFastEqualityCompare(unsigned NumBits) const { 719 MVT VT = MVT::getIntegerVT(NumBits); 720 return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE; 721 } 722 723 /// Return true if the target should transform: 724 /// (X & Y) == Y ---> (~X & Y) == 0 725 /// (X & Y) != Y ---> (~X & Y) != 0 726 /// 727 /// This may be profitable if the target has a bitwise and-not operation that 728 /// sets comparison flags. 
  /// A target may want to limit the transformation based on the type of Y or
  /// if Y is a constant.
  ///
  /// Note that the transform will not occur if Y is known to be a power-of-2
  /// because a mask and compare of a single bit can be handled by inverting the
  /// predicate, for example:
  ///   (X & 8) == 8 ---> (X & 8) != 0
  virtual bool hasAndNotCompare(SDValue Y) const {
    return false;
  }

  /// Return true if the target has a bitwise and-not operation:
  ///   X = ~A & B
  /// This can be used to simplify select or other instructions.
  virtual bool hasAndNot(SDValue X) const {
    // If the target has the more complex version of this operation, assume
    // that it has this operation too.
    return hasAndNotCompare(X);
  }

  /// Return true if the target has a bit-test instruction:
  ///   (X & (1 << Y)) ==/!= 0
  /// This knowledge can be used to prevent breaking the pattern,
  /// or creating it if it could be recognized.
  virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }

  /// There are two ways to clear extreme bits (either low or high):
  ///   Mask:   x & (-1 << y)  (the instcombine canonical form)
  ///   Shifts: x >> y << y
  /// Return true if the variant with 2 variable shifts is preferred.
  /// Return false if there is no preference.
  virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  /// This is usually true on most targets. But some targets, like Thumb1,
  /// have immediate shift instructions, but no immediate "and" instruction;
  /// this makes the fold unprofitable.
  virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                                 CombineLevel Level) const {
    return true;
  }

  /// Should we transform the IR-optimal check for whether the given truncation
  /// down into KeptBits would be truncating or not:
  ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
  /// into its more traditional form:
  ///   ((%x << C) a>> C) dstcond %x
  /// Return true if we should transform.
  /// Return false if there is no preference.
  virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
                                                    unsigned KeptBits) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Given the pattern
  ///   (X & (C l>>/<< Y)) ==/!= 0
  /// return true if it should be transformed into:
  ///   ((X <</l>> Y) & C) ==/!= 0
  /// WARNING: if 'X' is a constant, the fold may deadlock!
  /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
  /// here because it can end up being not linked in.
shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X,ConstantSDNode * XC,ConstantSDNode * CC,SDValue Y,unsigned OldShiftOpcode,unsigned NewShiftOpcode,SelectionDAG & DAG)793 virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd( 794 SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, 795 unsigned OldShiftOpcode, unsigned NewShiftOpcode, 796 SelectionDAG &DAG) const { 797 if (hasBitTest(X, Y)) { 798 // One interesting pattern that we'd want to form is 'bit test': 799 // ((1 << Y) & C) ==/!= 0 800 // But we also need to be careful not to try to reverse that fold. 801 802 // Is this '1 << Y' ? 803 if (OldShiftOpcode == ISD::SHL && CC->isOne()) 804 return false; // Keep the 'bit test' pattern. 805 806 // Will it be '1 << Y' after the transform ? 807 if (XC && NewShiftOpcode == ISD::SHL && XC->isOne()) 808 return true; // Do form the 'bit test' pattern. 809 } 810 811 // If 'X' is a constant, and we transform, then we will immediately 812 // try to undo the fold, thus causing endless combine loop. 813 // So by default, let's assume everyone prefers the fold 814 // iff 'X' is not a constant. 815 return !XC; 816 } 817 818 // Return true if its desirable to perform the following transform: 819 // (fmul C, (uitofp Pow2)) 820 // -> (bitcast_to_FP (add (bitcast_to_INT C), Log2(Pow2) << mantissa)) 821 // (fdiv C, (uitofp Pow2)) 822 // -> (bitcast_to_FP (sub (bitcast_to_INT C), Log2(Pow2) << mantissa)) 823 // 824 // This is only queried after we have verified the transform will be bitwise 825 // equals. 826 // 827 // SDNode *N : The FDiv/FMul node we want to transform. 828 // SDValue FPConst: The Float constant operand in `N`. 829 // SDValue IntPow2: The Integer power of 2 operand in `N`. optimizeFMulOrFDivAsShiftAddBitcast(SDNode * N,SDValue FPConst,SDValue IntPow2)830 virtual bool optimizeFMulOrFDivAsShiftAddBitcast(SDNode *N, SDValue FPConst, 831 SDValue IntPow2) const { 832 // Default to avoiding fdiv which is often very expensive. 833 return N->getOpcode() == ISD::FDIV; 834 } 835 836 // Given: 837 // (icmp eq/ne (and X, C0), (shift X, C1)) 838 // or 839 // (icmp eq/ne X, (rotate X, CPow2)) 840 841 // If C0 is a mask or shifted mask and the shift amt (C1) isolates the 842 // remaining bits (i.e something like `(x64 & UINT32_MAX) == (x64 >> 32)`) 843 // Do we prefer the shift to be shift-right, shift-left, or rotate. 844 // Note: Its only valid to convert the rotate version to the shift version iff 845 // the shift-amt (`C1`) is a power of 2 (including 0). 846 // If ShiftOpc (current Opcode) is returned, do nothing. preferedOpcodeForCmpEqPiecesOfOperand(EVT VT,unsigned ShiftOpc,bool MayTransformRotate,const APInt & ShiftOrRotateAmt,const std::optional<APInt> & AndMask)847 virtual unsigned preferedOpcodeForCmpEqPiecesOfOperand( 848 EVT VT, unsigned ShiftOpc, bool MayTransformRotate, 849 const APInt &ShiftOrRotateAmt, 850 const std::optional<APInt> &AndMask) const { 851 return ShiftOpc; 852 } 853 854 /// These two forms are equivalent: 855 /// sub %y, (xor %x, -1) 856 /// add (add %x, 1), %y 857 /// The variant with two add's is IR-canonical. 858 /// Some targets may prefer one to the other. preferIncOfAddToSubOfNot(EVT VT)859 virtual bool preferIncOfAddToSubOfNot(EVT VT) const { 860 // By default, let's assume that everyone prefers the form with two add's. 861 return true; 862 } 863 864 // By default prefer folding (abs (sub nsw x, y)) -> abds(x, y). Some targets 865 // may want to avoid this to prevent loss of sub_nsw pattern. 
preferABDSToABSWithNSW(EVT VT)866 virtual bool preferABDSToABSWithNSW(EVT VT) const { 867 return true; 868 } 869 870 // Return true if the target wants to transform Op(Splat(X)) -> Splat(Op(X)) preferScalarizeSplat(SDNode * N)871 virtual bool preferScalarizeSplat(SDNode *N) const { return true; } 872 873 // Return true if the target wants to transform: 874 // (TruncVT truncate(sext_in_reg(VT X, ExtVT)) 875 // -> (TruncVT sext_in_reg(truncate(VT X), ExtVT)) 876 // Some targets might prefer pre-sextinreg to improve truncation/saturation. preferSextInRegOfTruncate(EVT TruncVT,EVT VT,EVT ExtVT)877 virtual bool preferSextInRegOfTruncate(EVT TruncVT, EVT VT, EVT ExtVT) const { 878 return true; 879 } 880 881 /// Return true if the target wants to use the optimization that 882 /// turns ext(promotableInst1(...(promotableInstN(load)))) into 883 /// promotedInst1(...(promotedInstN(ext(load)))). enableExtLdPromotion()884 bool enableExtLdPromotion() const { return EnableExtLdPromotion; } 885 886 /// Return true if the target can combine store(extractelement VectorTy, 887 /// Idx). 888 /// \p Cost[out] gives the cost of that transformation when this is true. canCombineStoreAndExtract(Type * VectorTy,Value * Idx,unsigned & Cost)889 virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, 890 unsigned &Cost) const { 891 return false; 892 } 893 894 /// Return true if the target shall perform extract vector element and store 895 /// given that the vector is known to be splat of constant. 896 /// \p Index[out] gives the index of the vector element to be extracted when 897 /// this is true. shallExtractConstSplatVectorElementToStore(Type * VectorTy,unsigned ElemSizeInBits,unsigned & Index)898 virtual bool shallExtractConstSplatVectorElementToStore( 899 Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const { 900 return false; 901 } 902 903 /// Return true if inserting a scalar into a variable element of an undef 904 /// vector is more efficiently handled by splatting the scalar instead. shouldSplatInsEltVarIndex(EVT)905 virtual bool shouldSplatInsEltVarIndex(EVT) const { 906 return false; 907 } 908 909 /// Return true if target always benefits from combining into FMA for a 910 /// given value type. This must typically return false on targets where FMA 911 /// takes more cycles to execute than FADD. enableAggressiveFMAFusion(EVT VT)912 virtual bool enableAggressiveFMAFusion(EVT VT) const { return false; } 913 914 /// Return true if target always benefits from combining into FMA for a 915 /// given value type. This must typically return false on targets where FMA 916 /// takes more cycles to execute than FADD. enableAggressiveFMAFusion(LLT Ty)917 virtual bool enableAggressiveFMAFusion(LLT Ty) const { return false; } 918 919 /// Return the ValueType of the result of SETCC operations. 920 virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, 921 EVT VT) const; 922 923 /// Return the ValueType for comparison libcalls. Comparison libcalls include 924 /// floating point comparison calls, and Ordered/Unordered check calls on 925 /// floating point numbers. 926 virtual 927 MVT::SimpleValueType getCmpLibcallReturnType() const; 928 929 /// For targets without i1 registers, this gives the nature of the high-bits 930 /// of boolean values held in types wider than i1. 931 /// 932 /// "Boolean values" are special true/false values produced by nodes like 933 /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND. 
934 /// Not to be confused with general values promoted from i1. Some cpus 935 /// distinguish between vectors of boolean and scalars; the isVec parameter 936 /// selects between the two kinds. For example on X86 a scalar boolean should 937 /// be zero extended from i1, while the elements of a vector of booleans 938 /// should be sign extended from i1. 939 /// 940 /// Some cpus also treat floating point types the same way as they treat 941 /// vectors instead of the way they treat scalars. getBooleanContents(bool isVec,bool isFloat)942 BooleanContent getBooleanContents(bool isVec, bool isFloat) const { 943 if (isVec) 944 return BooleanVectorContents; 945 return isFloat ? BooleanFloatContents : BooleanContents; 946 } 947 getBooleanContents(EVT Type)948 BooleanContent getBooleanContents(EVT Type) const { 949 return getBooleanContents(Type.isVector(), Type.isFloatingPoint()); 950 } 951 952 /// Promote the given target boolean to a target boolean of the given type. 953 /// A target boolean is an integer value, not necessarily of type i1, the bits 954 /// of which conform to getBooleanContents. 955 /// 956 /// ValVT is the type of values that produced the boolean. promoteTargetBoolean(SelectionDAG & DAG,SDValue Bool,EVT ValVT)957 SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool, 958 EVT ValVT) const { 959 SDLoc dl(Bool); 960 EVT BoolVT = 961 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ValVT); 962 ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(ValVT)); 963 return DAG.getNode(ExtendCode, dl, BoolVT, Bool); 964 } 965 966 /// Return target scheduling preference. getSchedulingPreference()967 Sched::Preference getSchedulingPreference() const { 968 return SchedPreferenceInfo; 969 } 970 971 /// Some scheduler, e.g. hybrid, can switch to different scheduling heuristics 972 /// for different nodes. This function returns the preference (or none) for 973 /// the given node. getSchedulingPreference(SDNode *)974 virtual Sched::Preference getSchedulingPreference(SDNode *) const { 975 return Sched::None; 976 } 977 978 /// Return the register class that should be used for the specified value 979 /// type. 980 virtual const TargetRegisterClass *getRegClassFor(MVT VT, bool isDivergent = false) const { 981 (void)isDivergent; 982 const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy]; 983 assert(RC && "This value type is not natively supported!"); 984 return RC; 985 } 986 987 /// Allows target to decide about the register class of the 988 /// specific value that is live outside the defining block. 989 /// Returns true if the value needs uniform register class. requiresUniformRegister(MachineFunction & MF,const Value *)990 virtual bool requiresUniformRegister(MachineFunction &MF, 991 const Value *) const { 992 return false; 993 } 994 995 /// Return the 'representative' register class for the specified value 996 /// type. 997 /// 998 /// The 'representative' register class is the largest legal super-reg 999 /// register class for the register class of the value type. For example, on 1000 /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep 1001 /// register class is GR64 on x86_64. getRepRegClassFor(MVT VT)1002 virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const { 1003 const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy]; 1004 return RC; 1005 } 1006 1007 /// Return the cost of the 'representative' register class for the specified 1008 /// value type. 
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return the preferred strategy to legalize this SHIFT instruction, with
  /// \p ExpansionFactor being the recursion depth - how many expansions are
  /// needed.
  enum class ShiftLegalizationStrategy {
    ExpandToParts,
    ExpandThroughStack,
    LowerToLibcall
  };
  virtual ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const {
    if (ExpansionFactor == 1)
      return ShiftLegalizationStrategy::ExpandToParts;
    return ShiftLegalizationStrategy::ExpandThroughStack;
  }

  /// Return true if the target has native support for the specified value type.
  /// This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < std::size(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    LegalizeTypeAction ValueTypeActions[MVT::VALUETYPE_SIZE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
                TypeLegal);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      ValueTypeActions[VT.SimpleTy] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return pair that represents the legalization kind (first) that needs to
  /// happen to EVT (second) in order to type-legalize it.
  ///
  /// First: how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  ///
  /// Second: for types supported by the target, this is an identity function.
  /// For types that must be promoted to larger types, this returns the larger
  /// type to promote to. For integer types that are larger than the largest
  /// integer register, this contains one step in the expansion to get to the
  /// smaller register. For illegal floating point types, this returns the
  /// integer type to transform to.
  LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;

  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
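  ///
  /// For example (a sketch; Ctx is an LLVMContext and the exact results are
  /// target-dependent), on a typical 64-bit target:
  /// \code
  ///   // i128 is usually expanded; getTypeToTransformTo then gives one
  ///   // expansion step (normally i64).
  ///   if (getTypeAction(Ctx, MVT::i128) == TypeExpandInteger)
  ///     EVT PartVT = getTypeToTransformTo(Ctx, MVT::i128);
  /// \endcode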
getTypeAction(LLVMContext & Context,EVT VT)1081 LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const { 1082 return getTypeConversion(Context, VT).first; 1083 } getTypeAction(MVT VT)1084 LegalizeTypeAction getTypeAction(MVT VT) const { 1085 return ValueTypeActions.getTypeAction(VT); 1086 } 1087 1088 /// For types supported by the target, this is an identity function. For 1089 /// types that must be promoted to larger types, this returns the larger type 1090 /// to promote to. For integer types that are larger than the largest integer 1091 /// register, this contains one step in the expansion to get to the smaller 1092 /// register. For illegal floating point types, this returns the integer type 1093 /// to transform to. getTypeToTransformTo(LLVMContext & Context,EVT VT)1094 virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const { 1095 return getTypeConversion(Context, VT).second; 1096 } 1097 1098 /// For types supported by the target, this is an identity function. For 1099 /// types that must be expanded (i.e. integer types that are larger than the 1100 /// largest integer register or illegal floating point types), this returns 1101 /// the largest legal type it will be expanded to. getTypeToExpandTo(LLVMContext & Context,EVT VT)1102 EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const { 1103 assert(!VT.isVector()); 1104 while (true) { 1105 switch (getTypeAction(Context, VT)) { 1106 case TypeLegal: 1107 return VT; 1108 case TypeExpandInteger: 1109 VT = getTypeToTransformTo(Context, VT); 1110 break; 1111 default: 1112 llvm_unreachable("Type is not legal nor is it to be expanded!"); 1113 } 1114 } 1115 } 1116 1117 /// Vector types are broken down into some number of legal first class types. 1118 /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8 1119 /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64 1120 /// turns into 4 EVT::i32 values with both PPC and X86. 1121 /// 1122 /// This method returns the number of registers needed, and the VT for each 1123 /// register. It also returns the VT and quantity of the intermediate values 1124 /// before they are promoted/expanded. 1125 unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, 1126 EVT &IntermediateVT, 1127 unsigned &NumIntermediates, 1128 MVT &RegisterVT) const; 1129 1130 /// Certain targets such as MIPS require that some types such as vectors are 1131 /// always broken down into scalars in some contexts. This occurs even if the 1132 /// vector type is legal. getVectorTypeBreakdownForCallingConv(LLVMContext & Context,CallingConv::ID CC,EVT VT,EVT & IntermediateVT,unsigned & NumIntermediates,MVT & RegisterVT)1133 virtual unsigned getVectorTypeBreakdownForCallingConv( 1134 LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, 1135 unsigned &NumIntermediates, MVT &RegisterVT) const { 1136 return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates, 1137 RegisterVT); 1138 } 1139 1140 struct IntrinsicInfo { 1141 unsigned opc = 0; // target opcode 1142 EVT memVT; // memory VT 1143 1144 // value representing memory location 1145 PointerUnion<const Value *, const PseudoSourceValue *> ptrVal; 1146 1147 // Fallback address space for use if ptrVal is nullptr. std::nullopt means 1148 // unknown address space. 
1149 std::optional<unsigned> fallbackAddressSpace; 1150 1151 int offset = 0; // offset off of ptrVal 1152 uint64_t size = 0; // the size of the memory location 1153 // (taken from memVT if zero) 1154 MaybeAlign align = Align(1); // alignment 1155 1156 MachineMemOperand::Flags flags = MachineMemOperand::MONone; 1157 IntrinsicInfo() = default; 1158 }; 1159 1160 /// Given an intrinsic, checks if on the target the intrinsic will need to map 1161 /// to a MemIntrinsicNode (touches memory). If this is the case, it returns 1162 /// true and store the intrinsic information into the IntrinsicInfo that was 1163 /// passed to the function. getTgtMemIntrinsic(IntrinsicInfo &,const CallInst &,MachineFunction &,unsigned)1164 virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, 1165 MachineFunction &, 1166 unsigned /*Intrinsic*/) const { 1167 return false; 1168 } 1169 1170 /// Returns true if the target can instruction select the specified FP 1171 /// immediate natively. If false, the legalizer will materialize the FP 1172 /// immediate as a load from a constant pool. 1173 virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/, 1174 bool ForCodeSize = false) const { 1175 return false; 1176 } 1177 1178 /// Targets can use this to indicate that they only support *some* 1179 /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a 1180 /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be 1181 /// legal. isShuffleMaskLegal(ArrayRef<int>,EVT)1182 virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const { 1183 return true; 1184 } 1185 1186 /// Returns true if the operation can trap for the value type. 1187 /// 1188 /// VT must be a legal type. By default, we optimistically assume most 1189 /// operations don't trap except for integer divide and remainder. 1190 virtual bool canOpTrap(unsigned Op, EVT VT) const; 1191 1192 /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there 1193 /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a 1194 /// constant pool entry. isVectorClearMaskLegal(ArrayRef<int>,EVT)1195 virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/, 1196 EVT /*VT*/) const { 1197 return false; 1198 } 1199 1200 /// How to legalize this custom operation? getCustomOperationAction(SDNode & Op)1201 virtual LegalizeAction getCustomOperationAction(SDNode &Op) const { 1202 return Legal; 1203 } 1204 1205 /// Return how this operation should be treated: either it is legal, needs to 1206 /// be promoted to a larger size, needs to be expanded to some other code 1207 /// sequence, or the target has a custom expander for it. getOperationAction(unsigned Op,EVT VT)1208 LegalizeAction getOperationAction(unsigned Op, EVT VT) const { 1209 if (VT.isExtended()) return Expand; 1210 // If a target-specific SDNode requires legalization, require the target 1211 // to provide custom legalization for it. 1212 if (Op >= std::size(OpActions[0])) 1213 return Custom; 1214 return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op]; 1215 } 1216 1217 /// Custom method defined by each target to indicate if an operation which 1218 /// may require a scale is supported natively by the target. 1219 /// If not, the operation is illegal. 
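  ///
  /// A sketch of an override (the target and the single supported scale are
  /// made up for illustration):
  /// \code
  ///   bool MyTargetLowering::isSupportedFixedPointOperation(
  ///       unsigned Op, EVT VT, unsigned Scale) const {
  ///     // Only Q15 saturating multiplies are handled natively.
  ///     return Op == ISD::SMULFIXSAT && VT == MVT::i16 && Scale == 15;
  ///   }
  /// \endcode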
isSupportedFixedPointOperation(unsigned Op,EVT VT,unsigned Scale)1220 virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT, 1221 unsigned Scale) const { 1222 return false; 1223 } 1224 1225 /// Some fixed point operations may be natively supported by the target but 1226 /// only for specific scales. This method allows for checking 1227 /// if the width is supported by the target for a given operation that may 1228 /// depend on scale. getFixedPointOperationAction(unsigned Op,EVT VT,unsigned Scale)1229 LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, 1230 unsigned Scale) const { 1231 auto Action = getOperationAction(Op, VT); 1232 if (Action != Legal) 1233 return Action; 1234 1235 // This operation is supported in this type but may only work on specific 1236 // scales. 1237 bool Supported; 1238 switch (Op) { 1239 default: 1240 llvm_unreachable("Unexpected fixed point operation."); 1241 case ISD::SMULFIX: 1242 case ISD::SMULFIXSAT: 1243 case ISD::UMULFIX: 1244 case ISD::UMULFIXSAT: 1245 case ISD::SDIVFIX: 1246 case ISD::SDIVFIXSAT: 1247 case ISD::UDIVFIX: 1248 case ISD::UDIVFIXSAT: 1249 Supported = isSupportedFixedPointOperation(Op, VT, Scale); 1250 break; 1251 } 1252 1253 return Supported ? Action : Expand; 1254 } 1255 1256 // If Op is a strict floating-point operation, return the result 1257 // of getOperationAction for the equivalent non-strict operation. getStrictFPOperationAction(unsigned Op,EVT VT)1258 LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const { 1259 unsigned EqOpc; 1260 switch (Op) { 1261 default: llvm_unreachable("Unexpected FP pseudo-opcode"); 1262 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 1263 case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break; 1264 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 1265 case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break; 1266 #include "llvm/IR/ConstrainedOps.def" 1267 } 1268 1269 return getOperationAction(EqOpc, VT); 1270 } 1271 1272 /// Return true if the specified operation is legal on this target or can be 1273 /// made legal with custom lowering. This is used to help guide high-level 1274 /// lowering decisions. LegalOnly is an optional convenience for code paths 1275 /// traversed pre and post legalisation. 1276 bool isOperationLegalOrCustom(unsigned Op, EVT VT, 1277 bool LegalOnly = false) const { 1278 if (LegalOnly) 1279 return isOperationLegal(Op, VT); 1280 1281 return (VT == MVT::Other || isTypeLegal(VT)) && 1282 (getOperationAction(Op, VT) == Legal || 1283 getOperationAction(Op, VT) == Custom); 1284 } 1285 1286 /// Return true if the specified operation is legal on this target or can be 1287 /// made legal using promotion. This is used to help guide high-level lowering 1288 /// decisions. LegalOnly is an optional convenience for code paths traversed 1289 /// pre and post legalisation. 1290 bool isOperationLegalOrPromote(unsigned Op, EVT VT, 1291 bool LegalOnly = false) const { 1292 if (LegalOnly) 1293 return isOperationLegal(Op, VT); 1294 1295 return (VT == MVT::Other || isTypeLegal(VT)) && 1296 (getOperationAction(Op, VT) == Legal || 1297 getOperationAction(Op, VT) == Promote); 1298 } 1299 1300 /// Return true if the specified operation is legal on this target or can be 1301 /// made legal with custom lowering or using promotion. This is used to help 1302 /// guide high-level lowering decisions. LegalOnly is an optional convenience 1303 /// for code paths traversed pre and post legalisation. 
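  ///
  /// Typical use in a DAG combine (illustrative only; TLI and VT come from the
  /// surrounding combine code): guard creation of a new node on the target
  /// being able to handle it afterwards.
  /// \code
  ///   if (!TLI.isOperationLegalOrCustomOrPromote(ISD::ROTL, VT))
  ///     return SDValue(); // don't introduce a rotate the target can't lower
  /// \endcode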
1304 bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, 1305 bool LegalOnly = false) const { 1306 if (LegalOnly) 1307 return isOperationLegal(Op, VT); 1308 1309 return (VT == MVT::Other || isTypeLegal(VT)) && 1310 (getOperationAction(Op, VT) == Legal || 1311 getOperationAction(Op, VT) == Custom || 1312 getOperationAction(Op, VT) == Promote); 1313 } 1314 1315 /// Return true if the operation uses custom lowering, regardless of whether 1316 /// the type is legal or not. isOperationCustom(unsigned Op,EVT VT)1317 bool isOperationCustom(unsigned Op, EVT VT) const { 1318 return getOperationAction(Op, VT) == Custom; 1319 } 1320 1321 /// Return true if lowering to a jump table is allowed. areJTsAllowed(const Function * Fn)1322 virtual bool areJTsAllowed(const Function *Fn) const { 1323 if (Fn->getFnAttribute("no-jump-tables").getValueAsBool()) 1324 return false; 1325 1326 return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) || 1327 isOperationLegalOrCustom(ISD::BRIND, MVT::Other); 1328 } 1329 1330 /// Check whether the range [Low,High] fits in a machine word. rangeFitsInWord(const APInt & Low,const APInt & High,const DataLayout & DL)1331 bool rangeFitsInWord(const APInt &Low, const APInt &High, 1332 const DataLayout &DL) const { 1333 // FIXME: Using the pointer type doesn't seem ideal. 1334 uint64_t BW = DL.getIndexSizeInBits(0u); 1335 uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1; 1336 return Range <= BW; 1337 } 1338 1339 /// Return true if lowering to a jump table is suitable for a set of case 1340 /// clusters which may contain \p NumCases cases, \p Range range of values. 1341 virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, 1342 uint64_t Range, ProfileSummaryInfo *PSI, 1343 BlockFrequencyInfo *BFI) const; 1344 1345 /// Returns preferred type for switch condition. 1346 virtual MVT getPreferredSwitchConditionType(LLVMContext &Context, 1347 EVT ConditionVT) const; 1348 1349 /// Return true if lowering to a bit test is suitable for a set of case 1350 /// clusters which contains \p NumDests unique destinations, \p Low and 1351 /// \p High as its lowest and highest case values, and expects \p NumCmps 1352 /// case value comparisons. Check if the number of destinations, comparison 1353 /// metric, and range are all suitable. isSuitableForBitTests(unsigned NumDests,unsigned NumCmps,const APInt & Low,const APInt & High,const DataLayout & DL)1354 bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps, 1355 const APInt &Low, const APInt &High, 1356 const DataLayout &DL) const { 1357 // FIXME: I don't think NumCmps is the correct metric: a single case and a 1358 // range of cases both require only one branch to lower. Just looking at the 1359 // number of clusters and destinations should be enough to decide whether to 1360 // build bit tests. 1361 1362 // To lower a range with bit tests, the range must fit the bitwidth of a 1363 // machine word. 1364 if (!rangeFitsInWord(Low, High, DL)) 1365 return false; 1366 1367 // Decide whether it's profitable to lower this range with bit tests. Each 1368 // destination requires a bit test and branch, and there is an overall range 1369 // check branch. For a small number of clusters, separate comparisons might 1370 // be cheaper, and for many destinations, splitting the range might be 1371 // better. 
    return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
           (NumDests == 3 && NumCmps >= 6);
  }

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help guide
  /// high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                  EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
           MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
  }

  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return true if the specified load with extension is legal or custom
  /// on this target.
  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
  }

  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to some
  /// other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ValI < MVT::VALUETYPE_SIZE && MemI < MVT::VALUETYPE_SIZE &&
           "Table isn't big enough!");
    return TruncStoreActions[ValI][MemI];
  }

  /// Return true if the specified store with truncation is legal on this
  /// target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
  }

  /// Return true if the specified store with truncation is legal or can be
  /// custom-lowered on this target.
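  /// For illustration only: a sketch of the kind of guard a combine might use
  /// before narrowing a store; the variable names (TLI, SrcVT, NarrowVT) are
  /// assumptions made for the example.
  /// \code
  ///   // Replace a store of a truncated value with a truncating store only if
  ///   // the target can select (or custom-lower) the truncating store.
  ///   if (!TLI.isTruncStoreLegalOrCustom(SrcVT, NarrowVT))
  ///     return SDValue();
  /// \endcode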
  bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) &&
      (getTruncStoreAction(ValVT, MemVT) == Legal ||
       getTruncStoreAction(ValVT, MemVT) == Custom);
  }

  virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT,
                                    bool LegalOnly) const {
    if (LegalOnly)
      return isTruncStoreLegal(ValVT, MemVT);

    return isTruncStoreLegalOrCustom(ValVT, MemVT);
  }

  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Load);
  }

  /// Return true if the specified indexed load is legal on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Store);
  }

  /// Return true if the specified indexed store is legal on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed masked load should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
  }

  /// Return true if the specified indexed masked load is legal on this target.
  bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed masked store should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
  }

  /// Return true if the specified indexed masked store is legal on this target.
isIndexedMaskedStoreLegal(unsigned IdxMode,EVT VT)1499 bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const { 1500 return VT.isSimple() && 1501 (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal || 1502 getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom); 1503 } 1504 1505 /// Returns true if the index type for a masked gather/scatter requires 1506 /// extending shouldExtendGSIndex(EVT VT,EVT & EltTy)1507 virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const { return false; } 1508 1509 // Returns true if Extend can be folded into the index of a masked gathers/scatters 1510 // on this target. shouldRemoveExtendFromGSIndex(SDValue Extend,EVT DataVT)1511 virtual bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const { 1512 return false; 1513 } 1514 1515 // Return true if the target supports a scatter/gather instruction with 1516 // indices which are scaled by the particular value. Note that all targets 1517 // must by definition support scale of 1. isLegalScaleForGatherScatter(uint64_t Scale,uint64_t ElemSize)1518 virtual bool isLegalScaleForGatherScatter(uint64_t Scale, 1519 uint64_t ElemSize) const { 1520 // MGATHER/MSCATTER are only required to support scaling by one or by the 1521 // element size. 1522 if (Scale != ElemSize && Scale != 1) 1523 return false; 1524 return true; 1525 } 1526 1527 /// Return how the condition code should be treated: either it is legal, needs 1528 /// to be expanded to some other code sequence, or the target has a custom 1529 /// expander for it. 1530 LegalizeAction getCondCodeAction(ISD::CondCode CC,MVT VT)1531 getCondCodeAction(ISD::CondCode CC, MVT VT) const { 1532 assert((unsigned)CC < std::size(CondCodeActions) && 1533 ((unsigned)VT.SimpleTy >> 3) < std::size(CondCodeActions[0]) && 1534 "Table isn't big enough!"); 1535 // See setCondCodeAction for how this is encoded. 1536 uint32_t Shift = 4 * (VT.SimpleTy & 0x7); 1537 uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3]; 1538 LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF); 1539 assert(Action != Promote && "Can't promote condition code!"); 1540 return Action; 1541 } 1542 1543 /// Return true if the specified condition code is legal on this target. isCondCodeLegal(ISD::CondCode CC,MVT VT)1544 bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const { 1545 return getCondCodeAction(CC, VT) == Legal; 1546 } 1547 1548 /// Return true if the specified condition code is legal or custom on this 1549 /// target. isCondCodeLegalOrCustom(ISD::CondCode CC,MVT VT)1550 bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const { 1551 return getCondCodeAction(CC, VT) == Legal || 1552 getCondCodeAction(CC, VT) == Custom; 1553 } 1554 1555 /// If the action for this operation is to promote, this method returns the 1556 /// ValueType to promote to. getTypeToPromoteTo(unsigned Op,MVT VT)1557 MVT getTypeToPromoteTo(unsigned Op, MVT VT) const { 1558 assert(getOperationAction(Op, VT) == Promote && 1559 "This operation isn't promoted!"); 1560 1561 // See if this has an explicit type specified. 
1562 std::map<std::pair<unsigned, MVT::SimpleValueType>, 1563 MVT::SimpleValueType>::const_iterator PTTI = 1564 PromoteToType.find(std::make_pair(Op, VT.SimpleTy)); 1565 if (PTTI != PromoteToType.end()) return PTTI->second; 1566 1567 assert((VT.isInteger() || VT.isFloatingPoint()) && 1568 "Cannot autopromote this type, add it with AddPromotedToType."); 1569 1570 MVT NVT = VT; 1571 do { 1572 NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1); 1573 assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid && 1574 "Didn't find type to promote to!"); 1575 } while (!isTypeLegal(NVT) || 1576 getOperationAction(Op, NVT) == Promote); 1577 return NVT; 1578 } 1579 1580 virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, 1581 bool AllowUnknown = false) const { 1582 return getValueType(DL, Ty, AllowUnknown); 1583 } 1584 1585 /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM 1586 /// operations except for the pointer size. If AllowUnknown is true, this 1587 /// will return MVT::Other for types with no EVT counterpart (e.g. structs), 1588 /// otherwise it will assert. 1589 EVT getValueType(const DataLayout &DL, Type *Ty, 1590 bool AllowUnknown = false) const { 1591 // Lower scalar pointers to native pointer types. 1592 if (auto *PTy = dyn_cast<PointerType>(Ty)) 1593 return getPointerTy(DL, PTy->getAddressSpace()); 1594 1595 if (auto *VTy = dyn_cast<VectorType>(Ty)) { 1596 Type *EltTy = VTy->getElementType(); 1597 // Lower vectors of pointers to native pointer types. 1598 if (auto *PTy = dyn_cast<PointerType>(EltTy)) { 1599 EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace())); 1600 EltTy = PointerTy.getTypeForEVT(Ty->getContext()); 1601 } 1602 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false), 1603 VTy->getElementCount()); 1604 } 1605 1606 return EVT::getEVT(Ty, AllowUnknown); 1607 } 1608 1609 EVT getMemValueType(const DataLayout &DL, Type *Ty, 1610 bool AllowUnknown = false) const { 1611 // Lower scalar pointers to native pointer types. 1612 if (auto *PTy = dyn_cast<PointerType>(Ty)) 1613 return getPointerMemTy(DL, PTy->getAddressSpace()); 1614 1615 if (auto *VTy = dyn_cast<VectorType>(Ty)) { 1616 Type *EltTy = VTy->getElementType(); 1617 if (auto *PTy = dyn_cast<PointerType>(EltTy)) { 1618 EVT PointerTy(getPointerMemTy(DL, PTy->getAddressSpace())); 1619 EltTy = PointerTy.getTypeForEVT(Ty->getContext()); 1620 } 1621 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false), 1622 VTy->getElementCount()); 1623 } 1624 1625 return getValueType(DL, Ty, AllowUnknown); 1626 } 1627 1628 1629 /// Return the MVT corresponding to this LLVM type. See getValueType. 1630 MVT getSimpleValueType(const DataLayout &DL, Type *Ty, 1631 bool AllowUnknown = false) const { 1632 return getValueType(DL, Ty, AllowUnknown).getSimpleVT(); 1633 } 1634 1635 /// Return the desired alignment for ByVal or InAlloca aggregate function 1636 /// arguments in the caller parameter area. This is the actual alignment, not 1637 /// its logarithm. 1638 virtual uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const; 1639 1640 /// Return the type of registers that this ValueType will eventually require. getRegisterType(MVT VT)1641 MVT getRegisterType(MVT VT) const { 1642 assert((unsigned)VT.SimpleTy < std::size(RegisterTypeForVT)); 1643 return RegisterTypeForVT[VT.SimpleTy]; 1644 } 1645 1646 /// Return the type of registers that this ValueType will eventually require. 
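  /// For illustration only (assuming a hypothetical 32-bit target where i32 is
  /// the widest legal integer type): i64 is split into two pieces, so the
  /// register type is i32 and two registers are needed. The exact results are
  /// target dependent; the variable names are assumptions made for the example.
  /// \code
  ///   EVT VT = MVT::i64;
  ///   MVT RegVT = TLI.getRegisterType(Ctx, VT);        // i32 on such a target
  ///   unsigned NumRegs = TLI.getNumRegisters(Ctx, VT); // 2 on such a target
  /// \endcode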
getRegisterType(LLVMContext & Context,EVT VT)1647 MVT getRegisterType(LLVMContext &Context, EVT VT) const { 1648 if (VT.isSimple()) 1649 return getRegisterType(VT.getSimpleVT()); 1650 if (VT.isVector()) { 1651 EVT VT1; 1652 MVT RegisterVT; 1653 unsigned NumIntermediates; 1654 (void)getVectorTypeBreakdown(Context, VT, VT1, 1655 NumIntermediates, RegisterVT); 1656 return RegisterVT; 1657 } 1658 if (VT.isInteger()) { 1659 return getRegisterType(Context, getTypeToTransformTo(Context, VT)); 1660 } 1661 llvm_unreachable("Unsupported extended type!"); 1662 } 1663 1664 /// Return the number of registers that this ValueType will eventually 1665 /// require. 1666 /// 1667 /// This is one for any types promoted to live in larger registers, but may be 1668 /// more than one for types (like i64) that are split into pieces. For types 1669 /// like i140, which are first promoted then expanded, it is the number of 1670 /// registers needed to hold all the bits of the original type. For an i140 1671 /// on a 32 bit machine this means 5 registers. 1672 /// 1673 /// RegisterVT may be passed as a way to override the default settings, for 1674 /// instance with i128 inline assembly operands on SystemZ. 1675 virtual unsigned 1676 getNumRegisters(LLVMContext &Context, EVT VT, 1677 std::optional<MVT> RegisterVT = std::nullopt) const { 1678 if (VT.isSimple()) { 1679 assert((unsigned)VT.getSimpleVT().SimpleTy < 1680 std::size(NumRegistersForVT)); 1681 return NumRegistersForVT[VT.getSimpleVT().SimpleTy]; 1682 } 1683 if (VT.isVector()) { 1684 EVT VT1; 1685 MVT VT2; 1686 unsigned NumIntermediates; 1687 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2); 1688 } 1689 if (VT.isInteger()) { 1690 unsigned BitWidth = VT.getSizeInBits(); 1691 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits(); 1692 return (BitWidth + RegWidth - 1) / RegWidth; 1693 } 1694 llvm_unreachable("Unsupported extended type!"); 1695 } 1696 1697 /// Certain combinations of ABIs, Targets and features require that types 1698 /// are legal for some operations and not for other operations. 1699 /// For MIPS all vector types must be passed through the integer register set. getRegisterTypeForCallingConv(LLVMContext & Context,CallingConv::ID CC,EVT VT)1700 virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, 1701 CallingConv::ID CC, EVT VT) const { 1702 return getRegisterType(Context, VT); 1703 } 1704 1705 /// Certain targets require unusual breakdowns of certain types. For MIPS, 1706 /// this occurs when a vector type is used, as vector are passed through the 1707 /// integer register set. getNumRegistersForCallingConv(LLVMContext & Context,CallingConv::ID CC,EVT VT)1708 virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, 1709 CallingConv::ID CC, 1710 EVT VT) const { 1711 return getNumRegisters(Context, VT); 1712 } 1713 1714 /// Certain targets have context sensitive alignment requirements, where one 1715 /// type has the alignment requirement of another type. getABIAlignmentForCallingConv(Type * ArgTy,const DataLayout & DL)1716 virtual Align getABIAlignmentForCallingConv(Type *ArgTy, 1717 const DataLayout &DL) const { 1718 return DL.getABITypeAlign(ArgTy); 1719 } 1720 1721 /// If true, then instruction selection should seek to shrink the FP constant 1722 /// of the specified type to a smaller type in order to save space and / or 1723 /// reduce runtime. 
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }

  /// Return true if it is profitable to reduce a load to a smaller type.
  /// Example: (i16 (trunc (i32 (load x)))) -> i16 load x
  virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                                     EVT NewVT) const {
    // By default, assume that it is cheaper to extract a subvector from a wide
    // vector load rather than creating multiple narrow vector loads.
    if (NewVT.isVector() && !Load->hasOneUse())
      return false;

    return true;
  }

  /// Return true (the default) if it is profitable to remove a sext_inreg(x)
  /// where the sext is redundant, and use x directly.
  virtual bool shouldRemoveRedundantExtend(SDValue Op) const { return true; }

  /// When splitting a value of the specified type into parts, does the Lo
  /// or Hi part come first? This usually follows the endianness, except
  /// for ppcf128, where the Hi part always comes first.
  bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
    return DL.isBigEndian() || VT == MVT::ppcf128;
  }

  /// If true, the target has custom DAG combine transformations that it can
  /// perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }

  unsigned getGatherAllAliasesMaxDepth() const {
    return GatherAllAliasesMaxDepth;
  }

  /// Returns the size of the platform's va_list object.
  virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
    return getPointerTy(DL).getSizeInBits();
  }

  /// Get maximum # of store operations permitted for llvm.memset
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemset(bool OptSize) const {
    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
  }

  /// Get maximum # of store operations permitted for llvm.memcpy
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
  }

  /// \brief Get maximum # of store operations to be glued together
  ///
  /// This function returns the maximum number of store operations permitted
  /// to glue together during lowering of llvm.memcpy. The value is set by
  /// the target at the performance threshold for such a replacement.
getMaxGluedStoresPerMemcpy()1790 virtual unsigned getMaxGluedStoresPerMemcpy() const { 1791 return MaxGluedStoresPerMemcpy; 1792 } 1793 1794 /// Get maximum # of load operations permitted for memcmp 1795 /// 1796 /// This function returns the maximum number of load operations permitted 1797 /// to replace a call to memcmp. The value is set by the target at the 1798 /// performance threshold for such a replacement. If OptSize is true, 1799 /// return the limit for functions that have OptSize attribute. getMaxExpandSizeMemcmp(bool OptSize)1800 unsigned getMaxExpandSizeMemcmp(bool OptSize) const { 1801 return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp; 1802 } 1803 1804 /// Get maximum # of store operations permitted for llvm.memmove 1805 /// 1806 /// This function returns the maximum number of store operations permitted 1807 /// to replace a call to llvm.memmove. The value is set by the target at the 1808 /// performance threshold for such a replacement. If OptSize is true, 1809 /// return the limit for functions that have OptSize attribute. getMaxStoresPerMemmove(bool OptSize)1810 unsigned getMaxStoresPerMemmove(bool OptSize) const { 1811 return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove; 1812 } 1813 1814 /// Determine if the target supports unaligned memory accesses. 1815 /// 1816 /// This function returns true if the target allows unaligned memory accesses 1817 /// of the specified type in the given address space. If true, it also returns 1818 /// a relative speed of the unaligned memory access in the last argument by 1819 /// reference. The higher the speed number the faster the operation comparing 1820 /// to a number returned by another such call. This is used, for example, in 1821 /// situations where an array copy/move/set is converted to a sequence of 1822 /// store operations. Its use helps to ensure that such replacements don't 1823 /// generate code that causes an alignment error (trap) on the target machine. 1824 virtual bool allowsMisalignedMemoryAccesses( 1825 EVT, unsigned AddrSpace = 0, Align Alignment = Align(1), 1826 MachineMemOperand::Flags Flags = MachineMemOperand::MONone, 1827 unsigned * /*Fast*/ = nullptr) const { 1828 return false; 1829 } 1830 1831 /// LLT handling variant. 1832 virtual bool allowsMisalignedMemoryAccesses( 1833 LLT, unsigned AddrSpace = 0, Align Alignment = Align(1), 1834 MachineMemOperand::Flags Flags = MachineMemOperand::MONone, 1835 unsigned * /*Fast*/ = nullptr) const { 1836 return false; 1837 } 1838 1839 /// This function returns true if the memory access is aligned or if the 1840 /// target allows this specific unaligned memory access. If the access is 1841 /// allowed, the optional final parameter returns a relative speed of the 1842 /// access (as defined by the target). 1843 bool allowsMemoryAccessForAlignment( 1844 LLVMContext &Context, const DataLayout &DL, EVT VT, 1845 unsigned AddrSpace = 0, Align Alignment = Align(1), 1846 MachineMemOperand::Flags Flags = MachineMemOperand::MONone, 1847 unsigned *Fast = nullptr) const; 1848 1849 /// Return true if the memory access of this type is aligned or if the target 1850 /// allows this specific unaligned access for the given MachineMemOperand. 1851 /// If the access is allowed, the optional final parameter returns a relative 1852 /// speed of the access (as defined by the target). 
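  /// For illustration only: a hedged sketch of a lowering hook querying whether
  /// the access described by a load's MachineMemOperand is acceptable and how
  /// fast it is; the surrounding names (Context, DAG, VT, Load) are assumptions
  /// made for the example.
  /// \code
  ///   unsigned Fast = 0;
  ///   if (!allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(), VT,
  ///                                       *Load->getMemOperand(), &Fast) ||
  ///       !Fast)
  ///     return SDValue(); // leave it to the generic unaligned-load expansion
  /// \endcode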
1853 bool allowsMemoryAccessForAlignment(LLVMContext &Context, 1854 const DataLayout &DL, EVT VT, 1855 const MachineMemOperand &MMO, 1856 unsigned *Fast = nullptr) const; 1857 1858 /// Return true if the target supports a memory access of this type for the 1859 /// given address space and alignment. If the access is allowed, the optional 1860 /// final parameter returns the relative speed of the access (as defined by 1861 /// the target). 1862 virtual bool 1863 allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, 1864 unsigned AddrSpace = 0, Align Alignment = Align(1), 1865 MachineMemOperand::Flags Flags = MachineMemOperand::MONone, 1866 unsigned *Fast = nullptr) const; 1867 1868 /// Return true if the target supports a memory access of this type for the 1869 /// given MachineMemOperand. If the access is allowed, the optional 1870 /// final parameter returns the relative access speed (as defined by the 1871 /// target). 1872 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, 1873 const MachineMemOperand &MMO, 1874 unsigned *Fast = nullptr) const; 1875 1876 /// LLT handling variant. 1877 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, LLT Ty, 1878 const MachineMemOperand &MMO, 1879 unsigned *Fast = nullptr) const; 1880 1881 /// Returns the target specific optimal type for load and store operations as 1882 /// a result of memset, memcpy, and memmove lowering. 1883 /// It returns EVT::Other if the type should be determined using generic 1884 /// target-independent logic. 1885 virtual EVT getOptimalMemOpType(const MemOp & Op,const AttributeList &)1886 getOptimalMemOpType(const MemOp &Op, 1887 const AttributeList & /*FuncAttributes*/) const { 1888 return MVT::Other; 1889 } 1890 1891 /// LLT returning variant. 1892 virtual LLT getOptimalMemOpLLT(const MemOp & Op,const AttributeList &)1893 getOptimalMemOpLLT(const MemOp &Op, 1894 const AttributeList & /*FuncAttributes*/) const { 1895 return LLT(); 1896 } 1897 1898 /// Returns true if it's safe to use load / store of the specified type to 1899 /// expand memcpy / memset inline. 1900 /// 1901 /// This is mostly true for all types except for some special cases. For 1902 /// example, on X86 targets without SSE2 f64 load / store are done with fldl / 1903 /// fstpl which also does type conversion. Note the specified type doesn't 1904 /// have to be legal as the hook is used before type legalization. isSafeMemOpType(MVT)1905 virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; } 1906 1907 /// Return lower limit for number of blocks in a jump table. 1908 virtual unsigned getMinimumJumpTableEntries() const; 1909 1910 /// Return lower limit of the density in a jump table. 1911 unsigned getMinimumJumpTableDensity(bool OptForSize) const; 1912 1913 /// Return upper limit for number of entries in a jump table. 1914 /// Zero if no limit. 1915 unsigned getMaximumJumpTableSize() const; 1916 1917 virtual bool isJumpTableRelative() const; 1918 1919 /// If a physical register, this specifies the register that 1920 /// llvm.savestack/llvm.restorestack should save and restore. getStackPointerRegisterToSaveRestore()1921 Register getStackPointerRegisterToSaveRestore() const { 1922 return StackPointerRegisterToSaveRestore; 1923 } 1924 1925 /// If a physical register, this returns the register that receives the 1926 /// exception address on entry to an EH pad. 
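  /// For illustration only: a hypothetical target that receives the exception
  /// address in a fixed physical register. Both "MyTargetLowering" and
  /// "MyTarget::R0" are assumptions made for the example.
  /// \code
  ///   Register MyTargetLowering::getExceptionPointerRegister(
  ///       const Constant *PersonalityFn) const {
  ///     // The EH runtime for this hypothetical target always passes the
  ///     // exception address in R0.
  ///     return MyTarget::R0;
  ///   }
  /// \endcode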
1927 virtual Register getExceptionPointerRegister(const Constant * PersonalityFn)1928 getExceptionPointerRegister(const Constant *PersonalityFn) const { 1929 return Register(); 1930 } 1931 1932 /// If a physical register, this returns the register that receives the 1933 /// exception typeid on entry to a landing pad. 1934 virtual Register getExceptionSelectorRegister(const Constant * PersonalityFn)1935 getExceptionSelectorRegister(const Constant *PersonalityFn) const { 1936 return Register(); 1937 } 1938 needsFixedCatchObjects()1939 virtual bool needsFixedCatchObjects() const { 1940 report_fatal_error("Funclet EH is not implemented for this target"); 1941 } 1942 1943 /// Return the minimum stack alignment of an argument. getMinStackArgumentAlignment()1944 Align getMinStackArgumentAlignment() const { 1945 return MinStackArgumentAlignment; 1946 } 1947 1948 /// Return the minimum function alignment. getMinFunctionAlignment()1949 Align getMinFunctionAlignment() const { return MinFunctionAlignment; } 1950 1951 /// Return the preferred function alignment. getPrefFunctionAlignment()1952 Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; } 1953 1954 /// Return the preferred loop alignment. 1955 virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const; 1956 1957 /// Return the maximum amount of bytes allowed to be emitted when padding for 1958 /// alignment 1959 virtual unsigned 1960 getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const; 1961 1962 /// Should loops be aligned even when the function is marked OptSize (but not 1963 /// MinSize). alignLoopsWithOptSize()1964 virtual bool alignLoopsWithOptSize() const { return false; } 1965 1966 /// If the target has a standard location for the stack protector guard, 1967 /// returns the address of that location. Otherwise, returns nullptr. 1968 /// DEPRECATED: please override useLoadStackGuardNode and customize 1969 /// LOAD_STACK_GUARD, or customize \@llvm.stackguard(). 1970 virtual Value *getIRStackGuard(IRBuilderBase &IRB) const; 1971 1972 /// Inserts necessary declarations for SSP (stack protection) purpose. 1973 /// Should be used only when getIRStackGuard returns nullptr. 1974 virtual void insertSSPDeclarations(Module &M) const; 1975 1976 /// Return the variable that's previously inserted by insertSSPDeclarations, 1977 /// if any, otherwise return nullptr. Should be used only when 1978 /// getIRStackGuard returns nullptr. 1979 virtual Value *getSDagStackGuard(const Module &M) const; 1980 1981 /// If this function returns true, stack protection checks should XOR the 1982 /// frame pointer (or whichever pointer is used to address locals) into the 1983 /// stack guard value before checking it. getIRStackGuard must return nullptr 1984 /// if this returns true. useStackGuardXorFP()1985 virtual bool useStackGuardXorFP() const { return false; } 1986 1987 /// If the target has a standard stack protection check function that 1988 /// performs validation and error handling, returns the function. Otherwise, 1989 /// returns nullptr. Must be previously inserted by insertSSPDeclarations. 1990 /// Should be used only when getIRStackGuard returns nullptr. 1991 virtual Function *getSSPStackGuardCheck(const Module &M) const; 1992 1993 protected: 1994 Value *getDefaultSafeStackPointerLocation(IRBuilderBase &IRB, 1995 bool UseTLS) const; 1996 1997 public: 1998 /// Returns the target-specific address of the unsafe stack pointer. 
  virtual Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const;

  /// Returns the name of the symbol used to emit stack probes or the empty
  /// string if not applicable.
  virtual bool hasStackProbeSymbol(const MachineFunction &MF) const { return false; }

  virtual bool hasInlineStackProbe(const MachineFunction &MF) const { return false; }

  virtual StringRef getStackProbeSymbolName(const MachineFunction &MF) const {
    return "";
  }

  /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
  /// are happy to sink it into basic blocks. A cast may be free, but not
  /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
  virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const;

  /// Return true if the pointer arguments to CI should be aligned by aligning
  /// the object whose address is being passed. If so then MinSize is set to the
  /// minimum size the object must be to be aligned and PrefAlign is set to the
  /// preferred alignment.
  virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
                                      Align & /*PrefAlign*/) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  /// \name Helpers for TargetTransformInfo implementations
  /// @{

  /// Get the ISD node that corresponds to the Instruction class opcode.
  int InstructionOpcodeToISD(unsigned Opcode) const;

  /// @}

  //===--------------------------------------------------------------------===//
  /// \name Helpers for atomic expansion.
  /// @{

  /// Returns the maximum atomic operation size (in bits) supported by
  /// the backend. Atomic operations greater than this size (as well
  /// as ones that are not naturally aligned) will be expanded by
  /// AtomicExpandPass into an __atomic_* library call.
  unsigned getMaxAtomicSizeInBitsSupported() const {
    return MaxAtomicSizeInBitsSupported;
  }

  /// Returns the size in bits of the maximum div/rem the backend supports.
  /// Larger operations will be expanded by ExpandLargeDivRem.
  unsigned getMaxDivRemBitWidthSupported() const {
    return MaxDivRemBitWidthSupported;
  }

  /// Returns the size in bits of the largest FP conversion the backend
  /// supports. Larger operations will be expanded by ExpandLargeFPConvert.
  unsigned getMaxLargeFPConvertBitWidthSupported() const {
    return MaxLargeFPConvertBitWidthSupported;
  }

  /// Returns the size of the smallest cmpxchg or ll/sc instruction
  /// the backend supports. Any smaller operations are widened in
  /// AtomicExpandPass.
  ///
  /// Note that *unlike* operations above the maximum size, atomic ops
  /// are still natively supported below the minimum; they just
  /// require a more complex expansion.
  unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }

  /// Whether the target supports unaligned atomic operations.
  bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }

  /// Whether AtomicExpandPass should automatically insert fences and reduce
  /// ordering for this atomic. This should be true for most architectures with
  /// weak memory ordering. Defaults to false.
  virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
    return false;
  }

  /// Whether AtomicExpandPass should automatically insert a trailing fence
  /// without reducing the ordering for this atomic. Defaults to false.
  virtual bool
  shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const {
    return false;
  }

  /// Perform a load-linked operation on Addr, returning a "Value *" with the
  /// corresponding pointee type. This may entail some non-trivial operations to
  /// truncate or reconstruct types that will be illegal in the backend. See
  /// ARMISelLowering for an example implementation.
  virtual Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
                                Value *Addr, AtomicOrdering Ord) const {
    llvm_unreachable("Load linked unimplemented on this target");
  }

  /// Perform a store-conditional operation to Addr. Return the status of the
  /// store. This should be 0 if the store succeeded, non-zero otherwise.
  virtual Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val,
                                      Value *Addr, AtomicOrdering Ord) const {
    llvm_unreachable("Store conditional unimplemented on this target");
  }

  /// Perform a masked atomicrmw using a target-specific intrinsic. This
  /// represents the core LL/SC loop which will be lowered at a late stage by
  /// the backend. The target-specific intrinsic returns the loaded value and
  /// is not responsible for masking and shifting the result.
  virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder,
                                              AtomicRMWInst *AI,
                                              Value *AlignedAddr, Value *Incr,
                                              Value *Mask, Value *ShiftAmt,
                                              AtomicOrdering Ord) const {
    llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
  }

  /// Perform an atomicrmw expansion in a target-specific way. This is
  /// expected to be called when masked atomicrmw and bit test atomicrmw don't
  /// work, and the target supports another way to lower atomicrmw.
  virtual void emitExpandAtomicRMW(AtomicRMWInst *AI) const {
    llvm_unreachable(
        "Generic atomicrmw expansion unimplemented on this target");
  }

  /// Perform a bit test atomicrmw using a target-specific intrinsic. This
  /// represents the combined bit test intrinsic which will be lowered at a late
  /// stage by the backend.
  virtual void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
    llvm_unreachable(
        "Bit test atomicrmw expansion unimplemented on this target");
  }

  /// Perform an atomicrmw whose result is only used by a comparison, using a
  /// target-specific intrinsic. This represents the combined atomic and compare
  /// intrinsic which will be lowered at a late stage by the backend.
  virtual void emitCmpArithAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
    llvm_unreachable(
        "Compare arith atomicrmw expansion unimplemented on this target");
  }

  /// Perform a masked cmpxchg using a target-specific intrinsic. This
  /// represents the core LL/SC loop which will be lowered at a late stage by
  /// the backend. The target-specific intrinsic returns the loaded value and
  /// is not responsible for masking and shifting the result.
  virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
      IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
      Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
    llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
  }

  //===--------------------------------------------------------------------===//
  /// \name KCFI check lowering.
  /// @{

  virtual MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
                                      MachineBasicBlock::instr_iterator &MBBI,
                                      const TargetInstrInfo *TII) const {
    llvm_unreachable("KCFI is not supported on this target");
  }

  /// @}

  /// Inserts in the IR a target-specific intrinsic specifying a fence.
  /// It is called by AtomicExpandPass before expanding an
  /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
  /// if shouldInsertFencesForAtomic returns true.
  ///
  /// Inst is the original atomic instruction, prior to other expansions that
  /// may be performed.
  ///
  /// This function should either return nullptr, or a pointer to an IR-level
  /// Instruction*. Even complex fence sequences can be represented by a
  /// single Instruction* through an intrinsic to be lowered later.
  /// Backends should override this method to produce a target-specific
  /// intrinsic for their fences.
  /// FIXME: Please note that the default implementation here in terms of
  /// IR-level fences exists for historical/compatibility reasons and is
  /// *unsound* ! Fences cannot, in general, be used to restore sequential
  /// consistency. For example:
  ///   atomic<int> x = y = 0;
  ///   int r1, r2, r3, r4;
  ///   Thread 0:
  ///     x.store(1);
  ///   Thread 1:
  ///     y.store(1);
  ///   Thread 2:
  ///     r1 = x.load();
  ///     r2 = y.load();
  ///   Thread 3:
  ///     r3 = y.load();
  ///     r4 = x.load();
  /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
  /// seq_cst. But if they are lowered to monotonic accesses, no amount of
  /// IR-level fences can prevent it.
2190 /// @{ 2191 virtual Instruction *emitLeadingFence(IRBuilderBase &Builder, 2192 Instruction *Inst, 2193 AtomicOrdering Ord) const; 2194 2195 virtual Instruction *emitTrailingFence(IRBuilderBase &Builder, 2196 Instruction *Inst, 2197 AtomicOrdering Ord) const; 2198 /// @} 2199 2200 // Emits code that executes when the comparison result in the ll/sc 2201 // expansion of a cmpxchg instruction is such that the store-conditional will 2202 // not execute. This makes it possible to balance out the load-linked with 2203 // a dedicated instruction, if desired. 2204 // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would 2205 // be unnecessarily held, except if clrex, inserted by this hook, is executed. emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase & Builder)2206 virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {} 2207 2208 /// Returns true if arguments should be sign-extended in lib calls. shouldSignExtendTypeInLibCall(EVT Type,bool IsSigned)2209 virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const { 2210 return IsSigned; 2211 } 2212 2213 /// Returns true if arguments should be extended in lib calls. shouldExtendTypeInLibCall(EVT Type)2214 virtual bool shouldExtendTypeInLibCall(EVT Type) const { 2215 return true; 2216 } 2217 2218 /// Returns how the given (atomic) load should be expanded by the 2219 /// IR-level AtomicExpand pass. shouldExpandAtomicLoadInIR(LoadInst * LI)2220 virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const { 2221 return AtomicExpansionKind::None; 2222 } 2223 2224 /// Returns how the given (atomic) load should be cast by the IR-level 2225 /// AtomicExpand pass. shouldCastAtomicLoadInIR(LoadInst * LI)2226 virtual AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const { 2227 if (LI->getType()->isFloatingPointTy()) 2228 return AtomicExpansionKind::CastToInteger; 2229 return AtomicExpansionKind::None; 2230 } 2231 2232 /// Returns how the given (atomic) store should be expanded by the IR-level 2233 /// AtomicExpand pass into. For instance AtomicExpansionKind::Expand will try 2234 /// to use an atomicrmw xchg. shouldExpandAtomicStoreInIR(StoreInst * SI)2235 virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const { 2236 return AtomicExpansionKind::None; 2237 } 2238 2239 /// Returns how the given (atomic) store should be cast by the IR-level 2240 /// AtomicExpand pass into. For instance AtomicExpansionKind::CastToInteger 2241 /// will try to cast the operands to integer values. shouldCastAtomicStoreInIR(StoreInst * SI)2242 virtual AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const { 2243 if (SI->getValueOperand()->getType()->isFloatingPointTy()) 2244 return AtomicExpansionKind::CastToInteger; 2245 return AtomicExpansionKind::None; 2246 } 2247 2248 /// Returns how the given atomic cmpxchg should be expanded by the IR-level 2249 /// AtomicExpand pass. 2250 virtual AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst * AI)2251 shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const { 2252 return AtomicExpansionKind::None; 2253 } 2254 2255 /// Returns how the IR-level AtomicExpand pass should expand the given 2256 /// AtomicRMW, if at all. Default is to never expand. shouldExpandAtomicRMWInIR(AtomicRMWInst * RMW)2257 virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const { 2258 return RMW->isFloatingPointOperation() ? 
        AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None;
  }

  /// Returns how the given atomicrmw should be cast by the IR-level
  /// AtomicExpand pass.
  virtual AtomicExpansionKind
  shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const {
    if (RMWI->getOperation() == AtomicRMWInst::Xchg &&
        (RMWI->getValOperand()->getType()->isFloatingPointTy() ||
         RMWI->getValOperand()->getType()->isPointerTy()))
      return AtomicExpansionKind::CastToInteger;

    return AtomicExpansionKind::None;
  }

  /// On some platforms, an AtomicRMW that never actually modifies the value
  /// (such as fetch_add of 0) can be turned into a fence followed by an
  /// atomic load. This may sound useless, but it makes it possible for the
  /// processor to keep the cacheline shared, dramatically improving
  /// performance. And such idempotent RMWs are useful for implementing some
  /// kinds of locks, see for example (justification + benchmarks):
  /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
  /// This method tries doing that transformation, returning the atomic load if
  /// it succeeds, and nullptr otherwise.
  /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
  /// another round of expansion.
  virtual LoadInst *
  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
    return nullptr;
  }

  /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
  /// SIGN_EXTEND, or ANY_EXTEND).
  virtual ISD::NodeType getExtendForAtomicOps() const {
    return ISD::ZERO_EXTEND;
  }

  /// Returns how the platform's atomic compare and swap expects its comparison
  /// value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). This is
  /// separate from getExtendForAtomicOps, which is concerned with the
  /// sign-extension of the instruction's output, whereas here we are concerned
  /// with the sign-extension of the input. For targets with compare-and-swap
  /// instructions (or sub-word comparisons in their LL/SC loop expansions),
  /// the input can be ANY_EXTEND, but the output will still have a specific
  /// extension.
  virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const {
    return ISD::ANY_EXTEND;
  }

  /// @}

  /// Returns true if we should normalize
  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
  /// that it saves us from materializing N0 and N1 in an integer register.
  /// Targets that are able to perform and/or on flags should return false here.
  virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
                                               EVT VT) const {
    // If a target has multiple condition registers, then it likely has logical
    // operations on those registers.
    if (hasMultipleConditionRegisters())
      return false;
    // Only do the transform if the value won't be split into multiple
    // registers.
    LegalizeTypeAction Action = getTypeAction(Context, VT);
    return Action != TypeExpandInteger && Action != TypeExpandFloat &&
           Action != TypeSplitVector;
  }

  virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }

  /// Return true if a select of constants (select Cond, C1, C2) should be
  /// transformed into simple math ops with the condition value. For example:
  /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
  virtual bool convertSelectOfConstantsToMath(EVT VT) const {
    return false;
  }

  /// Return true if it is profitable to transform an integer
  /// multiplication-by-constant into simpler operations like shifts and adds.
  /// This may be true if the target does not directly support the
  /// multiplication operation for the specified type or the sequence of simpler
  /// ops is faster than the multiply.
  virtual bool decomposeMulByConstant(LLVMContext &Context,
                                      EVT VT, SDValue C) const {
    return false;
  }

  /// Return true if it may be profitable to transform
  /// (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
  /// This may not be true if c1 and c2 can be represented as immediates but
  /// c1*c2 cannot, for example.
  /// The target should check if c1, c2 and c1*c2 can be represented as
  /// immediates, or have to be materialized into registers. If it is not sure
  /// about some cases, a default true can be returned to let the DAGCombiner
  /// decide.
  /// AddNode is (add x, c1), and ConstNode is c2.
  virtual bool isMulAddWithConstProfitable(SDValue AddNode,
                                           SDValue ConstNode) const {
    return true;
  }

  /// Return true if it is more correct/profitable to use strict FP_TO_INT
  /// conversion operations - canonicalizing the FP source value instead of
  /// converting all cases and then selecting based on value.
  /// This may be true if the target throws exceptions for out of bounds
  /// conversions or has fast FP CMOV.
  virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
                                        bool IsSigned) const {
    return false;
  }

  /// Return true if it is beneficial to expand an @llvm.powi.* intrinsic.
  /// If not optimizing for size, expanding @llvm.powi.* intrinsics is always
  /// considered beneficial.
  /// If optimizing for size, expansion is only considered beneficial for up to
  /// 5 multiplies and a divide (if the exponent is negative).
  bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const {
    if (Exponent < 0)
      Exponent = -Exponent;
    uint64_t E = static_cast<uint64_t>(Exponent);
    return !OptForSize || (llvm::popcount(E) + Log2_64(E) < 7);
  }

  //===--------------------------------------------------------------------===//
  // TargetLowering Configuration Methods - These methods should be invoked by
  // the derived class constructor to configure this object for the target.
  //
protected:
  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type. See getBooleanContents.
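  /// For illustration only: typical calls from a target's TargetLowering
  /// constructor. The particular contents chosen here are an assumption and
  /// vary from target to target.
  /// \code
  ///   // Scalar i1 booleans are 0/1; vector booleans are all-zeros/all-ones.
  ///   setBooleanContents(ZeroOrOneBooleanContent);
  ///   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  /// \endcode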
setBooleanContents(BooleanContent Ty)2390 void setBooleanContents(BooleanContent Ty) { 2391 BooleanContents = Ty; 2392 BooleanFloatContents = Ty; 2393 } 2394 2395 /// Specify how the target extends the result of integer and floating point 2396 /// boolean values from i1 to a wider type. See getBooleanContents. setBooleanContents(BooleanContent IntTy,BooleanContent FloatTy)2397 void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) { 2398 BooleanContents = IntTy; 2399 BooleanFloatContents = FloatTy; 2400 } 2401 2402 /// Specify how the target extends the result of a vector boolean value from a 2403 /// vector of i1 to a wider type. See getBooleanContents. setBooleanVectorContents(BooleanContent Ty)2404 void setBooleanVectorContents(BooleanContent Ty) { 2405 BooleanVectorContents = Ty; 2406 } 2407 2408 /// Specify the target scheduling preference. setSchedulingPreference(Sched::Preference Pref)2409 void setSchedulingPreference(Sched::Preference Pref) { 2410 SchedPreferenceInfo = Pref; 2411 } 2412 2413 /// Indicate the minimum number of blocks to generate jump tables. 2414 void setMinimumJumpTableEntries(unsigned Val); 2415 2416 /// Indicate the maximum number of entries in jump tables. 2417 /// Set to zero to generate unlimited jump tables. 2418 void setMaximumJumpTableSize(unsigned); 2419 2420 /// If set to a physical register, this specifies the register that 2421 /// llvm.savestack/llvm.restorestack should save and restore. setStackPointerRegisterToSaveRestore(Register R)2422 void setStackPointerRegisterToSaveRestore(Register R) { 2423 StackPointerRegisterToSaveRestore = R; 2424 } 2425 2426 /// Tells the code generator that the target has multiple (allocatable) 2427 /// condition registers that can be used to store the results of comparisons 2428 /// for use by selects and conditional branches. With multiple condition 2429 /// registers, the code generator will not aggressively sink comparisons into 2430 /// the blocks of their users. 2431 void setHasMultipleConditionRegisters(bool hasManyRegs = true) { 2432 HasMultipleConditionRegisters = hasManyRegs; 2433 } 2434 2435 /// Tells the code generator that the target has BitExtract instructions. 2436 /// The code generator will aggressively sink "shift"s into the blocks of 2437 /// their users if the users will generate "and" instructions which can be 2438 /// combined with "shift" to BitExtract instructions. 2439 void setHasExtractBitsInsn(bool hasExtractInsn = true) { 2440 HasExtractBitsInsn = hasExtractInsn; 2441 } 2442 2443 /// Tells the code generator not to expand logic operations on comparison 2444 /// predicates into separate sequences that increase the amount of flow 2445 /// control. 2446 void setJumpIsExpensive(bool isExpensive = true); 2447 2448 /// Tells the code generator which bitwidths to bypass. addBypassSlowDiv(unsigned int SlowBitWidth,unsigned int FastBitWidth)2449 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) { 2450 BypassSlowDivWidths[SlowBitWidth] = FastBitWidth; 2451 } 2452 2453 /// Add the specified register class as an available regclass for the 2454 /// specified value type. This indicates the selector can handle values of 2455 /// that class natively. 
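  /// For illustration only: a sketch of the usual pattern in a target's
  /// TargetLowering constructor. The register class names and the Subtarget
  /// accessor are assumptions made for the example.
  /// \code
  ///   addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);
  ///   addRegisterClass(MVT::f64, &MyTarget::FPR64RegClass);
  ///   // ...after all register classes have been added:
  ///   computeRegisterProperties(Subtarget.getRegisterInfo());
  /// \endcode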
addRegisterClass(MVT VT,const TargetRegisterClass * RC)2456 void addRegisterClass(MVT VT, const TargetRegisterClass *RC) { 2457 assert((unsigned)VT.SimpleTy < std::size(RegClassForVT)); 2458 RegClassForVT[VT.SimpleTy] = RC; 2459 } 2460 2461 /// Return the largest legal super-reg register class of the register class 2462 /// for the specified type and its associated "cost". 2463 virtual std::pair<const TargetRegisterClass *, uint8_t> 2464 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const; 2465 2466 /// Once all of the register classes are added, this allows us to compute 2467 /// derived properties we expose. 2468 void computeRegisterProperties(const TargetRegisterInfo *TRI); 2469 2470 /// Indicate that the specified operation does not work with the specified 2471 /// type and indicate what to do about it. Note that VT may refer to either 2472 /// the type of a result or that of an operand of Op. setOperationAction(unsigned Op,MVT VT,LegalizeAction Action)2473 void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action) { 2474 assert(Op < std::size(OpActions[0]) && "Table isn't big enough!"); 2475 OpActions[(unsigned)VT.SimpleTy][Op] = Action; 2476 } setOperationAction(ArrayRef<unsigned> Ops,MVT VT,LegalizeAction Action)2477 void setOperationAction(ArrayRef<unsigned> Ops, MVT VT, 2478 LegalizeAction Action) { 2479 for (auto Op : Ops) 2480 setOperationAction(Op, VT, Action); 2481 } setOperationAction(ArrayRef<unsigned> Ops,ArrayRef<MVT> VTs,LegalizeAction Action)2482 void setOperationAction(ArrayRef<unsigned> Ops, ArrayRef<MVT> VTs, 2483 LegalizeAction Action) { 2484 for (auto VT : VTs) 2485 setOperationAction(Ops, VT, Action); 2486 } 2487 2488 /// Indicate that the specified load with extension does not work with the 2489 /// specified type and indicate what to do about it. setLoadExtAction(unsigned ExtType,MVT ValVT,MVT MemVT,LegalizeAction Action)2490 void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, 2491 LegalizeAction Action) { 2492 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() && 2493 MemVT.isValid() && "Table isn't big enough!"); 2494 assert((unsigned)Action < 0x10 && "too many bits for bitfield array"); 2495 unsigned Shift = 4 * ExtType; 2496 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift); 2497 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift; 2498 } setLoadExtAction(ArrayRef<unsigned> ExtTypes,MVT ValVT,MVT MemVT,LegalizeAction Action)2499 void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT, 2500 LegalizeAction Action) { 2501 for (auto ExtType : ExtTypes) 2502 setLoadExtAction(ExtType, ValVT, MemVT, Action); 2503 } setLoadExtAction(ArrayRef<unsigned> ExtTypes,MVT ValVT,ArrayRef<MVT> MemVTs,LegalizeAction Action)2504 void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, 2505 ArrayRef<MVT> MemVTs, LegalizeAction Action) { 2506 for (auto MemVT : MemVTs) 2507 setLoadExtAction(ExtTypes, ValVT, MemVT, Action); 2508 } 2509 2510 /// Indicate that the specified truncating store does not work with the 2511 /// specified type and indicate what to do about it. 
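/// For example (illustrative; the exact set of actions is target-specific), a
/// target with no f64 -> f32 truncating store would mark it in its
/// constructor:
/// \code
///   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
/// \endcode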
setTruncStoreAction(MVT ValVT,MVT MemVT,LegalizeAction Action)2512 void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action) { 2513 assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!"); 2514 TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action; 2515 } 2516 2517 /// Indicate that the specified indexed load does or does not work with the 2518 /// specified type and indicate what to do about it. 2519 /// 2520 /// NOTE: All indexed mode loads are initialized to Expand in 2521 /// TargetLowering.cpp setIndexedLoadAction(ArrayRef<unsigned> IdxModes,MVT VT,LegalizeAction Action)2522 void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, MVT VT, 2523 LegalizeAction Action) { 2524 for (auto IdxMode : IdxModes) 2525 setIndexedModeAction(IdxMode, VT, IMAB_Load, Action); 2526 } 2527 setIndexedLoadAction(ArrayRef<unsigned> IdxModes,ArrayRef<MVT> VTs,LegalizeAction Action)2528 void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs, 2529 LegalizeAction Action) { 2530 for (auto VT : VTs) 2531 setIndexedLoadAction(IdxModes, VT, Action); 2532 } 2533 2534 /// Indicate that the specified indexed store does or does not work with the 2535 /// specified type and indicate what to do about it. 2536 /// 2537 /// NOTE: All indexed mode stores are initialized to Expand in 2538 /// TargetLowering.cpp setIndexedStoreAction(ArrayRef<unsigned> IdxModes,MVT VT,LegalizeAction Action)2539 void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, MVT VT, 2540 LegalizeAction Action) { 2541 for (auto IdxMode : IdxModes) 2542 setIndexedModeAction(IdxMode, VT, IMAB_Store, Action); 2543 } 2544 setIndexedStoreAction(ArrayRef<unsigned> IdxModes,ArrayRef<MVT> VTs,LegalizeAction Action)2545 void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs, 2546 LegalizeAction Action) { 2547 for (auto VT : VTs) 2548 setIndexedStoreAction(IdxModes, VT, Action); 2549 } 2550 2551 /// Indicate that the specified indexed masked load does or does not work with 2552 /// the specified type and indicate what to do about it. 2553 /// 2554 /// NOTE: All indexed mode masked loads are initialized to Expand in 2555 /// TargetLowering.cpp setIndexedMaskedLoadAction(unsigned IdxMode,MVT VT,LegalizeAction Action)2556 void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT, 2557 LegalizeAction Action) { 2558 setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action); 2559 } 2560 2561 /// Indicate that the specified indexed masked store does or does not work 2562 /// with the specified type and indicate what to do about it. 2563 /// 2564 /// NOTE: All indexed mode masked stores are initialized to Expand in 2565 /// TargetLowering.cpp setIndexedMaskedStoreAction(unsigned IdxMode,MVT VT,LegalizeAction Action)2566 void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT, 2567 LegalizeAction Action) { 2568 setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action); 2569 } 2570 2571 /// Indicate that the specified condition code is or isn't supported on the 2572 /// target and indicate what to do about it.
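/// For example (an illustrative sketch, not a specific backend), a target that
/// cannot directly test floating-point orderedness might expand SETO/SETUO:
/// \code
///   setCondCodeAction({ISD::SETO, ISD::SETUO}, MVT::f32, Expand);
/// \endcode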
setCondCodeAction(ArrayRef<ISD::CondCode> CCs,MVT VT,LegalizeAction Action)2573 void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, MVT VT, 2574 LegalizeAction Action) { 2575 for (auto CC : CCs) { 2576 assert(VT.isValid() && (unsigned)CC < std::size(CondCodeActions) && 2577 "Table isn't big enough!"); 2578 assert((unsigned)Action < 0x10 && "too many bits for bitfield array"); 2579 /// The lower 3 bits of the SimpleTy index into Nth 4bit set from the 2580 /// 32-bit value and the upper 29 bits index into the second dimension of 2581 /// the array to select what 32-bit value to use. 2582 uint32_t Shift = 4 * (VT.SimpleTy & 0x7); 2583 CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift); 2584 CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift; 2585 } 2586 } setCondCodeAction(ArrayRef<ISD::CondCode> CCs,ArrayRef<MVT> VTs,LegalizeAction Action)2587 void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, ArrayRef<MVT> VTs, 2588 LegalizeAction Action) { 2589 for (auto VT : VTs) 2590 setCondCodeAction(CCs, VT, Action); 2591 } 2592 2593 /// If Opc/OrigVT is specified as being promoted, the promotion code defaults 2594 /// to trying a larger integer/fp until it can find one that works. If that 2595 /// default is insufficient, this method can be used by the target to override 2596 /// the default. AddPromotedToType(unsigned Opc,MVT OrigVT,MVT DestVT)2597 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) { 2598 PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy; 2599 } 2600 2601 /// Convenience method to set an operation to Promote and specify the type 2602 /// in a single call. setOperationPromotedToType(unsigned Opc,MVT OrigVT,MVT DestVT)2603 void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) { 2604 setOperationAction(Opc, OrigVT, Promote); 2605 AddPromotedToType(Opc, OrigVT, DestVT); 2606 } setOperationPromotedToType(ArrayRef<unsigned> Ops,MVT OrigVT,MVT DestVT)2607 void setOperationPromotedToType(ArrayRef<unsigned> Ops, MVT OrigVT, 2608 MVT DestVT) { 2609 for (auto Op : Ops) { 2610 setOperationAction(Op, OrigVT, Promote); 2611 AddPromotedToType(Op, OrigVT, DestVT); 2612 } 2613 } 2614 2615 /// Targets should invoke this method for each target independent node that 2616 /// they want to provide a custom DAG combiner for by implementing the 2617 /// PerformDAGCombine virtual method. setTargetDAGCombine(ArrayRef<ISD::NodeType> NTs)2618 void setTargetDAGCombine(ArrayRef<ISD::NodeType> NTs) { 2619 for (auto NT : NTs) { 2620 assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray)); 2621 TargetDAGCombineArray[NT >> 3] |= 1 << (NT & 7); 2622 } 2623 } 2624 2625 /// Set the target's minimum function alignment. setMinFunctionAlignment(Align Alignment)2626 void setMinFunctionAlignment(Align Alignment) { 2627 MinFunctionAlignment = Alignment; 2628 } 2629 2630 /// Set the target's preferred function alignment. This should be set if 2631 /// there is a performance benefit to higher-than-minimum alignment setPrefFunctionAlignment(Align Alignment)2632 void setPrefFunctionAlignment(Align Alignment) { 2633 PrefFunctionAlignment = Alignment; 2634 } 2635 2636 /// Set the target's preferred loop alignment. Default alignment is one, it 2637 /// means the target does not care about loop alignment. The target may also 2638 /// override getPrefLoopAlignment to provide per-loop values. 
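/// A hypothetical core with a 16-byte fetch group might request (sketch):
/// \code
///   setPrefLoopAlignment(Align(16));
///   setMaxBytesForAlignment(8); // but emit at most 8 bytes of padding
/// \endcode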
setPrefLoopAlignment(Align Alignment)2639 void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; } setMaxBytesForAlignment(unsigned MaxBytes)2640 void setMaxBytesForAlignment(unsigned MaxBytes) { 2641 MaxBytesForAlignment = MaxBytes; 2642 } 2643 2644 /// Set the minimum stack alignment of an argument. setMinStackArgumentAlignment(Align Alignment)2645 void setMinStackArgumentAlignment(Align Alignment) { 2646 MinStackArgumentAlignment = Alignment; 2647 } 2648 2649 /// Set the maximum atomic operation size supported by the 2650 /// backend. Atomic operations greater than this size (as well as 2651 /// ones that are not naturally aligned), will be expanded by 2652 /// AtomicExpandPass into an __atomic_* library call. setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)2653 void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) { 2654 MaxAtomicSizeInBitsSupported = SizeInBits; 2655 } 2656 2657 /// Set the size in bits of the maximum div/rem the backend supports. 2658 /// Larger operations will be expanded by ExpandLargeDivRem. setMaxDivRemBitWidthSupported(unsigned SizeInBits)2659 void setMaxDivRemBitWidthSupported(unsigned SizeInBits) { 2660 MaxDivRemBitWidthSupported = SizeInBits; 2661 } 2662 2663 /// Set the size in bits of the maximum fp convert the backend supports. 2664 /// Larger operations will be expanded by ExpandLargeFPConvert. setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits)2665 void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits) { 2666 MaxLargeFPConvertBitWidthSupported = SizeInBits; 2667 } 2668 2669 /// Sets the minimum cmpxchg or ll/sc size supported by the backend. setMinCmpXchgSizeInBits(unsigned SizeInBits)2670 void setMinCmpXchgSizeInBits(unsigned SizeInBits) { 2671 MinCmpXchgSizeInBits = SizeInBits; 2672 } 2673 2674 /// Sets whether unaligned atomic operations are supported. setSupportsUnalignedAtomics(bool UnalignedSupported)2675 void setSupportsUnalignedAtomics(bool UnalignedSupported) { 2676 SupportsUnalignedAtomics = UnalignedSupported; 2677 } 2678 2679 public: 2680 //===--------------------------------------------------------------------===// 2681 // Addressing mode description hooks (used by LSR etc). 2682 // 2683 2684 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store 2685 /// instructions reading the address. This allows as much computation as 2686 /// possible to be done in the address mode for that operand. This hook lets 2687 /// targets also pass back when this should be done on intrinsics which 2688 /// load/store. getAddrModeArguments(IntrinsicInst *,SmallVectorImpl<Value * > &,Type * &)2689 virtual bool getAddrModeArguments(IntrinsicInst * /*I*/, 2690 SmallVectorImpl<Value*> &/*Ops*/, 2691 Type *&/*AccessTy*/) const { 2692 return false; 2693 } 2694 2695 /// This represents an addressing mode of: 2696 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg 2697 /// If BaseGV is null, there is no BaseGV. 2698 /// If BaseOffs is zero, there is no base offset. 2699 /// If HasBaseReg is false, there is no base register. 2700 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with 2701 /// no scale. 2702 struct AddrMode { 2703 GlobalValue *BaseGV = nullptr; 2704 int64_t BaseOffs = 0; 2705 bool HasBaseReg = false; 2706 int64_t Scale = 0; 2707 AddrMode() = default; 2708 }; 2709 2710 /// Return true if the addressing mode represented by AM is legal for this 2711 /// target, for a load/store of the specified type. 
2712 /// 2713 /// The type may be VoidTy, in which case only return true if the addressing 2714 /// mode is legal for a load/store of any legal type. TODO: Handle 2715 /// pre/postinc as well. 2716 /// 2717 /// If the address space cannot be determined, it will be -1. 2718 /// 2719 /// TODO: Remove default argument 2720 virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, 2721 Type *Ty, unsigned AddrSpace, 2722 Instruction *I = nullptr) const; 2723 2724 /// Return the prefered common base offset. getPreferredLargeGEPBaseOffset(int64_t MinOffset,int64_t MaxOffset)2725 virtual int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, 2726 int64_t MaxOffset) const { 2727 return 0; 2728 } 2729 2730 /// Return true if the specified immediate is legal icmp immediate, that is 2731 /// the target has icmp instructions which can compare a register against the 2732 /// immediate without having to materialize the immediate into a register. isLegalICmpImmediate(int64_t)2733 virtual bool isLegalICmpImmediate(int64_t) const { 2734 return true; 2735 } 2736 2737 /// Return true if the specified immediate is legal add immediate, that is the 2738 /// target has add instructions which can add a register with the immediate 2739 /// without having to materialize the immediate into a register. isLegalAddImmediate(int64_t)2740 virtual bool isLegalAddImmediate(int64_t) const { 2741 return true; 2742 } 2743 2744 /// Return true if the specified immediate is legal for the value input of a 2745 /// store instruction. isLegalStoreImmediate(int64_t Value)2746 virtual bool isLegalStoreImmediate(int64_t Value) const { 2747 // Default implementation assumes that at least 0 works since it is likely 2748 // that a zero register exists or a zero immediate is allowed. 2749 return Value == 0; 2750 } 2751 2752 /// Return true if it's significantly cheaper to shift a vector by a uniform 2753 /// scalar than by an amount which will vary across each lane. On x86 before 2754 /// AVX2 for example, there is a "psllw" instruction for the former case, but 2755 /// no simple instruction for a general "a << b" operation on vectors. 2756 /// This should also apply to lowering for vector funnel shifts (rotates). isVectorShiftByScalarCheap(Type * Ty)2757 virtual bool isVectorShiftByScalarCheap(Type *Ty) const { 2758 return false; 2759 } 2760 2761 /// Given a shuffle vector SVI representing a vector splat, return a new 2762 /// scalar type of size equal to SVI's scalar type if the new type is more 2763 /// profitable. Returns nullptr otherwise. For example under MVE float splats 2764 /// are converted to integer to prevent the need to move from SPR to GPR 2765 /// registers. shouldConvertSplatType(ShuffleVectorInst * SVI)2766 virtual Type* shouldConvertSplatType(ShuffleVectorInst* SVI) const { 2767 return nullptr; 2768 } 2769 2770 /// Given a set in interconnected phis of type 'From' that are loaded/stored 2771 /// or bitcast to type 'To', return true if the set should be converted to 2772 /// 'To'. shouldConvertPhiType(Type * From,Type * To)2773 virtual bool shouldConvertPhiType(Type *From, Type *To) const { 2774 return (From->isIntegerTy() || From->isFloatingPointTy()) && 2775 (To->isIntegerTy() || To->isFloatingPointTy()); 2776 } 2777 2778 /// Returns true if the opcode is a commutative binary operation. isCommutativeBinOp(unsigned Opcode)2779 virtual bool isCommutativeBinOp(unsigned Opcode) const { 2780 // FIXME: This should get its info from the td file. 
2781 switch (Opcode) { 2782 case ISD::ADD: 2783 case ISD::SMIN: 2784 case ISD::SMAX: 2785 case ISD::UMIN: 2786 case ISD::UMAX: 2787 case ISD::MUL: 2788 case ISD::MULHU: 2789 case ISD::MULHS: 2790 case ISD::SMUL_LOHI: 2791 case ISD::UMUL_LOHI: 2792 case ISD::FADD: 2793 case ISD::FMUL: 2794 case ISD::AND: 2795 case ISD::OR: 2796 case ISD::XOR: 2797 case ISD::SADDO: 2798 case ISD::UADDO: 2799 case ISD::ADDC: 2800 case ISD::ADDE: 2801 case ISD::SADDSAT: 2802 case ISD::UADDSAT: 2803 case ISD::FMINNUM: 2804 case ISD::FMAXNUM: 2805 case ISD::FMINNUM_IEEE: 2806 case ISD::FMAXNUM_IEEE: 2807 case ISD::FMINIMUM: 2808 case ISD::FMAXIMUM: 2809 case ISD::AVGFLOORS: 2810 case ISD::AVGFLOORU: 2811 case ISD::AVGCEILS: 2812 case ISD::AVGCEILU: 2813 case ISD::ABDS: 2814 case ISD::ABDU: 2815 return true; 2816 default: return false; 2817 } 2818 } 2819 2820 /// Return true if the node is a math/logic binary operator. isBinOp(unsigned Opcode)2821 virtual bool isBinOp(unsigned Opcode) const { 2822 // A commutative binop must be a binop. 2823 if (isCommutativeBinOp(Opcode)) 2824 return true; 2825 // These are non-commutative binops. 2826 switch (Opcode) { 2827 case ISD::SUB: 2828 case ISD::SHL: 2829 case ISD::SRL: 2830 case ISD::SRA: 2831 case ISD::ROTL: 2832 case ISD::ROTR: 2833 case ISD::SDIV: 2834 case ISD::UDIV: 2835 case ISD::SREM: 2836 case ISD::UREM: 2837 case ISD::SSUBSAT: 2838 case ISD::USUBSAT: 2839 case ISD::FSUB: 2840 case ISD::FDIV: 2841 case ISD::FREM: 2842 return true; 2843 default: 2844 return false; 2845 } 2846 } 2847 2848 /// Return true if it's free to truncate a value of type FromTy to type 2849 /// ToTy. e.g. On x86 it's free to truncate a i32 value in register EAX to i16 2850 /// by referencing its sub-register AX. 2851 /// Targets must return false when FromTy <= ToTy. isTruncateFree(Type * FromTy,Type * ToTy)2852 virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const { 2853 return false; 2854 } 2855 2856 /// Return true if a truncation from FromTy to ToTy is permitted when deciding 2857 /// whether a call is in tail position. Typically this means that both results 2858 /// would be assigned to the same register or stack slot, but it could mean 2859 /// the target performs adequate checks of its own before proceeding with the 2860 /// tail call. Targets must return false when FromTy <= ToTy. allowTruncateForTailCall(Type * FromTy,Type * ToTy)2861 virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const { 2862 return false; 2863 } 2864 isTruncateFree(EVT FromVT,EVT ToVT)2865 virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const { return false; } isTruncateFree(LLT FromTy,LLT ToTy,const DataLayout & DL,LLVMContext & Ctx)2866 virtual bool isTruncateFree(LLT FromTy, LLT ToTy, const DataLayout &DL, 2867 LLVMContext &Ctx) const { 2868 return isTruncateFree(getApproximateEVTForLLT(FromTy, DL, Ctx), 2869 getApproximateEVTForLLT(ToTy, DL, Ctx)); 2870 } 2871 2872 /// Return true if truncating the specific node Val to type VT2 is free. isTruncateFree(SDValue Val,EVT VT2)2873 virtual bool isTruncateFree(SDValue Val, EVT VT2) const { 2874 // Fallback to type matching. 2875 return isTruncateFree(Val.getValueType(), VT2); 2876 } 2877 isProfitableToHoist(Instruction * I)2878 virtual bool isProfitableToHoist(Instruction *I) const { return true; } 2879 2880 /// Return true if the extension represented by \p I is free. 
/// Unlike the is[Z|FP]ExtFree family, which is based on types, 2882 /// this method can use the context provided by \p I to decide 2883 /// whether or not \p I is free. 2884 /// This method extends the behavior of the is[Z|FP]ExtFree family. 2885 /// In other words, if is[Z|FP]ExtFree returns true, then this method 2886 /// returns true as well. The converse is not true. 2887 /// The target can perform the adequate checks by overriding isExtFreeImpl. 2888 /// \pre \p I must be a sign, zero, or fp extension. isExtFree(const Instruction * I)2889 bool isExtFree(const Instruction *I) const { 2890 switch (I->getOpcode()) { 2891 case Instruction::FPExt: 2892 if (isFPExtFree(EVT::getEVT(I->getType()), 2893 EVT::getEVT(I->getOperand(0)->getType()))) 2894 return true; 2895 break; 2896 case Instruction::ZExt: 2897 if (isZExtFree(I->getOperand(0)->getType(), I->getType())) 2898 return true; 2899 break; 2900 case Instruction::SExt: 2901 break; 2902 default: 2903 llvm_unreachable("Instruction is not an extension"); 2904 } 2905 return isExtFreeImpl(I); 2906 } 2907 2908 /// Return true if \p Load and \p Ext can form an ExtLoad. 2909 /// For example, in AArch64 2910 /// %L = load i8, i8* %ptr 2911 /// %E = zext i8 %L to i32 2912 /// can be lowered into one load instruction 2913 /// ldrb w0, [x0] isExtLoad(const LoadInst * Load,const Instruction * Ext,const DataLayout & DL)2914 bool isExtLoad(const LoadInst *Load, const Instruction *Ext, 2915 const DataLayout &DL) const { 2916 EVT VT = getValueType(DL, Ext->getType()); 2917 EVT LoadVT = getValueType(DL, Load->getType()); 2918 2919 // If the load has other users and the truncate is not free, the ext 2920 // probably isn't free. 2921 if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) && 2922 !isTruncateFree(Ext->getType(), Load->getType())) 2923 return false; 2924 2925 // Check whether the target supports casts folded into loads. 2926 unsigned LType; 2927 if (isa<ZExtInst>(Ext)) 2928 LType = ISD::ZEXTLOAD; 2929 else { 2930 assert(isa<SExtInst>(Ext) && "Unexpected ext type!"); 2931 LType = ISD::SEXTLOAD; 2932 } 2933 2934 return isLoadExtLegal(LType, VT, LoadVT); 2935 } 2936 2937 /// Return true if any actual instruction that defines a value of type FromTy 2938 /// implicitly zero-extends the value to ToTy in the result register. 2939 /// 2940 /// The function should return true when it is likely that the truncate can 2941 /// be freely folded with an instruction defining a value of FromTy. If 2942 /// the defining instruction is unknown (because you're looking at a 2943 /// function argument, PHI, etc.) then the target may require an 2944 /// explicit truncate, which is not necessarily free, but this function 2945 /// does not deal with those cases. 2946 /// Targets must return false when FromTy >= ToTy.
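/// As an illustrative override (FooTargetLowering is hypothetical), a 64-bit
/// target whose 32-bit instructions implicitly clear the upper 32 bits of the
/// destination register could implement this as:
/// \code
///   bool FooTargetLowering::isZExtFree(Type *FromTy, Type *ToTy) const {
///     return FromTy->isIntegerTy(32) && ToTy->isIntegerTy(64);
///   }
/// \endcode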
isZExtFree(Type * FromTy,Type * ToTy)2947 virtual bool isZExtFree(Type *FromTy, Type *ToTy) const { 2948 return false; 2949 } 2950 isZExtFree(EVT FromTy,EVT ToTy)2951 virtual bool isZExtFree(EVT FromTy, EVT ToTy) const { return false; } isZExtFree(LLT FromTy,LLT ToTy,const DataLayout & DL,LLVMContext & Ctx)2952 virtual bool isZExtFree(LLT FromTy, LLT ToTy, const DataLayout &DL, 2953 LLVMContext &Ctx) const { 2954 return isZExtFree(getApproximateEVTForLLT(FromTy, DL, Ctx), 2955 getApproximateEVTForLLT(ToTy, DL, Ctx)); 2956 } 2957 2958 /// Return true if zero-extending the specific node Val to type VT2 is free 2959 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or 2960 /// because it's folded such as X86 zero-extending loads). isZExtFree(SDValue Val,EVT VT2)2961 virtual bool isZExtFree(SDValue Val, EVT VT2) const { 2962 return isZExtFree(Val.getValueType(), VT2); 2963 } 2964 2965 /// Return true if sign-extension from FromTy to ToTy is cheaper than 2966 /// zero-extension. isSExtCheaperThanZExt(EVT FromTy,EVT ToTy)2967 virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const { 2968 return false; 2969 } 2970 2971 /// Return true if this constant should be sign extended when promoting to 2972 /// a larger type. signExtendConstant(const ConstantInt * C)2973 virtual bool signExtendConstant(const ConstantInt *C) const { return false; } 2974 2975 /// Return true if sinking I's operands to the same basic block as I is 2976 /// profitable, e.g. because the operands can be folded into a target 2977 /// instruction during instruction selection. After calling the function 2978 /// \p Ops contains the Uses to sink ordered by dominance (dominating users 2979 /// come first). shouldSinkOperands(Instruction * I,SmallVectorImpl<Use * > & Ops)2980 virtual bool shouldSinkOperands(Instruction *I, 2981 SmallVectorImpl<Use *> &Ops) const { 2982 return false; 2983 } 2984 2985 /// Try to optimize extending or truncating conversion instructions (like 2986 /// zext, trunc, fptoui, uitofp) for the target. 2987 virtual bool optimizeExtendOrTruncateConversion(Instruction * I,Loop * L,const TargetTransformInfo & TTI)2988 optimizeExtendOrTruncateConversion(Instruction *I, Loop *L, 2989 const TargetTransformInfo &TTI) const { 2990 return false; 2991 } 2992 2993 /// Return true if the target supplies and combines to a paired load 2994 /// two loaded values of type LoadedType next to each other in memory. 2995 /// RequiredAlignment gives the minimal alignment constraints that must be met 2996 /// to be able to select this paired load. 2997 /// 2998 /// This information is *not* used to generate actual paired loads, but it is 2999 /// used to generate a sequence of loads that is easier to combine into a 3000 /// paired load. 3001 /// For instance, something like this: 3002 /// a = load i64* addr 3003 /// b = trunc i64 a to i32 3004 /// c = lshr i64 a, 32 3005 /// d = trunc i64 c to i32 3006 /// will be optimized into: 3007 /// b = load i32* addr1 3008 /// d = load i32* addr2 3009 /// Where addr1 = addr2 +/- sizeof(i32). 3010 /// 3011 /// In other words, unless the target performs a post-isel load combining, 3012 /// this information should not be provided because it will generate more 3013 /// loads. hasPairedLoad(EVT,Align &)3014 virtual bool hasPairedLoad(EVT /*LoadedType*/, 3015 Align & /*RequiredAlignment*/) const { 3016 return false; 3017 } 3018 3019 /// Return true if the target has a vector blend instruction. 
hasVectorBlend()3020 virtual bool hasVectorBlend() const { return false; } 3021 3022 /// Get the maximum supported factor for interleaved memory accesses. 3023 /// Default to be the minimum interleave factor: 2. getMaxSupportedInterleaveFactor()3024 virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; } 3025 3026 /// Lower an interleaved load to target specific intrinsics. Return 3027 /// true on success. 3028 /// 3029 /// \p LI is the vector load instruction. 3030 /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector. 3031 /// \p Indices is the corresponding indices for each shufflevector. 3032 /// \p Factor is the interleave factor. lowerInterleavedLoad(LoadInst * LI,ArrayRef<ShuffleVectorInst * > Shuffles,ArrayRef<unsigned> Indices,unsigned Factor)3033 virtual bool lowerInterleavedLoad(LoadInst *LI, 3034 ArrayRef<ShuffleVectorInst *> Shuffles, 3035 ArrayRef<unsigned> Indices, 3036 unsigned Factor) const { 3037 return false; 3038 } 3039 3040 /// Lower an interleaved store to target specific intrinsics. Return 3041 /// true on success. 3042 /// 3043 /// \p SI is the vector store instruction. 3044 /// \p SVI is the shufflevector to RE-interleave the stored vector. 3045 /// \p Factor is the interleave factor. lowerInterleavedStore(StoreInst * SI,ShuffleVectorInst * SVI,unsigned Factor)3046 virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, 3047 unsigned Factor) const { 3048 return false; 3049 } 3050 3051 /// Lower a deinterleave intrinsic to a target specific load intrinsic. 3052 /// Return true on success. Currently only supports 3053 /// llvm.experimental.vector.deinterleave2 3054 /// 3055 /// \p DI is the deinterleave intrinsic. 3056 /// \p LI is the accompanying load instruction lowerDeinterleaveIntrinsicToLoad(IntrinsicInst * DI,LoadInst * LI)3057 virtual bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI, 3058 LoadInst *LI) const { 3059 return false; 3060 } 3061 3062 /// Lower an interleave intrinsic to a target specific store intrinsic. 3063 /// Return true on success. Currently only supports 3064 /// llvm.experimental.vector.interleave2 3065 /// 3066 /// \p II is the interleave intrinsic. 3067 /// \p SI is the accompanying store instruction lowerInterleaveIntrinsicToStore(IntrinsicInst * II,StoreInst * SI)3068 virtual bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II, 3069 StoreInst *SI) const { 3070 return false; 3071 } 3072 3073 /// Return true if an fpext operation is free (for instance, because 3074 /// single-precision floating-point numbers are implicitly extended to 3075 /// double-precision). isFPExtFree(EVT DestVT,EVT SrcVT)3076 virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const { 3077 assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() && 3078 "invalid fpext types"); 3079 return false; 3080 } 3081 3082 /// Return true if an fpext operation input to an \p Opcode operation is free 3083 /// (for instance, because half-precision floating-point numbers are 3084 /// implicitly extended to float-precision) for an FMA instruction. isFPExtFoldable(const MachineInstr & MI,unsigned Opcode,LLT DestTy,LLT SrcTy)3085 virtual bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode, 3086 LLT DestTy, LLT SrcTy) const { 3087 return false; 3088 } 3089 3090 /// Return true if an fpext operation input to an \p Opcode operation is free 3091 /// (for instance, because half-precision floating-point numbers are 3092 /// implicitly extended to float-precision) for an FMA instruction. 
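/// Schematically, this guards DAG folds of the form (assuming the target
/// agrees the extensions are free to fold into the wider operation):
/// \code
///   (fadd (fpext (fmul x, y)), z) --> (fma (fpext x), (fpext y), z)
/// \endcode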
isFPExtFoldable(const SelectionDAG & DAG,unsigned Opcode,EVT DestVT,EVT SrcVT)3093 virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode, 3094 EVT DestVT, EVT SrcVT) const { 3095 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() && 3096 "invalid fpext types"); 3097 return isFPExtFree(DestVT, SrcVT); 3098 } 3099 3100 /// Return true if folding a vector load into ExtVal (a sign, zero, or any 3101 /// extend node) is profitable. isVectorLoadExtDesirable(SDValue ExtVal)3102 virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; } 3103 3104 /// Return true if an fneg operation is free to the point where it is never 3105 /// worthwhile to replace it with a bitwise operation. isFNegFree(EVT VT)3106 virtual bool isFNegFree(EVT VT) const { 3107 assert(VT.isFloatingPoint()); 3108 return false; 3109 } 3110 3111 /// Return true if an fabs operation is free to the point where it is never 3112 /// worthwhile to replace it with a bitwise operation. isFAbsFree(EVT VT)3113 virtual bool isFAbsFree(EVT VT) const { 3114 assert(VT.isFloatingPoint()); 3115 return false; 3116 } 3117 3118 /// Return true if an FMA operation is faster than a pair of fmul and fadd 3119 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method 3120 /// returns true, otherwise fmuladd is expanded to fmul + fadd. 3121 /// 3122 /// NOTE: This may be called before legalization on types for which FMAs are 3123 /// not legal, but should return true if those types will eventually legalize 3124 /// to types that support FMAs. After legalization, it will only be called on 3125 /// types that support FMAs (via Legal or Custom actions) isFMAFasterThanFMulAndFAdd(const MachineFunction & MF,EVT)3126 virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, 3127 EVT) const { 3128 return false; 3129 } 3130 3131 /// Return true if an FMA operation is faster than a pair of fmul and fadd 3132 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method 3133 /// returns true, otherwise fmuladd is expanded to fmul + fadd. 3134 /// 3135 /// NOTE: This may be called before legalization on types for which FMAs are 3136 /// not legal, but should return true if those types will eventually legalize 3137 /// to types that support FMAs. After legalization, it will only be called on 3138 /// types that support FMAs (via Legal or Custom actions) isFMAFasterThanFMulAndFAdd(const MachineFunction & MF,LLT)3139 virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, 3140 LLT) const { 3141 return false; 3142 } 3143 3144 /// IR version isFMAFasterThanFMulAndFAdd(const Function & F,Type *)3145 virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const { 3146 return false; 3147 } 3148 3149 /// Returns true if \p MI can be combined with another instruction to 3150 /// form TargetOpcode::G_FMAD. \p N may be an TargetOpcode::G_FADD, 3151 /// TargetOpcode::G_FSUB, or an TargetOpcode::G_FMUL which will be 3152 /// distributed into an fadd/fsub. 
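/// Conceptually, the guarded GlobalISel combine is:
/// \code
///   %m:_(s32) = G_FMUL %x, %y
///   %r:_(s32) = G_FADD %m, %z
///   ==>
///   %r:_(s32) = G_FMAD %x, %y, %z
/// \endcode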
isFMADLegal(const MachineInstr & MI,LLT Ty)3153 virtual bool isFMADLegal(const MachineInstr &MI, LLT Ty) const { 3154 assert((MI.getOpcode() == TargetOpcode::G_FADD || 3155 MI.getOpcode() == TargetOpcode::G_FSUB || 3156 MI.getOpcode() == TargetOpcode::G_FMUL) && 3157 "unexpected node in FMAD forming combine"); 3158 switch (Ty.getScalarSizeInBits()) { 3159 case 16: 3160 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f16); 3161 case 32: 3162 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f32); 3163 case 64: 3164 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f64); 3165 default: 3166 break; 3167 } 3168 3169 return false; 3170 } 3171 3172 /// Returns true if \p N can be combined with other nodes to form an 3173 /// ISD::FMAD. \p N may be an ISD::FADD, ISD::FSUB, or an ISD::FMUL which will 3174 /// be distributed into an fadd/fsub. isFMADLegal(const SelectionDAG & DAG,const SDNode * N)3175 virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const { 3176 assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB || 3177 N->getOpcode() == ISD::FMUL) && 3178 "unexpected node in FMAD forming combine"); 3179 return isOperationLegal(ISD::FMAD, N->getValueType(0)); 3180 } 3181 3182 // Return true when the decision to generate FMAs (or FMS, FMLA, etc.) rather 3183 // than FMUL and ADD is delegated to the machine combiner. generateFMAsInMachineCombiner(EVT VT,CodeGenOptLevel OptLevel)3184 virtual bool generateFMAsInMachineCombiner(EVT VT, 3185 CodeGenOptLevel OptLevel) const { 3186 return false; 3187 } 3188 3189 /// Return true if it's profitable to narrow operations of type SrcVT to 3190 /// DestVT. e.g. on x86, it's profitable to narrow from i32 to i8 but not from 3191 /// i32 to i16. isNarrowingProfitable(EVT SrcVT,EVT DestVT)3192 virtual bool isNarrowingProfitable(EVT SrcVT, EVT DestVT) const { 3193 return false; 3194 } 3195 3196 /// Return true if pulling a binary operation into a select with an identity 3197 /// constant is profitable. This is the inverse of an IR transform. 3198 /// Example: X + (Cond ? Y : 0) --> Cond ? (X + Y) : X shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,EVT VT)3199 virtual bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, 3200 EVT VT) const { 3201 return false; 3202 } 3203 3204 /// Return true if it is beneficial to convert a load of a constant to 3205 /// just the constant itself. 3206 /// On some targets it might be more efficient to use a combination of 3207 /// arithmetic instructions to materialize the constant instead of loading it 3208 /// from a constant pool. shouldConvertConstantLoadToIntImm(const APInt & Imm,Type * Ty)3209 virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, 3210 Type *Ty) const { 3211 return false; 3212 } 3213 3214 /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type 3215 /// from this source type with this index. This is needed because 3216 /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of 3217 /// the first element, and only the target knows which lowering is cheap. isExtractSubvectorCheap(EVT ResVT,EVT SrcVT,unsigned Index)3218 virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, 3219 unsigned Index) const { 3220 return false; 3221 } 3222 3223 /// Try to convert an extract element of a vector binary operation into an 3224 /// extract element followed by a scalar operation.
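/// That is, when this hook (together with the surrounding legality checks)
/// allows it, the combiner may rewrite, schematically:
/// \code
///   (extract_vector_elt (add X, Y), C)
///   --> (add (extract_vector_elt X, C), (extract_vector_elt Y, C))
/// \endcode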
shouldScalarizeBinop(SDValue VecOp)3225 virtual bool shouldScalarizeBinop(SDValue VecOp) const { 3226 return false; 3227 } 3228 3229 /// Return true if extraction of a scalar element from the given vector type 3230 /// at the given index is cheap. For example, if scalar operations occur on 3231 /// the same register file as vector operations, then an extract element may 3232 /// be a sub-register rename rather than an actual instruction. isExtractVecEltCheap(EVT VT,unsigned Index)3233 virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const { 3234 return false; 3235 } 3236 3237 /// Try to convert math with an overflow comparison into the corresponding DAG 3238 /// node operation. Targets may want to override this independently of whether 3239 /// the operation is legal/custom for the given type because it may obscure 3240 /// matching of other patterns. shouldFormOverflowOp(unsigned Opcode,EVT VT,bool MathUsed)3241 virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT, 3242 bool MathUsed) const { 3243 // TODO: The default logic is inherited from code in CodeGenPrepare. 3244 // The opcode should not make a difference by default? 3245 if (Opcode != ISD::UADDO) 3246 return false; 3247 3248 // Allow the transform as long as we have an integer type that is not 3249 // obviously illegal and unsupported and if the math result is used 3250 // besides the overflow check. On some targets (e.g. SPARC), it is 3251 // not profitable to form on overflow op if the math result has no 3252 // concrete users. 3253 if (VT.isVector()) 3254 return false; 3255 return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT)); 3256 } 3257 3258 // Return true if it is profitable to use a scalar input to a BUILD_VECTOR 3259 // even if the vector itself has multiple uses. aggressivelyPreferBuildVectorSources(EVT VecVT)3260 virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const { 3261 return false; 3262 } 3263 3264 // Return true if CodeGenPrepare should consider splitting large offset of a 3265 // GEP to make the GEP fit into the addressing mode and can be sunk into the 3266 // same blocks of its users. shouldConsiderGEPOffsetSplit()3267 virtual bool shouldConsiderGEPOffsetSplit() const { return false; } 3268 3269 /// Return true if creating a shift of the type by the given 3270 /// amount is not profitable. shouldAvoidTransformToShift(EVT VT,unsigned Amount)3271 virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const { 3272 return false; 3273 } 3274 3275 // Should we fold (select_cc seteq (and x, y), 0, 0, A) -> (and (sra (shl x)) 3276 // A) where y has a single bit set? shouldFoldSelectWithSingleBitTest(EVT VT,const APInt & AndMask)3277 virtual bool shouldFoldSelectWithSingleBitTest(EVT VT, 3278 const APInt &AndMask) const { 3279 unsigned ShCt = AndMask.getBitWidth() - 1; 3280 return !shouldAvoidTransformToShift(VT, ShCt); 3281 } 3282 3283 /// Does this target require the clearing of high-order bits in a register 3284 /// passed to the fp16 to fp conversion library function. shouldKeepZExtForFP16Conv()3285 virtual bool shouldKeepZExtForFP16Conv() const { return false; } 3286 3287 /// Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT 3288 /// from min(max(fptoi)) saturation patterns. 
shouldConvertFpToSat(unsigned Op,EVT FPVT,EVT VT)3289 virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const { 3290 return isOperationLegalOrCustom(Op, VT); 3291 } 3292 3293 /// Does this target support complex deinterleaving isComplexDeinterleavingSupported()3294 virtual bool isComplexDeinterleavingSupported() const { return false; } 3295 3296 /// Does this target support complex deinterleaving with the given operation 3297 /// and type isComplexDeinterleavingOperationSupported(ComplexDeinterleavingOperation Operation,Type * Ty)3298 virtual bool isComplexDeinterleavingOperationSupported( 3299 ComplexDeinterleavingOperation Operation, Type *Ty) const { 3300 return false; 3301 } 3302 3303 /// Create the IR node for the given complex deinterleaving operation. 3304 /// If one cannot be created using all the given inputs, nullptr should be 3305 /// returned. 3306 virtual Value *createComplexDeinterleavingIR( 3307 IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, 3308 ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, 3309 Value *Accumulator = nullptr) const { 3310 return nullptr; 3311 } 3312 3313 //===--------------------------------------------------------------------===// 3314 // Runtime Library hooks 3315 // 3316 3317 /// Rename the default libcall routine name for the specified libcall. setLibcallName(RTLIB::Libcall Call,const char * Name)3318 void setLibcallName(RTLIB::Libcall Call, const char *Name) { 3319 LibcallRoutineNames[Call] = Name; 3320 } setLibcallName(ArrayRef<RTLIB::Libcall> Calls,const char * Name)3321 void setLibcallName(ArrayRef<RTLIB::Libcall> Calls, const char *Name) { 3322 for (auto Call : Calls) 3323 setLibcallName(Call, Name); 3324 } 3325 3326 /// Get the libcall routine name for the specified libcall. getLibcallName(RTLIB::Libcall Call)3327 const char *getLibcallName(RTLIB::Libcall Call) const { 3328 return LibcallRoutineNames[Call]; 3329 } 3330 3331 /// Override the default CondCode to be used to test the result of the 3332 /// comparison libcall against zero. setCmpLibcallCC(RTLIB::Libcall Call,ISD::CondCode CC)3333 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) { 3334 CmpLibcallCCs[Call] = CC; 3335 } 3336 3337 /// Get the CondCode that's to be used to test the result of the comparison 3338 /// libcall against zero. getCmpLibcallCC(RTLIB::Libcall Call)3339 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const { 3340 return CmpLibcallCCs[Call]; 3341 } 3342 3343 /// Set the CallingConv that should be used for the specified libcall. setLibcallCallingConv(RTLIB::Libcall Call,CallingConv::ID CC)3344 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) { 3345 LibcallCallingConvs[Call] = CC; 3346 } 3347 3348 /// Get the CallingConv that should be used for the specified libcall. getLibcallCallingConv(RTLIB::Libcall Call)3349 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const { 3350 return LibcallCallingConvs[Call]; 3351 } 3352 3353 /// Execute target specific actions to finalize target lowering. 3354 /// This is used to set extra flags in MachineFrameInformation and freezing 3355 /// the set of reserved registers. 3356 /// The default implementation just freezes the set of reserved registers. 
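/// Overrides normally do their own bookkeeping and then delegate to the base
/// class (sketch; FooTargetLowering is hypothetical):
/// \code
///   void FooTargetLowering::finalizeLowering(MachineFunction &MF) const {
///     // ... update target-specific MachineFunctionInfo / frame info ...
///     TargetLoweringBase::finalizeLowering(MF);
///   }
/// \endcode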
3357 virtual void finalizeLowering(MachineFunction &MF) const; 3358 3359 //===----------------------------------------------------------------------===// 3360 // GlobalISel Hooks 3361 //===----------------------------------------------------------------------===// 3362 /// Check whether or not \p MI needs to be moved close to its uses. 3363 virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const; 3364 3365 3366 private: 3367 const TargetMachine &TM; 3368 3369 /// Tells the code generator that the target has multiple (allocatable) 3370 /// condition registers that can be used to store the results of comparisons 3371 /// for use by selects and conditional branches. With multiple condition 3372 /// registers, the code generator will not aggressively sink comparisons into 3373 /// the blocks of their users. 3374 bool HasMultipleConditionRegisters; 3375 3376 /// Tells the code generator that the target has BitExtract instructions. 3377 /// The code generator will aggressively sink "shift"s into the blocks of 3378 /// their users if the users will generate "and" instructions which can be 3379 /// combined with "shift" to BitExtract instructions. 3380 bool HasExtractBitsInsn; 3381 3382 /// Tells the code generator to bypass slow divide or remainder 3383 /// instructions. For example, BypassSlowDivWidths[32,8] tells the code 3384 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer 3385 /// div/rem when the operands are positive and less than 256. 3386 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths; 3387 3388 /// Tells the code generator that it shouldn't generate extra flow control 3389 /// instructions and should attempt to combine flow control instructions via 3390 /// predication. 3391 bool JumpIsExpensive; 3392 3393 /// Information about the contents of the high-bits in boolean values held in 3394 /// a type wider than i1. See getBooleanContents. 3395 BooleanContent BooleanContents; 3396 3397 /// Information about the contents of the high-bits in boolean values held in 3398 /// a type wider than i1. See getBooleanContents. 3399 BooleanContent BooleanFloatContents; 3400 3401 /// Information about the contents of the high-bits in boolean vector values 3402 /// when the element type is wider than i1. See getBooleanContents. 3403 BooleanContent BooleanVectorContents; 3404 3405 /// The target scheduling preference: shortest possible total cycles or lowest 3406 /// register usage. 3407 Sched::Preference SchedPreferenceInfo; 3408 3409 /// The minimum alignment that any argument on the stack needs to have. 3410 Align MinStackArgumentAlignment; 3411 3412 /// The minimum function alignment (used when optimizing for size, and to 3413 /// prevent explicitly provided alignment from leading to incorrect code). 3414 Align MinFunctionAlignment; 3415 3416 /// The preferred function alignment (used when alignment unspecified and 3417 /// optimizing for speed). 3418 Align PrefFunctionAlignment; 3419 3420 /// The preferred loop alignment (in log2 bot in bytes). 3421 Align PrefLoopAlignment; 3422 /// The maximum amount of bytes permitted to be emitted for alignment. 3423 unsigned MaxBytesForAlignment; 3424 3425 /// Size in bits of the maximum atomics size the backend supports. 3426 /// Accesses larger than this will be expanded by AtomicExpandPass. 3427 unsigned MaxAtomicSizeInBitsSupported; 3428 3429 /// Size in bits of the maximum div/rem size the backend supports. 3430 /// Larger operations will be expanded by ExpandLargeDivRem. 
3431 unsigned MaxDivRemBitWidthSupported; 3432 3433 /// Size in bits of the maximum larget fp convert size the backend 3434 /// supports. Larger operations will be expanded by ExpandLargeFPConvert. 3435 unsigned MaxLargeFPConvertBitWidthSupported; 3436 3437 /// Size in bits of the minimum cmpxchg or ll/sc operation the 3438 /// backend supports. 3439 unsigned MinCmpXchgSizeInBits; 3440 3441 /// This indicates if the target supports unaligned atomic operations. 3442 bool SupportsUnalignedAtomics; 3443 3444 /// If set to a physical register, this specifies the register that 3445 /// llvm.savestack/llvm.restorestack should save and restore. 3446 Register StackPointerRegisterToSaveRestore; 3447 3448 /// This indicates the default register class to use for each ValueType the 3449 /// target supports natively. 3450 const TargetRegisterClass *RegClassForVT[MVT::VALUETYPE_SIZE]; 3451 uint16_t NumRegistersForVT[MVT::VALUETYPE_SIZE]; 3452 MVT RegisterTypeForVT[MVT::VALUETYPE_SIZE]; 3453 3454 /// This indicates the "representative" register class to use for each 3455 /// ValueType the target supports natively. This information is used by the 3456 /// scheduler to track register pressure. By default, the representative 3457 /// register class is the largest legal super-reg register class of the 3458 /// register class of the specified type. e.g. On x86, i8, i16, and i32's 3459 /// representative class would be GR32. 3460 const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE] = {0}; 3461 3462 /// This indicates the "cost" of the "representative" register class for each 3463 /// ValueType. The cost is used by the scheduler to approximate register 3464 /// pressure. 3465 uint8_t RepRegClassCostForVT[MVT::VALUETYPE_SIZE]; 3466 3467 /// For any value types we are promoting or expanding, this contains the value 3468 /// type that we are changing to. For Expanded types, this contains one step 3469 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required 3470 /// (e.g. i64 -> i16). For types natively supported by the system, this holds 3471 /// the same type (e.g. i32 -> i32). 3472 MVT TransformToType[MVT::VALUETYPE_SIZE]; 3473 3474 /// For each operation and each value type, keep a LegalizeAction that 3475 /// indicates how instruction selection should deal with the operation. Most 3476 /// operations are Legal (aka, supported natively by the target), but 3477 /// operations that are not should be described. Note that operations on 3478 /// non-legal value types are not described here. 3479 LegalizeAction OpActions[MVT::VALUETYPE_SIZE][ISD::BUILTIN_OP_END]; 3480 3481 /// For each load extension type and each value type, keep a LegalizeAction 3482 /// that indicates how instruction selection should deal with a load of a 3483 /// specific value type and extension type. Uses 4-bits to store the action 3484 /// for each of the 4 load ext types. 3485 uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE]; 3486 3487 /// For each value type pair keep a LegalizeAction that indicates whether a 3488 /// truncating store of a specific value type and truncating type is legal. 3489 LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE]; 3490 3491 /// For each indexed mode and each value type, keep a quad of LegalizeAction 3492 /// that indicates how instruction selection should deal with the load / 3493 /// store / maskedload / maskedstore. 3494 /// 3495 /// The first dimension is the value_type for the reference. 
The second 3496 /// dimension represents the various modes for load store. 3497 uint16_t IndexedModeActions[MVT::VALUETYPE_SIZE][ISD::LAST_INDEXED_MODE]; 3498 3499 /// For each condition code (ISD::CondCode) keep a LegalizeAction that 3500 /// indicates how instruction selection should deal with the condition code. 3501 /// 3502 /// Because each CC action takes up 4 bits, we need to have the array size be 3503 /// large enough to fit all of the value types. This can be done by rounding 3504 /// up the MVT::VALUETYPE_SIZE value to the next multiple of 8. 3505 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8]; 3506 3507 ValueTypeActionImpl ValueTypeActions; 3508 3509 private: 3510 /// Targets can specify ISD nodes that they would like PerformDAGCombine 3511 /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this 3512 /// array. 3513 unsigned char 3514 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT]; 3515 3516 /// For operations that must be promoted to a specific type, this holds the 3517 /// destination type. This map should be sparse, so don't hold it as an 3518 /// array. 3519 /// 3520 /// Targets add entries to this map with AddPromotedToType(..), clients access 3521 /// this with getTypeToPromoteTo(..). 3522 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType> 3523 PromoteToType; 3524 3525 /// Stores the name each libcall. 3526 const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1]; 3527 3528 /// The ISD::CondCode that should be used to test the result of each of the 3529 /// comparison libcall against zero. 3530 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL]; 3531 3532 /// Stores the CallingConv that should be used for each libcall. 3533 CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL]; 3534 3535 /// Set default libcall names and calling conventions. 3536 void InitLibcalls(const Triple &TT); 3537 3538 /// The bits of IndexedModeActions used to store the legalisation actions 3539 /// We store the data as | ML | MS | L | S | each taking 4 bits. 3540 enum IndexedModeActionsBits { 3541 IMAB_Store = 0, 3542 IMAB_Load = 4, 3543 IMAB_MaskedStore = 8, 3544 IMAB_MaskedLoad = 12 3545 }; 3546 setIndexedModeAction(unsigned IdxMode,MVT VT,unsigned Shift,LegalizeAction Action)3547 void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift, 3548 LegalizeAction Action) { 3549 assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE && 3550 (unsigned)Action < 0xf && "Table isn't big enough!"); 3551 unsigned Ty = (unsigned)VT.SimpleTy; 3552 IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift); 3553 IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift; 3554 } 3555 getIndexedModeAction(unsigned IdxMode,MVT VT,unsigned Shift)3556 LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT, 3557 unsigned Shift) const { 3558 assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() && 3559 "Table isn't big enough!"); 3560 unsigned Ty = (unsigned)VT.SimpleTy; 3561 return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf); 3562 } 3563 3564 protected: 3565 /// Return true if the extension represented by \p I is free. 3566 /// \pre \p I is a sign, zero, or fp extension and 3567 /// is[Z|FP]ExtFree of the related types is not true. 
isExtFreeImpl(const Instruction * I)3568 virtual bool isExtFreeImpl(const Instruction *I) const { return false; } 3569 3570 /// Depth that GatherAllAliases should continue looking for chain 3571 /// dependencies when trying to find a more preferable chain. As an 3572 /// approximation, this should be more than the number of consecutive stores 3573 /// expected to be merged. 3574 unsigned GatherAllAliasesMaxDepth; 3575 3576 /// \brief Specify maximum number of store instructions per memset call. 3577 /// 3578 /// When lowering \@llvm.memset this field specifies the maximum number of 3579 /// store operations that may be substituted for the call to memset. Targets 3580 /// must set this value based on the cost threshold for that target. Targets 3581 /// should assume that the memset will be done using as many of the largest 3582 /// store operations first, followed by smaller ones, if necessary, per 3583 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine 3584 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte 3585 /// store. This only applies to setting a constant array of a constant size. 3586 unsigned MaxStoresPerMemset; 3587 /// Likewise for functions with the OptSize attribute. 3588 unsigned MaxStoresPerMemsetOptSize; 3589 3590 /// \brief Specify maximum number of store instructions per memcpy call. 3591 /// 3592 /// When lowering \@llvm.memcpy this field specifies the maximum number of 3593 /// store operations that may be substituted for a call to memcpy. Targets 3594 /// must set this value based on the cost threshold for that target. Targets 3595 /// should assume that the memcpy will be done using as many of the largest 3596 /// store operations first, followed by smaller ones, if necessary, per 3597 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine 3598 /// with 32-bit alignment would result in one 4-byte store, a one 2-byte store 3599 /// and one 1-byte store. This only applies to copying a constant array of 3600 /// constant size. 3601 unsigned MaxStoresPerMemcpy; 3602 /// Likewise for functions with the OptSize attribute. 3603 unsigned MaxStoresPerMemcpyOptSize; 3604 /// \brief Specify max number of store instructions to glue in inlined memcpy. 3605 /// 3606 /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number 3607 /// of store instructions to keep together. This helps in pairing and 3608 // vectorization later on. 3609 unsigned MaxGluedStoresPerMemcpy = 0; 3610 3611 /// \brief Specify maximum number of load instructions per memcmp call. 3612 /// 3613 /// When lowering \@llvm.memcmp this field specifies the maximum number of 3614 /// pairs of load operations that may be substituted for a call to memcmp. 3615 /// Targets must set this value based on the cost threshold for that target. 3616 /// Targets should assume that the memcmp will be done using as many of the 3617 /// largest load operations first, followed by smaller ones, if necessary, per 3618 /// alignment restrictions. For example, loading 7 bytes on a 32-bit machine 3619 /// with 32-bit alignment would result in one 4-byte load, a one 2-byte load 3620 /// and one 1-byte load. This only applies to copying a constant array of 3621 /// constant size. 3622 unsigned MaxLoadsPerMemcmp; 3623 /// Likewise for functions with the OptSize attribute. 3624 unsigned MaxLoadsPerMemcmpOptSize; 3625 3626 /// \brief Specify maximum number of store instructions per memmove call. 
3627 /// 3628 /// When lowering \@llvm.memmove this field specifies the maximum number of 3629 /// store instructions that may be substituted for a call to memmove. Targets 3630 /// must set this value based on the cost threshold for that target. Targets 3631 /// should assume that the memmove will be done using as many of the largest 3632 /// store operations first, followed by smaller ones, if necessary, per 3633 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine 3634 /// with 8-bit alignment would result in nine 1-byte stores. This only 3635 /// applies to copying a constant array of constant size. 3636 unsigned MaxStoresPerMemmove; 3637 /// Likewise for functions with the OptSize attribute. 3638 unsigned MaxStoresPerMemmoveOptSize; 3639 3640 /// Tells the code generator that select is more expensive than a branch if 3641 /// the branch is usually predicted right. 3642 bool PredictableSelectIsExpensive; 3643 3644 /// \see enableExtLdPromotion. 3645 bool EnableExtLdPromotion; 3646 3647 /// Return true if the value types that can be represented by the specified 3648 /// register class are all legal. 3649 bool isLegalRC(const TargetRegisterInfo &TRI, 3650 const TargetRegisterClass &RC) const; 3651 3652 /// Replace/modify any TargetFrameIndex operands with a targte-dependent 3653 /// sequence of memory operands that is recognized by PrologEpilogInserter. 3654 MachineBasicBlock *emitPatchPoint(MachineInstr &MI, 3655 MachineBasicBlock *MBB) const; 3656 3657 bool IsStrictFPEnabled; 3658 }; 3659 3660 /// This class defines information used to lower LLVM code to legal SelectionDAG 3661 /// operators that the target instruction selector can accept natively. 3662 /// 3663 /// This class also defines callbacks that targets must implement to lower 3664 /// target-specific constructs to SelectionDAG operators. 3665 class TargetLowering : public TargetLoweringBase { 3666 public: 3667 struct DAGCombinerInfo; 3668 struct MakeLibCallOptions; 3669 3670 TargetLowering(const TargetLowering &) = delete; 3671 TargetLowering &operator=(const TargetLowering &) = delete; 3672 3673 explicit TargetLowering(const TargetMachine &TM); 3674 3675 bool isPositionIndependent() const; 3676 isSDNodeSourceOfDivergence(const SDNode * N,FunctionLoweringInfo * FLI,UniformityInfo * UA)3677 virtual bool isSDNodeSourceOfDivergence(const SDNode *N, 3678 FunctionLoweringInfo *FLI, 3679 UniformityInfo *UA) const { 3680 return false; 3681 } 3682 3683 // Lets target to control the following reassociation of operands: (op (op x, 3684 // c1), y) -> (op (op x, y), c1) where N0 is (op x, c1) and N1 is y. By 3685 // default consider profitable any case where N0 has single use. This 3686 // behavior reflects the condition replaced by this target hook call in the 3687 // DAGCombiner. Any particular target can implement its own heuristic to 3688 // restrict common combiner. isReassocProfitable(SelectionDAG & DAG,SDValue N0,SDValue N1)3689 virtual bool isReassocProfitable(SelectionDAG &DAG, SDValue N0, 3690 SDValue N1) const { 3691 return N0.hasOneUse(); 3692 } 3693 3694 // Lets target to control the following reassociation of operands: (op (op x, 3695 // c1), y) -> (op (op x, y), c1) where N0 is (op x, c1) and N1 is y. By 3696 // default consider profitable any case where N0 has single use. This 3697 // behavior reflects the condition replaced by this target hook call in the 3698 // combiner. Any particular target can implement its own heuristic to 3699 // restrict common combiner. 
isReassocProfitable(MachineRegisterInfo & MRI,Register N0,Register N1)3700 virtual bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0, 3701 Register N1) const { 3702 return MRI.hasOneNonDBGUse(N0); 3703 } 3704 isSDNodeAlwaysUniform(const SDNode * N)3705 virtual bool isSDNodeAlwaysUniform(const SDNode * N) const { 3706 return false; 3707 } 3708 3709 /// Returns true by value, base pointer and offset pointer and addressing mode 3710 /// by reference if the node's address can be legally represented as 3711 /// pre-indexed load / store address. getPreIndexedAddressParts(SDNode *,SDValue &,SDValue &,ISD::MemIndexedMode &,SelectionDAG &)3712 virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/, 3713 SDValue &/*Offset*/, 3714 ISD::MemIndexedMode &/*AM*/, 3715 SelectionDAG &/*DAG*/) const { 3716 return false; 3717 } 3718 3719 /// Returns true by value, base pointer and offset pointer and addressing mode 3720 /// by reference if this node can be combined with a load / store to form a 3721 /// post-indexed load / store. getPostIndexedAddressParts(SDNode *,SDNode *,SDValue &,SDValue &,ISD::MemIndexedMode &,SelectionDAG &)3722 virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/, 3723 SDValue &/*Base*/, 3724 SDValue &/*Offset*/, 3725 ISD::MemIndexedMode &/*AM*/, 3726 SelectionDAG &/*DAG*/) const { 3727 return false; 3728 } 3729 3730 /// Returns true if the specified base+offset is a legal indexed addressing 3731 /// mode for this target. \p MI is the load or store instruction that is being 3732 /// considered for transformation. isIndexingLegal(MachineInstr & MI,Register Base,Register Offset,bool IsPre,MachineRegisterInfo & MRI)3733 virtual bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset, 3734 bool IsPre, MachineRegisterInfo &MRI) const { 3735 return false; 3736 } 3737 3738 /// Return the entry encoding for a jump table in the current function. The 3739 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum. 3740 virtual unsigned getJumpTableEncoding() const; 3741 3742 virtual const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *,const MachineBasicBlock *,unsigned,MCContext &)3743 LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/, 3744 const MachineBasicBlock * /*MBB*/, unsigned /*uid*/, 3745 MCContext &/*Ctx*/) const { 3746 llvm_unreachable("Need to implement this hook if target has custom JTIs"); 3747 } 3748 3749 /// Returns relocation base for the given PIC jumptable. 3750 virtual SDValue getPICJumpTableRelocBase(SDValue Table, 3751 SelectionDAG &DAG) const; 3752 3753 /// This returns the relocation base for the given PIC jumptable, the same as 3754 /// getPICJumpTableRelocBase, but as an MCExpr. 3755 virtual const MCExpr * 3756 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 3757 unsigned JTI, MCContext &Ctx) const; 3758 3759 /// Return true if folding a constant offset with the given GlobalAddress is 3760 /// legal. It is frequently not legal in PIC relocation models. 3761 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const; 3762 3763 /// On x86, return true if the operand with index OpNo is a CALL or JUMP 3764 /// instruction, which can use either a memory constraint or an address 3765 /// constraint. -fasm-blocks "__asm call foo" lowers to 3766 /// call void asm sideeffect inteldialect "call ${0:P}", "*m..." 3767 /// 3768 /// This function is used by a hack to choose the address constraint, 3769 /// lowering to a direct call. 
3770 virtual bool isInlineAsmTargetBranch(const SmallVectorImpl<StringRef> & AsmStrs,unsigned OpNo)3771 isInlineAsmTargetBranch(const SmallVectorImpl<StringRef> &AsmStrs, 3772 unsigned OpNo) const { 3773 return false; 3774 } 3775 3776 bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, 3777 SDValue &Chain) const; 3778 3779 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, 3780 SDValue &NewRHS, ISD::CondCode &CCCode, 3781 const SDLoc &DL, const SDValue OldLHS, 3782 const SDValue OldRHS) const; 3783 3784 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, 3785 SDValue &NewRHS, ISD::CondCode &CCCode, 3786 const SDLoc &DL, const SDValue OldLHS, 3787 const SDValue OldRHS, SDValue &Chain, 3788 bool IsSignaling = false) const; 3789 3790 /// Returns a pair of (return value, chain). 3791 /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC. 3792 std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, 3793 EVT RetVT, ArrayRef<SDValue> Ops, 3794 MakeLibCallOptions CallOptions, 3795 const SDLoc &dl, 3796 SDValue Chain = SDValue()) const; 3797 3798 /// Check whether parameters to a call that are passed in callee saved 3799 /// registers are the same as from the calling function. This needs to be 3800 /// checked for tail call eligibility. 3801 bool parametersInCSRMatch(const MachineRegisterInfo &MRI, 3802 const uint32_t *CallerPreservedMask, 3803 const SmallVectorImpl<CCValAssign> &ArgLocs, 3804 const SmallVectorImpl<SDValue> &OutVals) const; 3805 3806 //===--------------------------------------------------------------------===// 3807 // TargetLowering Optimization Methods 3808 // 3809 3810 /// A convenience struct that encapsulates a DAG, and two SDValues for 3811 /// returning information from TargetLowering to its clients that want to 3812 /// combine. 3813 struct TargetLoweringOpt { 3814 SelectionDAG &DAG; 3815 bool LegalTys; 3816 bool LegalOps; 3817 SDValue Old; 3818 SDValue New; 3819 TargetLoweringOptTargetLoweringOpt3820 explicit TargetLoweringOpt(SelectionDAG &InDAG, 3821 bool LT, bool LO) : 3822 DAG(InDAG), LegalTys(LT), LegalOps(LO) {} 3823 LegalTypesTargetLoweringOpt3824 bool LegalTypes() const { return LegalTys; } LegalOperationsTargetLoweringOpt3825 bool LegalOperations() const { return LegalOps; } 3826 CombineToTargetLoweringOpt3827 bool CombineTo(SDValue O, SDValue N) { 3828 Old = O; 3829 New = N; 3830 return true; 3831 } 3832 }; 3833 3834 /// Determines the optimal series of memory ops to replace the memset / memcpy. 3835 /// Return true if the number of memory ops is below the threshold (Limit). 3836 /// Note that this is always the case when Limit is ~0. 3837 /// It returns the types of the sequence of memory ops to perform 3838 /// memset / memcpy by reference. 3839 virtual bool 3840 findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit, 3841 const MemOp &Op, unsigned DstAS, unsigned SrcAS, 3842 const AttributeList &FuncAttributes) const; 3843 3844 /// Check to see if the specified operand of the specified instruction is a 3845 /// constant integer. If so, check to see if there are any bits set in the 3846 /// constant that are not demanded. If so, shrink the constant and return 3847 /// true. 3848 bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, 3849 const APInt &DemandedElts, 3850 TargetLoweringOpt &TLO) const; 3851 3852 /// Helper wrapper around ShrinkDemandedConstant, demanding all elements. 
3853 bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, 3854 TargetLoweringOpt &TLO) const; 3855 3856 // Target hook to do target-specific const optimization, which is called by 3857 // ShrinkDemandedConstant. This function should return true if the target 3858 // doesn't want ShrinkDemandedConstant to further optimize the constant. targetShrinkDemandedConstant(SDValue Op,const APInt & DemandedBits,const APInt & DemandedElts,TargetLoweringOpt & TLO)3859 virtual bool targetShrinkDemandedConstant(SDValue Op, 3860 const APInt &DemandedBits, 3861 const APInt &DemandedElts, 3862 TargetLoweringOpt &TLO) const { 3863 return false; 3864 } 3865 3866 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. 3867 /// This uses isTruncateFree/isZExtFree and ANY_EXTEND for the widening cast, 3868 /// but it could be generalized for targets with other types of implicit 3869 /// widening casts. 3870 bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, 3871 const APInt &DemandedBits, 3872 TargetLoweringOpt &TLO) const; 3873 3874 /// Look at Op. At this point, we know that only the DemandedBits bits of the 3875 /// result of Op are ever used downstream. If we can use this information to 3876 /// simplify Op, create a new simplified DAG node and return true, returning 3877 /// the original and new nodes in Old and New. Otherwise, analyze the 3878 /// expression and return a mask of KnownOne and KnownZero bits for the 3879 /// expression (used to simplify the caller). The KnownZero/One bits may only 3880 /// be accurate for those bits in the Demanded masks. 3881 /// \p AssumeSingleUse When this parameter is true, this function will 3882 /// attempt to simplify \p Op even if there are multiple uses. 3883 /// Callers are responsible for correctly updating the DAG based on the 3884 /// results of this function, because simply replacing TLO.Old 3885 /// with TLO.New will be incorrect when this parameter is true and TLO.Old 3886 /// has multiple uses. 3887 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, 3888 const APInt &DemandedElts, KnownBits &Known, 3889 TargetLoweringOpt &TLO, unsigned Depth = 0, 3890 bool AssumeSingleUse = false) const; 3891 3892 /// Helper wrapper around SimplifyDemandedBits, demanding all elements. 3893 /// Adds Op back to the worklist upon success. 3894 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, 3895 KnownBits &Known, TargetLoweringOpt &TLO, 3896 unsigned Depth = 0, 3897 bool AssumeSingleUse = false) const; 3898 3899 /// Helper wrapper around SimplifyDemandedBits. 3900 /// Adds Op back to the worklist upon success. 3901 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, 3902 DAGCombinerInfo &DCI) const; 3903 3904 /// Helper wrapper around SimplifyDemandedBits. 3905 /// Adds Op back to the worklist upon success. 3906 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, 3907 const APInt &DemandedElts, 3908 DAGCombinerInfo &DCI) const; 3909 3910 /// More limited version of SimplifyDemandedBits that can be used to "look 3911 /// through" ops that don't contribute to the DemandedBits/DemandedElts - 3912 /// bitwise ops etc. 3913 SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, 3914 const APInt &DemandedElts, 3915 SelectionDAG &DAG, 3916 unsigned Depth = 0) const; 3917 3918 /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all 3919 /// elements. 
3920 SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, 3921 SelectionDAG &DAG, 3922 unsigned Depth = 0) const; 3923 3924 /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all 3925 /// bits from only some vector elements. 3926 SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op, 3927 const APInt &DemandedElts, 3928 SelectionDAG &DAG, 3929 unsigned Depth = 0) const; 3930 3931 /// Look at Vector Op. At this point, we know that only the DemandedElts 3932 /// elements of the result of Op are ever used downstream. If we can use 3933 /// this information to simplify Op, create a new simplified DAG node and 3934 /// return true, storing the original and new nodes in TLO. 3935 /// Otherwise, analyze the expression and return a mask of KnownUndef and 3936 /// KnownZero elements for the expression (used to simplify the caller). 3937 /// The KnownUndef/Zero elements may only be accurate for those bits 3938 /// in the DemandedMask. 3939 /// \p AssumeSingleUse When this parameter is true, this function will 3940 /// attempt to simplify \p Op even if there are multiple uses. 3941 /// Callers are responsible for correctly updating the DAG based on the 3942 /// results of this function, because simply replacing TLO.Old 3943 /// with TLO.New will be incorrect when this parameter is true and TLO.Old 3944 /// has multiple uses. 3945 bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, 3946 APInt &KnownUndef, APInt &KnownZero, 3947 TargetLoweringOpt &TLO, unsigned Depth = 0, 3948 bool AssumeSingleUse = false) const; 3949 3950 /// Helper wrapper around SimplifyDemandedVectorElts. 3951 /// Adds Op back to the worklist upon success. 3952 bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts, 3953 DAGCombinerInfo &DCI) const; 3954 3955 /// Return true if the target supports simplifying demanded vector elements by 3956 /// converting them to undefs. 3957 virtual bool shouldSimplifyDemandedVectorElts(SDValue Op,const TargetLoweringOpt & TLO)3958 shouldSimplifyDemandedVectorElts(SDValue Op, 3959 const TargetLoweringOpt &TLO) const { 3960 return true; 3961 } 3962 3963 /// Determine which of the bits specified in Mask are known to be either zero 3964 /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts 3965 /// argument allows us to only collect the known bits that are shared by the 3966 /// requested vector elements. 3967 virtual void computeKnownBitsForTargetNode(const SDValue Op, 3968 KnownBits &Known, 3969 const APInt &DemandedElts, 3970 const SelectionDAG &DAG, 3971 unsigned Depth = 0) const; 3972 3973 /// Determine which of the bits specified in Mask are known to be either zero 3974 /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts 3975 /// argument allows us to only collect the known bits that are shared by the 3976 /// requested vector elements. This is for GISel. 3977 virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis, 3978 Register R, KnownBits &Known, 3979 const APInt &DemandedElts, 3980 const MachineRegisterInfo &MRI, 3981 unsigned Depth = 0) const; 3982 3983 /// Determine the known alignment for the pointer value \p R. This is can 3984 /// typically be inferred from the number of low known 0 bits. However, for a 3985 /// pointer with a non-integral address space, the alignment value may be 3986 /// independent from the known low bits. 
3987 virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis, 3988 Register R, 3989 const MachineRegisterInfo &MRI, 3990 unsigned Depth = 0) const; 3991 3992 /// Determine which of the bits of FrameIndex \p FIOp are known to be 0. 3993 /// Default implementation computes low bits based on alignment 3994 /// information. This should preserve known bits passed into it. 3995 virtual void computeKnownBitsForFrameIndex(int FIOp, 3996 KnownBits &Known, 3997 const MachineFunction &MF) const; 3998 3999 /// This method can be implemented by targets that want to expose additional 4000 /// information about sign bits to the DAG Combiner. The DemandedElts 4001 /// argument allows us to only collect the minimum sign bits that are shared 4002 /// by the requested vector elements. 4003 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, 4004 const APInt &DemandedElts, 4005 const SelectionDAG &DAG, 4006 unsigned Depth = 0) const; 4007 4008 /// This method can be implemented by targets that want to expose additional 4009 /// information about sign bits to GlobalISel combiners. The DemandedElts 4010 /// argument allows us to only collect the minimum sign bits that are shared 4011 /// by the requested vector elements. 4012 virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis, 4013 Register R, 4014 const APInt &DemandedElts, 4015 const MachineRegisterInfo &MRI, 4016 unsigned Depth = 0) const; 4017 4018 /// Attempt to simplify any target nodes based on the demanded vector 4019 /// elements, returning true on success. Otherwise, analyze the expression and 4020 /// return a mask of KnownUndef and KnownZero elements for the expression 4021 /// (used to simplify the caller). The KnownUndef/Zero elements may only be 4022 /// accurate for those bits in the DemandedMask. 4023 virtual bool SimplifyDemandedVectorEltsForTargetNode( 4024 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, 4025 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const; 4026 4027 /// Attempt to simplify any target nodes based on the demanded bits/elts, 4028 /// returning true on success. Otherwise, analyze the 4029 /// expression and return a mask of KnownOne and KnownZero bits for the 4030 /// expression (used to simplify the caller). The KnownZero/One bits may only 4031 /// be accurate for those bits in the Demanded masks. 4032 virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, 4033 const APInt &DemandedBits, 4034 const APInt &DemandedElts, 4035 KnownBits &Known, 4036 TargetLoweringOpt &TLO, 4037 unsigned Depth = 0) const; 4038 4039 /// More limited version of SimplifyDemandedBits that can be used to "look 4040 /// through" ops that don't contribute to the DemandedBits/DemandedElts - 4041 /// bitwise ops etc. 4042 virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode( 4043 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 4044 SelectionDAG &DAG, unsigned Depth) const; 4045 4046 /// Return true if this function can prove that \p Op is never poison 4047 /// and, if \p PoisonOnly is false, does not have undef bits. The DemandedElts 4048 /// argument limits the check to the requested vector elements. 4049 virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode( 4050 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 4051 bool PoisonOnly, unsigned Depth) const; 4052 4053 /// Return true if Op can create undef or poison from non-undef & non-poison 4054 /// operands. 
The DemandedElts argument limits the check to the requested 4055 /// vector elements. 4056 virtual bool 4057 canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, 4058 const SelectionDAG &DAG, bool PoisonOnly, 4059 bool ConsiderFlags, unsigned Depth) const; 4060 4061 /// Tries to build a legal vector shuffle using the provided parameters 4062 /// or equivalent variations. The Mask argument maybe be modified as the 4063 /// function tries different variations. 4064 /// Returns an empty SDValue if the operation fails. 4065 SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, 4066 SDValue N1, MutableArrayRef<int> Mask, 4067 SelectionDAG &DAG) const; 4068 4069 /// This method returns the constant pool value that will be loaded by LD. 4070 /// NOTE: You must check for implicit extensions of the constant by LD. 4071 virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const; 4072 4073 /// If \p SNaN is false, \returns true if \p Op is known to never be any 4074 /// NaN. If \p sNaN is true, returns if \p Op is known to never be a signaling 4075 /// NaN. 4076 virtual bool isKnownNeverNaNForTargetNode(SDValue Op, 4077 const SelectionDAG &DAG, 4078 bool SNaN = false, 4079 unsigned Depth = 0) const; 4080 4081 /// Return true if vector \p Op has the same value across all \p DemandedElts, 4082 /// indicating any elements which may be undef in the output \p UndefElts. 4083 virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, 4084 APInt &UndefElts, 4085 const SelectionDAG &DAG, 4086 unsigned Depth = 0) const; 4087 4088 /// Returns true if the given Opc is considered a canonical constant for the 4089 /// target, which should not be transformed back into a BUILD_VECTOR. isTargetCanonicalConstantNode(SDValue Op)4090 virtual bool isTargetCanonicalConstantNode(SDValue Op) const { 4091 return Op.getOpcode() == ISD::SPLAT_VECTOR || 4092 Op.getOpcode() == ISD::SPLAT_VECTOR_PARTS; 4093 } 4094 4095 struct DAGCombinerInfo { 4096 void *DC; // The DAG Combiner object. 4097 CombineLevel Level; 4098 bool CalledByLegalizer; 4099 4100 public: 4101 SelectionDAG &DAG; 4102 DAGCombinerInfoDAGCombinerInfo4103 DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc) 4104 : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {} 4105 isBeforeLegalizeDAGCombinerInfo4106 bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; } isBeforeLegalizeOpsDAGCombinerInfo4107 bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; } isAfterLegalizeDAGDAGCombinerInfo4108 bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; } getDAGCombineLevelDAGCombinerInfo4109 CombineLevel getDAGCombineLevel() { return Level; } isCalledByLegalizerDAGCombinerInfo4110 bool isCalledByLegalizer() const { return CalledByLegalizer; } 4111 4112 void AddToWorklist(SDNode *N); 4113 SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true); 4114 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true); 4115 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true); 4116 4117 bool recursivelyDeleteUnusedNodes(SDNode *N); 4118 4119 void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO); 4120 }; 4121 4122 /// Return if the N is a constant or constant vector equal to the true value 4123 /// from getBooleanContents(). 4124 bool isConstTrueVal(SDValue N) const; 4125 4126 /// Return if the N is a constant or constant vector equal to the false value 4127 /// from getBooleanContents(). 
  bool isConstFalseVal(SDValue N) const;

  /// Return if \p N is a True value when extended to \p VT.
  bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;

  /// Try to simplify a setcc built with the specified operands and cc. If it is
  /// unable to simplify it, return a null SDValue.
  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                        bool foldBooleans, DAGCombinerInfo &DCI,
                        const SDLoc &dl) const;

  // For targets which wrap address, unwrap for analysis.
  virtual SDValue unwrapAddress(SDValue N) const { return N; }

  /// Returns true (and the GlobalValue and the offset) if the node is a
  /// GlobalAddress + offset.
  virtual bool
  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

  /// This method will be invoked for all target nodes and for any
  /// target-independent nodes that the target has registered to be invoked
  /// for.
  ///
  /// The semantics are as follows:
  /// Return Value:
  ///   SDValue.Val == 0 - No change was made
  ///   SDValue.Val == N - N was replaced, is dead, and is already handled.
  ///   otherwise        - N should be replaced by the returned Operand.
  ///
  /// In addition, methods provided by DAGCombinerInfo may be used to perform
  /// more complex transformations.
  ///
  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  /// Return true if it is profitable to move this shift by a constant amount
  /// through its operand, adjusting any immediate operands as necessary to
  /// preserve semantics. This transformation may not be desirable if it
  /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
  /// extraction in AArch64). By default, it returns true.
  ///
  /// @param N the shift node
  /// @param Level the current DAGCombine legalization level.
  virtual bool isDesirableToCommuteWithShift(const SDNode *N,
                                             CombineLevel Level) const {
    return true;
  }

  /// GlobalISel - return true if it is profitable to move this shift by a
  /// constant amount through its operand, adjusting any immediate operands as
  /// necessary to preserve semantics. This transformation may not be desirable
  /// if it disrupts a particularly auspicious target-specific tree (e.g.
  /// bitfield extraction in AArch64). By default, it returns true.
  ///
  /// @param MI the shift instruction
  /// @param IsAfterLegal true if running after legalization.
  virtual bool isDesirableToCommuteWithShift(const MachineInstr &MI,
                                             bool IsAfterLegal) const {
    return true;
  }

  /// GlobalISel - return true if it's profitable to perform the combine:
  /// shl ([sza]ext x), y => zext (shl x, y)
  virtual bool isDesirableToPullExtFromShl(const MachineInstr &MI) const {
    return true;
  }
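  // Illustrative sketch of overriding the isDesirableToCommuteWithShift hook
  // above: a target that selects (srl (shl x, c1), c2) as a single
  // bitfield-extract instruction may want to stop the DAGCombiner from
  // commuting a shift through another shift and breaking that pattern. The
  // target class name and the exact heuristic are hypothetical.
  //
  // \code
  //   bool MyTargetLowering::isDesirableToCommuteWithShift(
  //       const SDNode *N, CombineLevel Level) const {
  //     // Keep shl/srl pairs intact so instruction selection can still match
  //     // them as one bitfield extract.
  //     SDValue Op = N->getOperand(0);
  //     if (Op.getOpcode() == ISD::SHL || Op.getOpcode() == ISD::SRL)
  //       return false;
  //     return true;
  //   }
  // \endcode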
  // Return AndOrSETCCFoldKind::{AddAnd, ABS} if it is desirable to try and
  // optimize LogicOp(SETCC0, SETCC1). An example (what is implemented as of
  // writing this) is:
  //    With C as a power of 2 and C != 0 and C != INT_MIN:
  //    AddAnd:
  //      (icmp eq A, C) | (icmp eq A, -C)
  //              -> (icmp eq and(add(A, C), ~(C + C)), 0)
  //      (icmp ne A, C) & (icmp ne A, -C)
  //              -> (icmp ne and(add(A, C), ~(C + C)), 0)
  //    ABS:
  //      (icmp eq A, C) | (icmp eq A, -C)
  //              -> (icmp eq Abs(A), C)
  //      (icmp ne A, C) & (icmp ne A, -C)
  //              -> (icmp ne Abs(A), C)
  //
  // @param LogicOp the logic op
  // @param SETCC0 the first of the SETCC nodes
  // @param SETCC1 the second of the SETCC nodes
  virtual AndOrSETCCFoldKind isDesirableToCombineLogicOpOfSETCC(
      const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const {
    return AndOrSETCCFoldKind::None;
  }

  /// Return true if it is profitable to combine an XOR of a logical shift
  /// to create a logical shift of NOT. This transformation may not be desirable
  /// if it disrupts a particularly auspicious target-specific tree (e.g.
  /// BIC on ARM/AArch64). By default, it returns true.
  virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const {
    return true;
  }

  /// Return true if the target has native support for the specified value type
  /// and it is 'desirable' to use the type for the given node type. e.g. On x86
  /// i16 is legal, but undesirable since i16 instruction encodings are longer
  /// and some i16 instructions are slow.
  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
    // By default, assume all legal types are desirable.
    return isTypeLegal(VT);
  }

  /// Return true if it is profitable for dag combiner to transform a floating
  /// point op of specified opcode to an equivalent op of an integer
  /// type. e.g. f32 load -> i32 load can be profitable on ARM.
  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
                                                 EVT /*VT*/) const {
    return false;
  }

  /// This method queries the target whether it is beneficial for dag combiner
  /// to promote the specified node. If true, it should return the desired
  /// promotion type by reference.
  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
    return false;
  }

  /// Return true if the target supports swifterror attribute. It optimizes
  /// loads and stores to reading and writing a specific register.
  virtual bool supportSwiftError() const {
    return false;
  }

  /// Return true if the target supports that a subset of CSRs for the given
  /// machine function is handled explicitly via copies.
  virtual bool supportSplitCSR(MachineFunction *MF) const {
    return false;
  }

  /// Return true if the target supports kcfi operand bundles.
  virtual bool supportKCFIBundles() const { return false; }

  /// Perform necessary initialization to handle a subset of CSRs explicitly
  /// via copies. This function is called at the beginning of instruction
  /// selection.
initializeSplitCSR(MachineBasicBlock * Entry)4267 virtual void initializeSplitCSR(MachineBasicBlock *Entry) const { 4268 llvm_unreachable("Not Implemented"); 4269 } 4270 4271 /// Insert explicit copies in entry and exit blocks. We copy a subset of 4272 /// CSRs to virtual registers in the entry block, and copy them back to 4273 /// physical registers in the exit blocks. This function is called at the end 4274 /// of instruction selection. insertCopiesSplitCSR(MachineBasicBlock * Entry,const SmallVectorImpl<MachineBasicBlock * > & Exits)4275 virtual void insertCopiesSplitCSR( 4276 MachineBasicBlock *Entry, 4277 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 4278 llvm_unreachable("Not Implemented"); 4279 } 4280 4281 /// Return the newly negated expression if the cost is not expensive and 4282 /// set the cost in \p Cost to indicate that if it is cheaper or neutral to 4283 /// do the negation. 4284 virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, 4285 bool LegalOps, bool OptForSize, 4286 NegatibleCost &Cost, 4287 unsigned Depth = 0) const; 4288 4289 SDValue getCheaperOrNeutralNegatedExpression( 4290 SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, 4291 const NegatibleCost CostThreshold = NegatibleCost::Neutral, 4292 unsigned Depth = 0) const { 4293 NegatibleCost Cost = NegatibleCost::Expensive; 4294 SDValue Neg = 4295 getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth); 4296 if (!Neg) 4297 return SDValue(); 4298 4299 if (Cost <= CostThreshold) 4300 return Neg; 4301 4302 // Remove the new created node to avoid the side effect to the DAG. 4303 if (Neg->use_empty()) 4304 DAG.RemoveDeadNode(Neg.getNode()); 4305 return SDValue(); 4306 } 4307 4308 /// This is the helper function to return the newly negated expression only 4309 /// when the cost is cheaper. 4310 SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG, 4311 bool LegalOps, bool OptForSize, 4312 unsigned Depth = 0) const { 4313 return getCheaperOrNeutralNegatedExpression(Op, DAG, LegalOps, OptForSize, 4314 NegatibleCost::Cheaper, Depth); 4315 } 4316 4317 /// This is the helper function to return the newly negated expression if 4318 /// the cost is not expensive. 4319 SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, 4320 bool OptForSize, unsigned Depth = 0) const { 4321 NegatibleCost Cost = NegatibleCost::Expensive; 4322 return getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth); 4323 } 4324 4325 //===--------------------------------------------------------------------===// 4326 // Lowering methods - These methods must be implemented by targets so that 4327 // the SelectionDAGBuilder code knows how to lower these. 4328 // 4329 4330 /// Target-specific splitting of values into parts that fit a register 4331 /// storing a legal type splitValueIntoRegisterParts(SelectionDAG & DAG,const SDLoc & DL,SDValue Val,SDValue * Parts,unsigned NumParts,MVT PartVT,std::optional<CallingConv::ID> CC)4332 virtual bool splitValueIntoRegisterParts( 4333 SelectionDAG & DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, 4334 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const { 4335 return false; 4336 } 4337 4338 /// Allows the target to handle physreg-carried dependency 4339 /// in target-specific way. Used from the ScheduleDAGSDNodes to decide whether 4340 /// to add the edge to the dependency graph. 
  /// Def - input: Selection DAG node defining physical register
  /// User - input: Selection DAG node using physical register
  /// Op - input: Number of User operand
  /// PhysReg - inout: set to the physical register if the edge is necessary,
  ///           unchanged otherwise
  /// Cost - inout: physical register copy cost.
  /// Returns 'true' if the edge is necessary, 'false' otherwise.
  virtual bool checkForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                         const TargetRegisterInfo *TRI,
                                         const TargetInstrInfo *TII,
                                         unsigned &PhysReg, int &Cost) const {
    return false;
  }

  /// Target-specific combining of register parts into its original value
  virtual SDValue
  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
                             const SDValue *Parts, unsigned NumParts,
                             MVT PartVT, EVT ValueVT,
                             std::optional<CallingConv::ID> CC) const {
    return SDValue();
  }

  /// This hook must be implemented to lower the incoming (formal) arguments,
  /// described by the Ins array, into the specified DAG. The implementation
  /// should fill in the InVals array with legal-type argument values, and
  /// return the resulting token chain value.
  virtual SDValue LowerFormalArguments(
      SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
      const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
      SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// This structure contains all information that is necessary for lowering
  /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
  /// needs to lower a call, and targets will see this struct in their LowerCall
  /// implementation.
  struct CallLoweringInfo {
    SDValue Chain;
    Type *RetTy = nullptr;
    bool RetSExt : 1;
    bool RetZExt : 1;
    bool IsVarArg : 1;
    bool IsInReg : 1;
    bool DoesNotReturn : 1;
    bool IsReturnValueUsed : 1;
    bool IsConvergent : 1;
    bool IsPatchPoint : 1;
    bool IsPreallocated : 1;
    bool NoMerge : 1;

    // IsTailCall should be modified by implementations of
    // TargetLowering::LowerCall that perform tail call conversions.
    bool IsTailCall = false;

    // Is Call lowering done post SelectionDAG type legalization.
4398 bool IsPostTypeLegalization = false; 4399 4400 unsigned NumFixedArgs = -1; 4401 CallingConv::ID CallConv = CallingConv::C; 4402 SDValue Callee; 4403 ArgListTy Args; 4404 SelectionDAG &DAG; 4405 SDLoc DL; 4406 const CallBase *CB = nullptr; 4407 SmallVector<ISD::OutputArg, 32> Outs; 4408 SmallVector<SDValue, 32> OutVals; 4409 SmallVector<ISD::InputArg, 32> Ins; 4410 SmallVector<SDValue, 4> InVals; 4411 const ConstantInt *CFIType = nullptr; 4412 CallLoweringInfoCallLoweringInfo4413 CallLoweringInfo(SelectionDAG &DAG) 4414 : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false), 4415 DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false), 4416 IsPatchPoint(false), IsPreallocated(false), NoMerge(false), 4417 DAG(DAG) {} 4418 setDebugLocCallLoweringInfo4419 CallLoweringInfo &setDebugLoc(const SDLoc &dl) { 4420 DL = dl; 4421 return *this; 4422 } 4423 setChainCallLoweringInfo4424 CallLoweringInfo &setChain(SDValue InChain) { 4425 Chain = InChain; 4426 return *this; 4427 } 4428 4429 // setCallee with target/module-specific attributes setLibCalleeCallLoweringInfo4430 CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType, 4431 SDValue Target, ArgListTy &&ArgsList) { 4432 RetTy = ResultType; 4433 Callee = Target; 4434 CallConv = CC; 4435 NumFixedArgs = ArgsList.size(); 4436 Args = std::move(ArgsList); 4437 4438 DAG.getTargetLoweringInfo().markLibCallAttributes( 4439 &(DAG.getMachineFunction()), CC, Args); 4440 return *this; 4441 } 4442 4443 CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType, 4444 SDValue Target, ArgListTy &&ArgsList, 4445 AttributeSet ResultAttrs = {}) { 4446 RetTy = ResultType; 4447 IsInReg = ResultAttrs.hasAttribute(Attribute::InReg); 4448 RetSExt = ResultAttrs.hasAttribute(Attribute::SExt); 4449 RetZExt = ResultAttrs.hasAttribute(Attribute::ZExt); 4450 NoMerge = ResultAttrs.hasAttribute(Attribute::NoMerge); 4451 4452 Callee = Target; 4453 CallConv = CC; 4454 NumFixedArgs = ArgsList.size(); 4455 Args = std::move(ArgsList); 4456 return *this; 4457 } 4458 setCalleeCallLoweringInfo4459 CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy, 4460 SDValue Target, ArgListTy &&ArgsList, 4461 const CallBase &Call) { 4462 RetTy = ResultType; 4463 4464 IsInReg = Call.hasRetAttr(Attribute::InReg); 4465 DoesNotReturn = 4466 Call.doesNotReturn() || 4467 (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode())); 4468 IsVarArg = FTy->isVarArg(); 4469 IsReturnValueUsed = !Call.use_empty(); 4470 RetSExt = Call.hasRetAttr(Attribute::SExt); 4471 RetZExt = Call.hasRetAttr(Attribute::ZExt); 4472 NoMerge = Call.hasFnAttr(Attribute::NoMerge); 4473 4474 Callee = Target; 4475 4476 CallConv = Call.getCallingConv(); 4477 NumFixedArgs = FTy->getNumParams(); 4478 Args = std::move(ArgsList); 4479 4480 CB = &Call; 4481 4482 return *this; 4483 } 4484 4485 CallLoweringInfo &setInRegister(bool Value = true) { 4486 IsInReg = Value; 4487 return *this; 4488 } 4489 4490 CallLoweringInfo &setNoReturn(bool Value = true) { 4491 DoesNotReturn = Value; 4492 return *this; 4493 } 4494 4495 CallLoweringInfo &setVarArg(bool Value = true) { 4496 IsVarArg = Value; 4497 return *this; 4498 } 4499 4500 CallLoweringInfo &setTailCall(bool Value = true) { 4501 IsTailCall = Value; 4502 return *this; 4503 } 4504 4505 CallLoweringInfo &setDiscardResult(bool Value = true) { 4506 IsReturnValueUsed = !Value; 4507 return *this; 4508 } 4509 4510 CallLoweringInfo &setConvergent(bool Value = true) { 4511 IsConvergent = Value; 4512 return *this; 4513 } 4514 4515 
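    // Illustrative usage sketch: code that needs to lower a call, for example
    // a target's custom expansion of an operation into a runtime routine,
    // typically fills in a CallLoweringInfo with the chained setters and hands
    // it to LowerCallTo. The names DAG, dl, Chain, Callee, RetTy and Args
    // below are assumed to be supplied by the surrounding lowering code.
    //
    // \code
    //   TargetLowering::CallLoweringInfo CLI(DAG);
    //   CLI.setDebugLoc(dl)
    //       .setChain(Chain)
    //       .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args))
    //       .setTailCall(false)
    //       .setDiscardResult(false);
    //   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
    //   // CallResult.first is the return value, CallResult.second the
    //   // outgoing token chain.
    // \endcode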
CallLoweringInfo &setSExtResult(bool Value = true) { 4516 RetSExt = Value; 4517 return *this; 4518 } 4519 4520 CallLoweringInfo &setZExtResult(bool Value = true) { 4521 RetZExt = Value; 4522 return *this; 4523 } 4524 4525 CallLoweringInfo &setIsPatchPoint(bool Value = true) { 4526 IsPatchPoint = Value; 4527 return *this; 4528 } 4529 4530 CallLoweringInfo &setIsPreallocated(bool Value = true) { 4531 IsPreallocated = Value; 4532 return *this; 4533 } 4534 4535 CallLoweringInfo &setIsPostTypeLegalization(bool Value=true) { 4536 IsPostTypeLegalization = Value; 4537 return *this; 4538 } 4539 setCFITypeCallLoweringInfo4540 CallLoweringInfo &setCFIType(const ConstantInt *Type) { 4541 CFIType = Type; 4542 return *this; 4543 } 4544 getArgsCallLoweringInfo4545 ArgListTy &getArgs() { 4546 return Args; 4547 } 4548 }; 4549 4550 /// This structure is used to pass arguments to makeLibCall function. 4551 struct MakeLibCallOptions { 4552 // By passing type list before soften to makeLibCall, the target hook 4553 // shouldExtendTypeInLibCall can get the original type before soften. 4554 ArrayRef<EVT> OpsVTBeforeSoften; 4555 EVT RetVTBeforeSoften; 4556 bool IsSExt : 1; 4557 bool DoesNotReturn : 1; 4558 bool IsReturnValueUsed : 1; 4559 bool IsPostTypeLegalization : 1; 4560 bool IsSoften : 1; 4561 MakeLibCallOptionsMakeLibCallOptions4562 MakeLibCallOptions() 4563 : IsSExt(false), DoesNotReturn(false), IsReturnValueUsed(true), 4564 IsPostTypeLegalization(false), IsSoften(false) {} 4565 4566 MakeLibCallOptions &setSExt(bool Value = true) { 4567 IsSExt = Value; 4568 return *this; 4569 } 4570 4571 MakeLibCallOptions &setNoReturn(bool Value = true) { 4572 DoesNotReturn = Value; 4573 return *this; 4574 } 4575 4576 MakeLibCallOptions &setDiscardResult(bool Value = true) { 4577 IsReturnValueUsed = !Value; 4578 return *this; 4579 } 4580 4581 MakeLibCallOptions &setIsPostTypeLegalization(bool Value = true) { 4582 IsPostTypeLegalization = Value; 4583 return *this; 4584 } 4585 4586 MakeLibCallOptions &setTypeListBeforeSoften(ArrayRef<EVT> OpsVT, EVT RetVT, 4587 bool Value = true) { 4588 OpsVTBeforeSoften = OpsVT; 4589 RetVTBeforeSoften = RetVT; 4590 IsSoften = Value; 4591 return *this; 4592 } 4593 }; 4594 4595 /// This function lowers an abstract call to a function into an actual call. 4596 /// This returns a pair of operands. The first element is the return value 4597 /// for the function (if RetTy is not VoidTy). The second element is the 4598 /// outgoing token chain. It calls LowerCall to do the actual lowering. 4599 std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const; 4600 4601 /// This hook must be implemented to lower calls into the specified 4602 /// DAG. The outgoing arguments to the call are described by the Outs array, 4603 /// and the values to be returned by the call are described by the Ins 4604 /// array. The implementation should fill in the InVals array with legal-type 4605 /// return values from the call, and return the resulting token chain value. 4606 virtual SDValue LowerCall(CallLoweringInfo &,SmallVectorImpl<SDValue> &)4607 LowerCall(CallLoweringInfo &/*CLI*/, 4608 SmallVectorImpl<SDValue> &/*InVals*/) const { 4609 llvm_unreachable("Not Implemented"); 4610 } 4611 4612 /// Target-specific cleanup for formal ByVal parameters. HandleByVal(CCState *,unsigned &,Align)4613 virtual void HandleByVal(CCState *, unsigned &, Align) const {} 4614 4615 /// This hook should be implemented to check whether the return values 4616 /// described by the Outs array can fit into the return registers. 
  /// If false is returned, an sret-demotion is performed.
  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
                              MachineFunction &/*MF*/, bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                              LLVMContext &/*Context*/) const
  {
    // Return true by default to get preexisting behavior.
    return true;
  }

  /// This hook must be implemented to lower outgoing return values, described
  /// by the Outs array, into the specified DAG. The implementation should
  /// return the resulting token chain value.
  virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                              bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
                              const SmallVectorImpl<SDValue> & /*OutVals*/,
                              const SDLoc & /*dl*/,
                              SelectionDAG & /*DAG*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Return true if the result of the specified node is used by a return node
  /// only. It also computes and returns the input chain for the tail call.
  ///
  /// This is used to determine whether it is possible to codegen a libcall as
  /// a tail call at legalization time.
  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
    return false;
  }

  /// Return true if the target may be able to emit the call instruction as a
  /// tail call. This is used by optimization passes to determine if it's
  /// profitable to duplicate return instructions to enable tailcall
  /// optimization.
  virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
    return false;
  }

  /// Return the builtin name for the __builtin___clear_cache intrinsic.
  /// Default is to invoke the clear cache library call.
  virtual const char * getClearCacheBuiltinName() const {
    return "__clear_cache";
  }

  /// Return the register ID of the name passed in. Used by named register
  /// global variables extension. There is no target-independent behaviour
  /// so the default action is to bail.
  virtual Register getRegisterByName(const char* RegName, LLT Ty,
                                     const MachineFunction &MF) const {
    report_fatal_error("Named registers not implemented for this target");
  }

  /// Return the type that should be used to zero or sign extend a
  /// zeroext/signext integer return value. FIXME: Some C calling conventions
  /// require the return type to be promoted, but this is not true all the time,
  /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
  /// conventions. The frontend should handle this and include all of the
  /// necessary information.
  virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
                                  ISD::NodeType /*ExtendKind*/) const {
    EVT MinVT = getRegisterType(MVT::i32);
    return VT.bitsLT(MinVT) ?
MinVT : VT; 4679 } 4680 4681 /// For some targets, an LLVM struct type must be broken down into multiple 4682 /// simple types, but the calling convention specifies that the entire struct 4683 /// must be passed in a block of consecutive registers. 4684 virtual bool functionArgumentNeedsConsecutiveRegisters(Type * Ty,CallingConv::ID CallConv,bool isVarArg,const DataLayout & DL)4685 functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, 4686 bool isVarArg, 4687 const DataLayout &DL) const { 4688 return false; 4689 } 4690 4691 /// For most targets, an LLVM type must be broken down into multiple 4692 /// smaller types. Usually the halves are ordered according to the endianness 4693 /// but for some platform that would break. So this method will default to 4694 /// matching the endianness but can be overridden. 4695 virtual bool shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout & DL)4696 shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const { 4697 return DL.isLittleEndian(); 4698 } 4699 4700 /// Returns a 0 terminated array of registers that can be safely used as 4701 /// scratch registers. getScratchRegisters(CallingConv::ID CC)4702 virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const { 4703 return nullptr; 4704 } 4705 4706 /// Returns a 0 terminated array of rounding control registers that can be 4707 /// attached into strict FP call. getRoundingControlRegisters()4708 virtual ArrayRef<MCPhysReg> getRoundingControlRegisters() const { 4709 return ArrayRef<MCPhysReg>(); 4710 } 4711 4712 /// This callback is used to prepare for a volatile or atomic load. 4713 /// It takes a chain node as input and returns the chain for the load itself. 4714 /// 4715 /// Having a callback like this is necessary for targets like SystemZ, 4716 /// which allows a CPU to reuse the result of a previous load indefinitely, 4717 /// even if a cache-coherent store is performed by another CPU. The default 4718 /// implementation does nothing. prepareVolatileOrAtomicLoad(SDValue Chain,const SDLoc & DL,SelectionDAG & DAG)4719 virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, 4720 SelectionDAG &DAG) const { 4721 return Chain; 4722 } 4723 4724 /// This callback is invoked by the type legalizer to legalize nodes with an 4725 /// illegal operand type but legal result types. It replaces the 4726 /// LowerOperation callback in the type Legalizer. The reason we can not do 4727 /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to 4728 /// use this callback. 4729 /// 4730 /// TODO: Consider merging with ReplaceNodeResults. 4731 /// 4732 /// The target places new result values for the node in Results (their number 4733 /// and types must exactly match those of the original return values of 4734 /// the node), or leaves Results empty, which indicates that the node is not 4735 /// to be custom lowered after all. 4736 /// The default implementation calls LowerOperation. 4737 virtual void LowerOperationWrapper(SDNode *N, 4738 SmallVectorImpl<SDValue> &Results, 4739 SelectionDAG &DAG) const; 4740 4741 /// This callback is invoked for operations that are unsupported by the 4742 /// target, which are registered to use 'custom' lowering, and whose defined 4743 /// values are all legal. If the target has no operations that require custom 4744 /// lowering, it need not implement this. The default implementation of this 4745 /// aborts. 
4746 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; 4747 4748 /// This callback is invoked when a node result type is illegal for the 4749 /// target, and the operation was registered to use 'custom' lowering for that 4750 /// result type. The target places new result values for the node in Results 4751 /// (their number and types must exactly match those of the original return 4752 /// values of the node), or leaves Results empty, which indicates that the 4753 /// node is not to be custom lowered after all. 4754 /// 4755 /// If the target has no operations that require custom lowering, it need not 4756 /// implement this. The default implementation aborts. ReplaceNodeResults(SDNode *,SmallVectorImpl<SDValue> &,SelectionDAG &)4757 virtual void ReplaceNodeResults(SDNode * /*N*/, 4758 SmallVectorImpl<SDValue> &/*Results*/, 4759 SelectionDAG &/*DAG*/) const { 4760 llvm_unreachable("ReplaceNodeResults not implemented for this target!"); 4761 } 4762 4763 /// This method returns the name of a target specific DAG node. 4764 virtual const char *getTargetNodeName(unsigned Opcode) const; 4765 4766 /// This method returns a target specific FastISel object, or null if the 4767 /// target does not support "fast" ISel. createFastISel(FunctionLoweringInfo &,const TargetLibraryInfo *)4768 virtual FastISel *createFastISel(FunctionLoweringInfo &, 4769 const TargetLibraryInfo *) const { 4770 return nullptr; 4771 } 4772 4773 bool verifyReturnAddressArgumentIsConstant(SDValue Op, 4774 SelectionDAG &DAG) const; 4775 4776 //===--------------------------------------------------------------------===// 4777 // Inline Asm Support hooks 4778 // 4779 4780 /// This hook allows the target to expand an inline asm call to be explicit 4781 /// llvm code if it wants to. This is useful for turning simple inline asms 4782 /// into LLVM intrinsics, which gives the compiler more information about the 4783 /// behavior of the code. ExpandInlineAsm(CallInst *)4784 virtual bool ExpandInlineAsm(CallInst *) const { 4785 return false; 4786 } 4787 4788 enum ConstraintType { 4789 C_Register, // Constraint represents specific register(s). 4790 C_RegisterClass, // Constraint represents any of register(s) in class. 4791 C_Memory, // Memory constraint. 4792 C_Address, // Address constraint. 4793 C_Immediate, // Requires an immediate. 4794 C_Other, // Something else. 4795 C_Unknown // Unsupported constraint. 4796 }; 4797 4798 enum ConstraintWeight { 4799 // Generic weights. 4800 CW_Invalid = -1, // No match. 4801 CW_Okay = 0, // Acceptable. 4802 CW_Good = 1, // Good weight. 4803 CW_Better = 2, // Better weight. 4804 CW_Best = 3, // Best weight. 4805 4806 // Well-known weights. 4807 CW_SpecificReg = CW_Okay, // Specific register operands. 4808 CW_Register = CW_Good, // Register operands. 4809 CW_Memory = CW_Better, // Memory operands. 4810 CW_Constant = CW_Best, // Constant operand. 4811 CW_Default = CW_Okay // Default or don't know type. 4812 }; 4813 4814 /// This contains information for each constraint that we are lowering. 4815 struct AsmOperandInfo : public InlineAsm::ConstraintInfo { 4816 /// This contains the actual string for the code, like "m". TargetLowering 4817 /// picks the 'best' code from ConstraintInfo::Codes that most closely 4818 /// matches the operand. 4819 std::string ConstraintCode; 4820 4821 /// Information about the constraint code, e.g. Register, RegisterClass, 4822 /// Memory, Other, Unknown. 
4823 TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown; 4824 4825 /// If this is the result output operand or a clobber, this is null, 4826 /// otherwise it is the incoming operand to the CallInst. This gets 4827 /// modified as the asm is processed. 4828 Value *CallOperandVal = nullptr; 4829 4830 /// The ValueType for the operand value. 4831 MVT ConstraintVT = MVT::Other; 4832 4833 /// Copy constructor for copying from a ConstraintInfo. AsmOperandInfoAsmOperandInfo4834 AsmOperandInfo(InlineAsm::ConstraintInfo Info) 4835 : InlineAsm::ConstraintInfo(std::move(Info)) {} 4836 4837 /// Return true of this is an input operand that is a matching constraint 4838 /// like "4". 4839 bool isMatchingInputConstraint() const; 4840 4841 /// If this is an input matching constraint, this method returns the output 4842 /// operand it matches. 4843 unsigned getMatchedOperand() const; 4844 }; 4845 4846 using AsmOperandInfoVector = std::vector<AsmOperandInfo>; 4847 4848 /// Split up the constraint string from the inline assembly value into the 4849 /// specific constraints and their prefixes, and also tie in the associated 4850 /// operand values. If this returns an empty vector, and if the constraint 4851 /// string itself isn't empty, there was an error parsing. 4852 virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, 4853 const TargetRegisterInfo *TRI, 4854 const CallBase &Call) const; 4855 4856 /// Examine constraint type and operand type and determine a weight value. 4857 /// The operand object must already have been set up with the operand type. 4858 virtual ConstraintWeight getMultipleConstraintMatchWeight( 4859 AsmOperandInfo &info, int maIndex) const; 4860 4861 /// Examine constraint string and operand type and determine a weight value. 4862 /// The operand object must already have been set up with the operand type. 4863 virtual ConstraintWeight getSingleConstraintMatchWeight( 4864 AsmOperandInfo &info, const char *constraint) const; 4865 4866 /// Determines the constraint code and constraint type to use for the specific 4867 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType. 4868 /// If the actual operand being passed in is available, it can be passed in as 4869 /// Op, otherwise an empty SDValue can be passed. 4870 virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, 4871 SDValue Op, 4872 SelectionDAG *DAG = nullptr) const; 4873 4874 /// Given a constraint, return the type of constraint it is for this target. 4875 virtual ConstraintType getConstraintType(StringRef Constraint) const; 4876 4877 using ConstraintPair = std::pair<StringRef, TargetLowering::ConstraintType>; 4878 using ConstraintGroup = SmallVector<ConstraintPair>; 4879 /// Given an OpInfo with list of constraints codes as strings, return a 4880 /// sorted Vector of pairs of constraint codes and their types in priority of 4881 /// what we'd prefer to lower them as. This may contain immediates that 4882 /// cannot be lowered, but it is meant to be a machine agnostic order of 4883 /// preferences. 4884 ConstraintGroup getConstraintPreferences(AsmOperandInfo &OpInfo) const; 4885 4886 /// Given a physical register constraint (e.g. {edx}), return the register 4887 /// number and the register class for the register. 4888 /// 4889 /// Given a register class constraint, like 'r', if this corresponds directly 4890 /// to an LLVM register class, return a register of 0 and the register class 4891 /// pointer. 
4892 /// 4893 /// This should only be used for C_Register constraints. On error, this 4894 /// returns a register number of 0 and a null register class pointer. 4895 virtual std::pair<unsigned, const TargetRegisterClass *> 4896 getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 4897 StringRef Constraint, MVT VT) const; 4898 4899 virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode)4900 getInlineAsmMemConstraint(StringRef ConstraintCode) const { 4901 if (ConstraintCode == "m") 4902 return InlineAsm::ConstraintCode::m; 4903 if (ConstraintCode == "o") 4904 return InlineAsm::ConstraintCode::o; 4905 if (ConstraintCode == "X") 4906 return InlineAsm::ConstraintCode::X; 4907 if (ConstraintCode == "p") 4908 return InlineAsm::ConstraintCode::p; 4909 return InlineAsm::ConstraintCode::Unknown; 4910 } 4911 4912 /// Try to replace an X constraint, which matches anything, with another that 4913 /// has more specific requirements based on the type of the corresponding 4914 /// operand. This returns null if there is no replacement to make. 4915 virtual const char *LowerXConstraint(EVT ConstraintVT) const; 4916 4917 /// Lower the specified operand into the Ops vector. If it is invalid, don't 4918 /// add anything to Ops. 4919 virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, 4920 std::vector<SDValue> &Ops, 4921 SelectionDAG &DAG) const; 4922 4923 // Lower custom output constraints. If invalid, return SDValue(). 4924 virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, 4925 const SDLoc &DL, 4926 const AsmOperandInfo &OpInfo, 4927 SelectionDAG &DAG) const; 4928 4929 // Targets may override this function to collect operands from the CallInst 4930 // and for example, lower them into the SelectionDAG operands. 4931 virtual void CollectTargetIntrinsicOperands(const CallInst &I, 4932 SmallVectorImpl<SDValue> &Ops, 4933 SelectionDAG &DAG) const; 4934 4935 //===--------------------------------------------------------------------===// 4936 // Div utility functions 4937 // 4938 4939 SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, 4940 SmallVectorImpl<SDNode *> &Created) const; 4941 SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, 4942 SmallVectorImpl<SDNode *> &Created) const; 4943 // Build sdiv by power-of-2 with conditional move instructions 4944 SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor, 4945 SelectionDAG &DAG, 4946 SmallVectorImpl<SDNode *> &Created) const; 4947 4948 /// Targets may override this function to provide custom SDIV lowering for 4949 /// power-of-2 denominators. If the target returns an empty SDValue, LLVM 4950 /// assumes SDIV is expensive and replaces it with a series of other integer 4951 /// operations. 4952 virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, 4953 SelectionDAG &DAG, 4954 SmallVectorImpl<SDNode *> &Created) const; 4955 4956 /// Targets may override this function to provide custom SREM lowering for 4957 /// power-of-2 denominators. If the target returns an empty SDValue, LLVM 4958 /// assumes SREM is expensive and replaces it with a series of other integer 4959 /// operations. 4960 virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, 4961 SelectionDAG &DAG, 4962 SmallVectorImpl<SDNode *> &Created) const; 4963 4964 /// Indicate whether this target prefers to combine FDIVs with the same 4965 /// divisor. If the transform should never be done, return zero. 
  /// Indicate whether this target prefers to combine FDIVs with the same
  /// divisor. If the transform should never be done, return zero. If the
  /// transform should be done, return the minimum number of divisor uses
  /// that must exist.
  virtual unsigned combineRepeatedFPDivisors() const {
    return 0;
  }

  /// Hooks for building estimates in place of slower divisions and square
  /// roots.

  /// Return either a square root or its reciprocal estimate value for the
  /// input operand.
  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified'
  /// or 'Enabled' as set by a potential default override attribute.
  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
  /// refinement iterations required to generate a sufficient (though not
  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
  /// The boolean UseOneConstNR output is used to select a Newton-Raphson
  /// algorithm implementation that uses either one or two constants.
  /// The boolean Reciprocal is used to select whether the estimate is for the
  /// square root of the input operand or the reciprocal of its square root.
  /// A target may choose to implement its own refinement within this function.
  /// If that's true, then return '0' as the number of RefinementSteps to avoid
  /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                  int Enabled, int &RefinementSteps,
                                  bool &UseOneConstNR, bool Reciprocal) const {
    return SDValue();
  }

  /// Try to convert the fminnum/fmaxnum to a compare/select sequence. This is
  /// required for correctness since InstCombine might have canonicalized a
  /// fcmp+select sequence to a FMINNUM/FMAXNUM intrinsic. If we were to fall
  /// through to the default expansion/soften to libcall, we might introduce a
  /// link-time dependency on libm into a file that originally did not have one.
  SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const;

  /// Return a reciprocal estimate value for the input operand.
  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified'
  /// or 'Enabled' as set by a potential default override attribute.
  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
  /// refinement iterations required to generate a sufficient (though not
  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
  /// A target may choose to implement its own refinement within this function.
  /// If that's true, then return '0' as the number of RefinementSteps to avoid
  /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                   int Enabled, int &RefinementSteps) const {
    return SDValue();
  }
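  // Example (illustrative sketch only): a target with a hardware
  // reciprocal-estimate instruction might implement getRecipEstimate() above
  // by emitting its estimate node and requesting one Newton-Raphson step.
  // "MyTargetLowering" and the "MYTGTISD::FRECIPE" opcode are hypothetical.
  //
  //   SDValue
  //   MyTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
  //                                      int Enabled, int &RefinementSteps) const {
  //     EVT VT = Operand.getValueType();
  //     if (VT != MVT::f32)
  //       return SDValue();
  //     if (RefinementSteps == ReciprocalEstimate::Unspecified)
  //       RefinementSteps = 1;
  //     return DAG.getNode(MYTGTISD::FRECIPE, SDLoc(Operand), VT, Operand);
  //   }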
  /// Return a target-dependent comparison result if the input operand is
  /// suitable for use with a square root estimate calculation. For example,
  /// the comparison may check if the operand is NAN, INF, zero, normal, etc.
  /// The result should be used as the condition operand for a select or
  /// branch.
  virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                                   const DenormalMode &Mode) const;

  /// Return a target-dependent result if the input operand is not suitable for
  /// use with a square root estimate calculation.
  virtual SDValue getSqrtResultForDenormInput(SDValue Operand,
                                              SelectionDAG &DAG) const {
    return DAG.getConstantFP(0.0, SDLoc(Operand), Operand.getValueType());
  }

  //===--------------------------------------------------------------------===//
  // Legalization utility functions
  //

  /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
  /// respectively, each computing an n/2-bit part of the result.
  /// \param Result A vector that will be filled with the parts of the result
  ///        in little-endian order.
  /// \param LL Low bits of the LHS of the MUL. You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL. See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
  /// \param RH High bits of the RHS of the MUL. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
  bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS,
                      SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
                      SelectionDAG &DAG, MulExpansionKind Kind,
                      SDValue LL = SDValue(), SDValue LH = SDValue(),
                      SDValue RL = SDValue(), SDValue RH = SDValue()) const;

  /// Expand a MUL into two nodes. One that computes the high bits of
  /// the result and one that computes the low bits.
  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
  /// \param LL Low bits of the LHS of the MUL. You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL. See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
  /// \param RH High bits of the RHS of the MUL. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
                 SelectionDAG &DAG, MulExpansionKind Kind,
                 SDValue LL = SDValue(), SDValue LH = SDValue(),
                 SDValue RL = SDValue(), SDValue RH = SDValue()) const;
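  // Example (illustrative sketch only): splitting an illegal i64 multiply into
  // two i32 halves via expandMUL() above from a target's custom lowering, then
  // rebuilding the wide result; N and DAG are assumed to be in scope.
  //
  //   SDValue Lo, Hi;
  //   if (expandMUL(N, Lo, Hi, MVT::i32, DAG,
  //                 MulExpansionKind::OnlyLegalOrCustom))
  //     return DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i64, Lo, Hi);
  //   return SDValue();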
  /// Attempt to expand an n-bit div/rem/divrem by constant using a n/2-bit
  /// urem by constant and other arithmetic ops. The n/2-bit urem by constant
  /// will be expanded by DAGCombiner. This is not possible for all constant
  /// divisors.
  /// \param N Node to expand
  /// \param Result A vector that will be filled with the lo and high parts of
  ///        the results. For *DIVREM, this will be the quotient parts followed
  ///        by the remainder parts.
  /// \param HiLoVT The value type to use for the Lo and Hi parts. Should be
  ///        half of VT.
  /// \param LL Low bits of the LHS of the operation. You can use this
  ///        parameter if you want to control how low bits are extracted from
  ///        the LHS.
  /// \param LH High bits of the LHS of the operation. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
  bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl<SDValue> &Result,
                              EVT HiLoVT, SelectionDAG &DAG,
                              SDValue LL = SDValue(),
                              SDValue LH = SDValue()) const;

  /// Expand funnel shift.
  /// \param N Node to expand
  /// \returns The expansion if successful, SDValue() otherwise
  SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const;

  /// Expand rotations.
  /// \param N Node to expand
  /// \param AllowVectorOps expand vector rotate, this should only be performed
  ///        if the legalization is happening outside of LegalizeVectorOps
  /// \returns The expansion if successful, SDValue() otherwise
  SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const;

  /// Expand shift-by-parts.
  /// \param N Node to expand
  /// \param Lo lower-output-part after conversion
  /// \param Hi upper-output-part after conversion
  void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi,
                        SelectionDAG &DAG) const;

  /// Expand float(f32) to SINT(i64) conversion
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  /// Expand float to UINT conversion
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \param Chain output chain after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain,
                        SelectionDAG &DAG) const;

  /// Expand UINT(i64) to double(f64) conversion
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \param Chain output chain after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain,
                        SelectionDAG &DAG) const;

  /// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
  SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const;

  /// Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
  /// \param N Node to expand
  /// \returns The expansion result
  SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const;

  /// Expand check for floating point class.
  /// \param ResultVT The type of intrinsic call result.
  /// \param Op The tested value.
  /// \param Test The test to perform.
  /// \param Flags The optimization flags.
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test,
                           SDNodeFlags Flags, const SDLoc &DL,
                           SelectionDAG &DAG) const;

  /// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes,
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_CTPOP nodes.
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const;
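  // Example (illustrative sketch only): a target's LowerOperation() case for
  // ISD::CTPOP can fall back to the generic bit-twiddling expansion above; an
  // empty SDValue tells the legalizer nothing was expanded. Op and DAG are
  // assumed to be in scope.
  //
  //   if (SDValue V = expandCTPOP(Op.getNode(), DAG))
  //     return V;
  //   return SDValue();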
  /// Expand CTLZ/CTLZ_ZERO_UNDEF nodes. Expands vector/scalar CTLZ nodes,
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand CTTZ via Table Lookup.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                          SDValue Op, unsigned NumBitsPerElt) const;

  /// Expand CTTZ/CTTZ_ZERO_UNDEF nodes. Expands vector/scalar CTTZ nodes,
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand ABS nodes. Expands vector/scalar ABS nodes,
  /// vector nodes can only succeed if all operations are legal/custom.
  /// (ABS x) -> (XOR (ADD x, (SRA x, type_size)), (SRA x, type_size))
  /// \param N Node to expand
  /// \param IsNegative indicate negated abs
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandABS(SDNode *N, SelectionDAG &DAG,
                    bool IsNegative = false) const;
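  // Worked example of the ABS expansion above for an i8 value x = -5 (0xFB),
  // where the SRA shift amount is the bit width minus one (7 for i8):
  //   SRA x, 7       = 0xFF   (all-ones sign mask, since x is negative)
  //   ADD x, 0xFF    = 0xFA   (i.e. x - 1)
  //   XOR 0xFA, 0xFF = 0x05   (= |x| = 5)
  // For a non-negative x the sign mask is 0, so the ADD and XOR are no-ops and
  // x is returned unchanged.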
  /// Expand ABDS/ABDU nodes. Expands vector/scalar ABDS/ABDU nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandABD(SDNode *N, SelectionDAG &DAG) const;

  /// Expand BSWAP nodes. Expands scalar/vector BSWAP nodes with i16/i32/i64
  /// scalar types. Returns SDValue() if expand fails.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_BSWAP nodes. Expands VP_BSWAP nodes with i16/i32/i64 scalar
  /// types. Returns SDValue() if expand fails.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand BITREVERSE nodes. Expands scalar/vector BITREVERSE nodes.
  /// Returns SDValue() if expand fails.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_BITREVERSE nodes. Expands VP_BITREVERSE nodes with
  /// i8/i16/i32/i64 scalar types.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const;

  /// Turn load of vector type into a load of the individual elements.
  /// \param LD load to expand
  /// \returns BUILD_VECTOR and TokenFactor nodes.
  std::pair<SDValue, SDValue> scalarizeVectorLoad(LoadSDNode *LD,
                                                  SelectionDAG &DAG) const;

  /// Turn a store of a vector type into stores of the individual elements.
  /// \param ST Store with a vector value type
  /// \returns TokenFactor of the individual store chains.
  SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;

  /// Expands an unaligned load to 2 half-size loads for an integer, and
  /// possibly more for vectors.
  std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
                                                  SelectionDAG &DAG) const;

  /// Expands an unaligned store to 2 half-size stores for integer values, and
  /// possibly more for vectors.
  SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;

  /// Increments memory address \p Addr according to the type of the value
  /// \p DataVT that should be stored. If the data is stored in compressed
  /// form, the memory address should be incremented according to the number of
  /// the stored elements. This number is equal to the number of '1's bits
  /// in the \p Mask.
  /// \p DataVT is a vector type. \p Mask is a vector value.
  /// \p DataVT and \p Mask have the same number of vector elements.
  SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
                                 EVT DataVT, SelectionDAG &DAG,
                                 bool IsCompressedMemory) const;

  /// Get a pointer to vector element \p Idx located in memory for a vector of
  /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
  /// bounds the returned pointer is unspecified, but will be within the vector
  /// bounds.
  SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                  SDValue Index) const;

  /// Get a pointer to a sub-vector of type \p SubVecVT at index \p Idx located
  /// in memory for a vector of type \p VecVT starting at a base address of
  /// \p VecPtr. If \p Idx plus the size of \p SubVecVT is out of bounds the
  /// returned pointer is unspecified, but the value returned will be such that
  /// the entire subvector would be within the vector bounds.
  SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                 EVT SubVecVT, SDValue Index) const;

  /// Method for building the DAG expansion of ISD::[US][MIN|MAX]. This
  /// method accepts integers as its arguments.
  SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
  /// method accepts integers as its arguments.
  SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]SHLSAT. This
  /// method accepts integers as its arguments.
  SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT]. This
  /// method accepts integers as its arguments.
  SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]DIVFIX[SAT]. This
  /// method accepts integers as its arguments.
  /// Note: This method may fail if the division could not be performed
  /// within the type. Clients must retry with a wider type if this happens.
  SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
                              SDValue LHS, SDValue RHS,
                              unsigned Scale, SelectionDAG &DAG) const;
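  // Example (illustrative sketch only): a client of expandFixedPointDiv()
  // above retrying at double the width when the expansion fails, as the note
  // requires. TLI (a TargetLowering reference), dl, and i32 operands LHS/RHS
  // with scale 16 are assumed to be in scope.
  //
  //   SDValue Res =
  //       TLI.expandFixedPointDiv(ISD::SDIVFIX, dl, LHS, RHS, /*Scale=*/16, DAG);
  //   if (!Res) {
  //     SDValue WideLHS = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, LHS);
  //     SDValue WideRHS = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, RHS);
  //     Res = TLI.expandFixedPointDiv(ISD::SDIVFIX, dl, WideLHS, WideRHS,
  //                                   /*Scale=*/16, DAG);
  //     if (Res)
  //       Res = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Res);
  //   }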
  /// Method for building the DAG expansion of ISD::U(ADD|SUB)O. Expansion
  /// always succeeds and populates the Result and Overflow arguments.
  void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                      SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::S(ADD|SUB)O. Expansion
  /// always succeeds and populates the Result and Overflow arguments.
  void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                      SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]MULO. Returns whether
  /// expansion was successful and populates the Result and Overflow arguments.
  bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                  SelectionDAG &DAG) const;

  /// Expand a VECREDUCE_* into an explicit calculation. If Count is specified,
  /// only the first Count elements of the vector are used.
  SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const;

  /// Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
  SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const;

  /// Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
  /// Returns true if the expansion was successful.
  bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::VECTOR_SPLICE. This
  /// method accepts vectors as its arguments.
  SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const;

  /// Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC
  /// on the current target. A VP_SETCC will additionally be given a Mask
  /// and/or EVL not equal to SDValue().
  ///
  /// If the SETCC has been legalized using AND / OR, then the legalized node
  /// will be stored in LHS. RHS and CC will be set to SDValue(). NeedInvert
  /// will be set to false. This will also hold if the VP_SETCC has been
  /// legalized using VP_AND / VP_OR.
  ///
  /// If the SETCC / VP_SETCC has been legalized by using
  /// getSetCCSwappedOperands(), then the values of LHS and RHS will be
  /// swapped, CC will be set to the new condition, and NeedInvert will be set
  /// to false.
  ///
  /// If the SETCC / VP_SETCC has been legalized using the inverse condcode,
  /// then LHS and RHS will be unchanged, CC will be set to the inverted
  /// condcode, and NeedInvert will be set to true. The caller must invert the
  /// result of the SETCC with SelectionDAG::getLogicalNOT() or take equivalent
  /// action to swap the effect of a true/false result.
  ///
  /// \returns true if the SETCC / VP_SETCC has been legalized, false if it
  /// hasn't.
  bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS,
                             SDValue &RHS, SDValue &CC, SDValue Mask,
                             SDValue EVL, bool &NeedInvert, const SDLoc &dl,
                             SDValue &Chain, bool IsSignaling = false) const;
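  // Example (illustrative sketch only): how a caller of LegalizeSetCCCondCode()
  // above might consume its outputs, honoring the "stored in LHS" and
  // NeedInvert contracts. TLI, DAG, VT, LHS, RHS, CC, dl and Chain are assumed
  // to be in scope.
  //
  //   bool NeedInvert = false;
  //   if (TLI.LegalizeSetCCCondCode(DAG, VT, LHS, RHS, CC, SDValue(),
  //                                 SDValue(), NeedInvert, dl, Chain)) {
  //     // If RHS/CC were cleared, LHS already holds the legalized value.
  //     SDValue Res = RHS ? DAG.getSetCC(dl, VT, LHS, RHS,
  //                                      cast<CondCodeSDNode>(CC)->get())
  //                       : LHS;
  //     if (NeedInvert)
  //       Res = DAG.getLogicalNOT(dl, Res, VT);
  //   }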
  //===--------------------------------------------------------------------===//
  // Instruction Emitting Hooks
  //

  /// This method should be implemented by targets that mark instructions with
  /// the 'usesCustomInserter' flag. These instructions are special in various
  /// ways, which require special support to insert. The specified MachineInstr
  /// is created but not inserted into any basic blocks, and this method is
  /// called to expand it into a sequence of instructions, potentially also
  /// creating new basic blocks and control flow.
  /// As long as the returned basic block is different (i.e., we created a new
  /// one), the custom inserter is free to modify the rest of \p MBB.
  virtual MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
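  // Example (illustrative sketch only): a typical override switches on the
  // pseudo's opcode and expands it, possibly creating new blocks.
  // "MyTargetLowering", "MYTGT::SELECT_PSEUDO" and "emitSelectPseudo" are
  // hypothetical.
  //
  //   MachineBasicBlock *
  //   MyTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
  //                                                 MachineBasicBlock *MBB) const {
  //     switch (MI.getOpcode()) {
  //     case MYTGT::SELECT_PSEUDO:
  //       return emitSelectPseudo(MI, MBB); // Builds a diamond of new blocks.
  //     default:
  //       llvm_unreachable("Unexpected instruction for custom inserter!");
  //     }
  //   }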
  /// This method should be implemented by targets that mark instructions with
  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
  /// instruction selection by target hooks, e.g., to fill in optional defs for
  /// ARM 's' setting instructions.
  virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                             SDNode *Node) const;

  /// If this function returns true, SelectionDAGBuilder emits a
  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
  virtual bool useLoadStackGuardNode() const {
    return false;
  }

  virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
                                      const SDLoc &DL) const {
    llvm_unreachable("not implemented for this target");
  }

  /// Lower TLS global address SDNode for target independent emulated TLS model.
  virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;

  /// Expands target specific indirect branch for the case of JumpTable
  /// expansion.
  virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value,
                                         SDValue Addr, int JTI,
                                         SelectionDAG &DAG) const;

  // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
  // If we're comparing for equality to zero and isCtlzFast is true, expose the
  // fact that this can be implemented as a ctlz/srl pair, so that the dag
  // combiner can fold the new nodes.
  SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;

  // Return true if `X & Y eq/ne 0` is preferable to `X & Y ne/eq Y`
  virtual bool isXAndYEqZeroPreferableToXAndYEqY(ISD::CondCode, EVT) const {
    return true;
  }

private:
  SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                           const SDLoc &DL, DAGCombinerInfo &DCI) const;
  SDValue foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                             const SDLoc &DL, DAGCombinerInfo &DCI) const;

  SDValue optimizeSetCCOfSignedTruncationCheck(EVT SCCVT, SDValue N0,
                                               SDValue N1, ISD::CondCode Cond,
                                               DAGCombinerInfo &DCI,
                                               const SDLoc &DL) const;

  // (X & (C l>>/<< Y)) ==/!= 0  -->  ((X <</l>> Y) & C) ==/!= 0
  SDValue optimizeSetCCByHoistingAndByConstFromLogicalShift(
      EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
      DAGCombinerInfo &DCI, const SDLoc &DL) const;

  SDValue prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
                            SDValue CompTargetNode, ISD::CondCode Cond,
                            DAGCombinerInfo &DCI, const SDLoc &DL,
                            SmallVectorImpl<SDNode *> &Created) const;
  SDValue buildUREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
                          ISD::CondCode Cond, DAGCombinerInfo &DCI,
                          const SDLoc &DL) const;

  SDValue prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
                            SDValue CompTargetNode, ISD::CondCode Cond,
                            DAGCombinerInfo &DCI, const SDLoc &DL,
                            SmallVectorImpl<SDNode *> &Created) const;
  SDValue buildSREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
                          ISD::CondCode Cond, DAGCombinerInfo &DCI,
                          const SDLoc &DL) const;
};

/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr,
                   SmallVectorImpl<ISD::OutputArg> &Outs,
                   const TargetLowering &TLI, const DataLayout &DL);

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETLOWERING_H