//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file exposes the class definitions of all of the subclasses of the
// Instruction class. This is meant to be an easy way to get access to all
// instruction subclasses.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_INSTRUCTIONS_H
#define LLVM_IR_INSTRUCTIONS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>

namespace llvm {

class APFloat;
class APInt;
class BasicBlock;
class ConstantInt;
class DataLayout;
class StringRef;
class Type;
class Value;
class UnreachableInst;

//===----------------------------------------------------------------------===//
//                                AllocaInst Class
//===----------------------------------------------------------------------===//

/// an instruction to allocate memory on the stack
class AllocaInst : public UnaryInstruction {
  Type *AllocatedType;

  using AlignmentField = AlignmentBitfieldElementT<0>;
  using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
  using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
  static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
                                        SwiftErrorField>(),
                "Bitfields must be contiguous");

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AllocaInst *cloneImpl() const;

public:
  explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                      const Twine &Name, BasicBlock::iterator InsertBefore);
  explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                      const Twine &Name, Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
             BasicBlock::iterator InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
             Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name, BasicBlock::iterator);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name = "", Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name, BasicBlock *InsertAtEnd);

  /// Return true if there is an allocation size parameter to the allocation
  /// instruction that is not 1.
  bool isArrayAllocation() const;

  /// Get the number of elements allocated. For a simple allocation of a single
  /// element, this will return a constant 1 value.
  const Value *getArraySize() const { return getOperand(0); }
  Value *getArraySize() { return getOperand(0); }

  /// Overload to return most specific pointer type.
  PointerType *getType() const {
    return cast<PointerType>(Instruction::getType());
  }

  /// Return the address space for the allocation.
  unsigned getAddressSpace() const {
    return getType()->getAddressSpace();
  }

  /// Get allocation size in bytes. Returns std::nullopt if size can't be
  /// determined, e.g. in case of a VLA.
  std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const;

  /// Get allocation size in bits. Returns std::nullopt if size can't be
  /// determined, e.g. in case of a VLA.
  std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;

  /// Return the type that is being allocated by the instruction.
  Type *getAllocatedType() const { return AllocatedType; }
  /// for use only in special circumstances that need to generically
  /// transform a whole instruction (eg: IR linking and vectorization).
  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this alloca is in the entry block of the function and is a
  /// constant size. If so, the code generator will fold it into the
  /// prolog/epilog code, so it is basically free.
  bool isStaticAlloca() const;
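  // Illustrative sketch (not part of this header's API): one way a pass might
  // inspect an alloca using the accessors above. `AI` and `DL` are assumed to
  // be a visited AllocaInst and the module's DataLayout, respectively.
  //
  //   if (AI->isStaticAlloca())
  //     if (std::optional<TypeSize> Size = AI->getAllocationSize(DL))
  //       ; // constant-sized entry-block alloca of *Size bytes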
  /// Return true if this alloca is used as an inalloca argument to a call. Such
  /// allocas are never considered static even if they are in the entry block.
  bool isUsedWithInAlloca() const {
    return getSubclassData<UsedWithInAllocaField>();
  }

  /// Specify whether this alloca is used to represent the arguments to a call.
  void setUsedWithInAlloca(bool V) {
    setSubclassData<UsedWithInAllocaField>(V);
  }

  /// Return true if this alloca is used as a swifterror argument to a call.
  bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
  /// Specify whether this alloca is used to represent a swifterror.
  void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Alloca);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

//===----------------------------------------------------------------------===//
//                                LoadInst Class
//===----------------------------------------------------------------------===//

/// An instruction for reading from memory. This uses the SubclassData field in
/// Value to store whether or not the load is volatile.
class LoadInst : public UnaryInstruction {
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LoadInst *cloneImpl() const;

public:
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
           BasicBlock::iterator InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock::iterator InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, BasicBlock::iterator InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order, SyncScope::ID SSID,
           BasicBlock::iterator InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order,
           SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order, SyncScope::ID SSID,
           BasicBlock *InsertAtEnd);

  /// Return true if this is a load from a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile load or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return the alignment of the access that is being performed.
  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this load instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }
  /// Sets the ordering constraint of this load instruction. May not be Release
  /// or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this load instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this load instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }
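  // Illustrative sketch (not part of this header's API): constructing a load
  // with one of the constructors above and then upgrading it to an acquire
  // atomic. `Ptr`, `BB`, and `Ctx` are assumed to be in scope.
  //
  //   LoadInst *LI = new LoadInst(Type::getInt32Ty(Ctx), Ptr, "val",
  //                               /*isVolatile=*/false, Align(4), BB);
  //   LI->setAtomic(AtomicOrdering::Acquire);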
  /// Sets the ordering constraint and the synchronization scope ID of this load
  /// instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Load;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this load instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};

//===----------------------------------------------------------------------===//
//                                StoreInst Class
//===----------------------------------------------------------------------===//

/// An instruction for storing to memory.
class StoreInst : public Instruction {
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  StoreInst *cloneImpl() const;

public:
  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, BasicBlock::iterator InsertBefore);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
            BasicBlock::iterator InsertBefore);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            BasicBlock::iterator InsertBefore);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID,
            BasicBlock::iterator InsertBefore);

  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Return true if this is a store to a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile store or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this store instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this store instruction. May not be
  /// Acquire or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this store instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this store instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }
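  // Illustrative sketch (not part of this header's API): a release store built
  // with the Align/AtomicOrdering constructor above. `Val`, `Ptr`, and `BB`
  // are assumed to be in scope.
  //
  //   StoreInst *SI = new StoreInst(Val, Ptr, /*isVolatile=*/false, Align(4),
  //                                 AtomicOrdering::Release, SyncScope::System,
  //                                 BB);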
  /// Sets the ordering constraint and the synchronization scope ID of this
  /// store instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  Value *getValueOperand() { return getOperand(0); }
  const Value *getValueOperand() const { return getOperand(0); }

  Value *getPointerOperand() { return getOperand(1); }
  const Value *getPointerOperand() const { return getOperand(1); }
  static unsigned getPointerOperandIndex() { return 1U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Store;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this store instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};

template <>
struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)

//===----------------------------------------------------------------------===//
//                                FenceInst Class
//===----------------------------------------------------------------------===//

/// An instruction for ordering other memory operations.
class FenceInst : public Instruction {
  using OrderingField = AtomicOrderingBitfieldElementT<0>;

  void Init(AtomicOrdering Ordering, SyncScope::ID SSID);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  FenceInst *cloneImpl() const;

public:
  // Ordering may only be Acquire, Release, AcquireRelease, or
  // SequentiallyConsistent.
  FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
            BasicBlock::iterator InsertBefore);
  FenceInst(LLVMContext &C, AtomicOrdering Ordering,
            SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
            BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S, 0); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }
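  // Illustrative sketch (not part of this header's API): a sequentially
  // consistent fence appended to a block. `Ctx` and `BB` are assumed to be in
  // scope.
  //
  //   new FenceInst(Ctx, AtomicOrdering::SequentiallyConsistent,
  //                 SyncScope::System, BB);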
  /// Returns the ordering constraint of this fence instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this fence instruction. May only be
  /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this fence instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this fence instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Fence;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this fence instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};

//===----------------------------------------------------------------------===//
//                                AtomicCmpXchgInst Class
//===----------------------------------------------------------------------===//

/// An instruction that atomically checks whether a
/// specified value is in a memory location, and, if it is, stores a new value
/// there. The value returned by this instruction is a pair containing the
/// original value as first element, and an i1 indicating success (true) or
/// failure (false) as second element.
///
class AtomicCmpXchgInst : public Instruction {
  void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
            AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
            SyncScope::ID SSID);

  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicCmpXchgInst *cloneImpl() const;

public:
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    BasicBlock::iterator InsertBefore);
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    Instruction *InsertBefore = nullptr);
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    BasicBlock *InsertAtEnd);

  // allocate space for exactly three operands
  void *operator new(size_t S) { return User::operator new(S, 3); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  using VolatileField = BoolBitfieldElementT<0>;
  using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
  using SuccessOrderingField =
      AtomicOrderingBitfieldElementT<WeakField::NextBit>;
  using FailureOrderingField =
      AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
  using AlignmentField =
      AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
                              FailureOrderingField, AlignmentField>(),
      "Bitfields must be contiguous");

  /// Return the alignment of the memory that is being accessed by the
  /// instruction.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a cmpxchg from a volatile memory
  /// location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile cmpxchg.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return true if this cmpxchg may spuriously fail.
  bool isWeak() const { return getSubclassData<WeakField>(); }

  void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
    return Ordering != AtomicOrdering::NotAtomic &&
           Ordering != AtomicOrdering::Unordered;
  }

  static bool isValidFailureOrdering(AtomicOrdering Ordering) {
    return Ordering != AtomicOrdering::NotAtomic &&
           Ordering != AtomicOrdering::Unordered &&
           Ordering != AtomicOrdering::AcquireRelease &&
           Ordering != AtomicOrdering::Release;
  }

  /// Returns the success ordering constraint of this cmpxchg instruction.
  AtomicOrdering getSuccessOrdering() const {
    return getSubclassData<SuccessOrderingField>();
  }

  /// Sets the success ordering constraint of this cmpxchg instruction.
  void setSuccessOrdering(AtomicOrdering Ordering) {
    assert(isValidSuccessOrdering(Ordering) &&
           "invalid CmpXchg success ordering");
    setSubclassData<SuccessOrderingField>(Ordering);
  }
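  // Illustrative sketch (not part of this header's API): creating a strong
  // cmpxchg whose failure ordering is derived from its success ordering via
  // getStrongestFailureOrdering (declared below). `Ptr`, `Expected`,
  // `Desired`, and `BB` are assumed to be in scope.
  //
  //   auto *CXI = new AtomicCmpXchgInst(
  //       Ptr, Expected, Desired, Align(4), AtomicOrdering::AcquireRelease,
  //       AtomicCmpXchgInst::getStrongestFailureOrdering(
  //           AtomicOrdering::AcquireRelease),
  //       SyncScope::System, BB);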
  /// Returns the failure ordering constraint of this cmpxchg instruction.
  AtomicOrdering getFailureOrdering() const {
    return getSubclassData<FailureOrderingField>();
  }

  /// Sets the failure ordering constraint of this cmpxchg instruction.
  void setFailureOrdering(AtomicOrdering Ordering) {
    assert(isValidFailureOrdering(Ordering) &&
           "invalid CmpXchg failure ordering");
    setSubclassData<FailureOrderingField>(Ordering);
  }

  /// Returns a single ordering which is at least as strong as both the
  /// success and failure orderings for this cmpxchg.
  AtomicOrdering getMergedOrdering() const {
    if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
      return AtomicOrdering::SequentiallyConsistent;
    if (getFailureOrdering() == AtomicOrdering::Acquire) {
      if (getSuccessOrdering() == AtomicOrdering::Monotonic)
        return AtomicOrdering::Acquire;
      if (getSuccessOrdering() == AtomicOrdering::Release)
        return AtomicOrdering::AcquireRelease;
    }
    return getSuccessOrdering();
  }

  /// Returns the synchronization scope ID of this cmpxchg instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this cmpxchg instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getCompareOperand() { return getOperand(1); }
  const Value *getCompareOperand() const { return getOperand(1); }

  Value *getNewValOperand() { return getOperand(2); }
  const Value *getNewValOperand() const { return getOperand(2); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the strongest permitted ordering on failure, given the
  /// desired ordering on success.
  ///
  /// If the comparison in a cmpxchg operation fails, there is no atomic store
  /// so release semantics cannot be provided. So this function drops explicit
  /// Release requests from the AtomicOrdering. A SequentiallyConsistent
  /// operation would remain SequentiallyConsistent.
  static AtomicOrdering
  getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
    switch (SuccessOrdering) {
    default:
      llvm_unreachable("invalid cmpxchg success ordering");
    case AtomicOrdering::Release:
    case AtomicOrdering::Monotonic:
      return AtomicOrdering::Monotonic;
    case AtomicOrdering::AcquireRelease:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicCmpXchg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this cmpxchg instruction. Not quite
  /// enough room in SubClassData for everything, so synchronization scope ID
  /// gets its own field.
  SyncScope::ID SSID;
};

template <>
struct OperandTraits<AtomicCmpXchgInst> :
    public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)

//===----------------------------------------------------------------------===//
//                                AtomicRMWInst Class
//===----------------------------------------------------------------------===//

/// an instruction that atomically reads a memory location,
/// combines it with another value, and then stores the result back. Returns
/// the old value.
///
class AtomicRMWInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicRMWInst *cloneImpl() const;

public:
  /// This enumeration lists the possible modifications atomicrmw can make. In
  /// the descriptions, 'p' is the pointer to the instruction's memory location,
  /// 'old' is the initial value of *p, and 'v' is the other value passed to the
  /// instruction. These instructions always return 'old'.
  enum BinOp : unsigned {
    /// *p = v
    Xchg,
    /// *p = old + v
    Add,
    /// *p = old - v
    Sub,
    /// *p = old & v
    And,
    /// *p = ~(old & v)
    Nand,
    /// *p = old | v
    Or,
    /// *p = old ^ v
    Xor,
    /// *p = old >signed v ? old : v
    Max,
    /// *p = old <signed v ? old : v
    Min,
    /// *p = old >unsigned v ? old : v
    UMax,
    /// *p = old <unsigned v ? old : v
    UMin,

    /// *p = old + v
    FAdd,

    /// *p = old - v
    FSub,

    /// *p = maxnum(old, v)
    /// \p maxnum matches the behavior of \p llvm.maxnum.*.
    FMax,

    /// *p = minnum(old, v)
    /// \p minnum matches the behavior of \p llvm.minnum.*.
    FMin,

    /// Increment one up to a maximum value.
    /// *p = (old u>= v) ? 0 : (old + 1)
    UIncWrap,

    /// Decrement one until a minimum value or zero.
    /// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
    UDecWrap,

    FIRST_BINOP = Xchg,
    LAST_BINOP = UDecWrap,
    BAD_BINOP
  };

private:
  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

  template <unsigned Offset>
  using BinOpBitfieldElement =
      typename Bitfield::Element<BinOp, Offset, 5, BinOp::LAST_BINOP>;

public:
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                BasicBlock::iterator InsertBefore);
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                Instruction *InsertBefore = nullptr);
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  using VolatileField = BoolBitfieldElementT<0>;
  using AtomicOrderingField =
      AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
  using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
  using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
  static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
                                        OperationField, AlignmentField>(),
                "Bitfields must be contiguous");

  BinOp getOperation() const { return getSubclassData<OperationField>(); }

  static StringRef getOperationName(BinOp Op);

  static bool isFPOperation(BinOp Op) {
    switch (Op) {
    case AtomicRMWInst::FAdd:
    case AtomicRMWInst::FSub:
    case AtomicRMWInst::FMax:
    case AtomicRMWInst::FMin:
      return true;
    default:
      return false;
    }
  }

  void setOperation(BinOp Operation) {
    setSubclassData<OperationField>(Operation);
  }

  /// Return the alignment of the memory that is being accessed by the
  /// instruction.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a RMW on a volatile memory location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile RMW or not.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Returns the ordering constraint of this rmw instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<AtomicOrderingField>();
  }

  /// Sets the ordering constraint of this rmw instruction.
  void setOrdering(AtomicOrdering Ordering) {
    assert(Ordering != AtomicOrdering::NotAtomic &&
           "atomicrmw instructions can only be atomic.");
    assert(Ordering != AtomicOrdering::Unordered &&
           "atomicrmw instructions cannot be unordered.");
    setSubclassData<AtomicOrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this rmw instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }
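  // Illustrative sketch (not part of this header's API): a monotonic
  // fetch-and-add built from the constructors above. `Ptr`, `Inc`, and `BB`
  // are assumed to be in scope.
  //
  //   auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Inc, Align(4),
  //                                 AtomicOrdering::Monotonic,
  //                                 SyncScope::System, BB);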
  /// Sets the synchronization scope ID of this rmw instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getValOperand() { return getOperand(1); }
  const Value *getValOperand() const { return getOperand(1); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  bool isFloatingPointOperation() const {
    return isFPOperation(getOperation());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicRMW;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
            AtomicOrdering Ordering, SyncScope::ID SSID);

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this rmw instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};

template <>
struct OperandTraits<AtomicRMWInst>
    : public FixedNumOperandTraits<AtomicRMWInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)

//===----------------------------------------------------------------------===//
//                                GetElementPtrInst Class
//===----------------------------------------------------------------------===//

// checkGEPType - Simple wrapper function to give a better assertion failure
// message on bad indexes for a gep instruction.
//
inline Type *checkGEPType(Type *Ty) {
  assert(Ty && "Invalid GetElementPtrInst indices for type!");
  return Ty;
}

/// an instruction for type-safe pointer arithmetic to
/// access elements of arrays and structs
///
class GetElementPtrInst : public Instruction {
  Type *SourceElementType;
  Type *ResultElementType;

  GetElementPtrInst(const GetElementPtrInst &GEPI);

  /// Constructors - Create a getelementptr instruction with a base pointer and
  /// a list of indices. The first and second ctor can optionally insert before
  /// an existing instruction, the third appends the new instruction to the
  /// specified BasicBlock.
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr,
                           BasicBlock::iterator InsertBefore);
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, Instruction *InsertBefore);
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  GetElementPtrInst *cloneImpl() const;

public:
  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr,
                                   BasicBlock::iterator InsertBefore) {
    unsigned Values = 1 + unsigned(IdxList.size());
    assert(PointeeType && "Must specify element type");
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertBefore);
  }

  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    unsigned Values = 1 + unsigned(IdxList.size());
    assert(PointeeType && "Must specify element type");
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertBefore);
  }

  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    unsigned Values = 1 + unsigned(IdxList.size());
    assert(PointeeType && "Must specify element type");
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertAtEnd);
  }

  /// Create an "inbounds" getelementptr. See the documentation for the
  /// "inbounds" flag in LangRef.html for details.
  static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
                                           ArrayRef<Value *> IdxList,
                                           const Twine &NameStr,
                                           BasicBlock::iterator InsertBefore) {
    GetElementPtrInst *GEP =
        Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
    GEP->setIsInBounds(true);
    return GEP;
  }

  static GetElementPtrInst *
  CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
                 const Twine &NameStr = "",
                 Instruction *InsertBefore = nullptr) {
    GetElementPtrInst *GEP =
        Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
    GEP->setIsInBounds(true);
    return GEP;
  }

  static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
                                           ArrayRef<Value *> IdxList,
                                           const Twine &NameStr,
                                           BasicBlock *InsertAtEnd) {
    GetElementPtrInst *GEP =
        Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
    GEP->setIsInBounds(true);
    return GEP;
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  Type *getSourceElementType() const { return SourceElementType; }

  void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
  void setResultElementType(Type *Ty) { ResultElementType = Ty; }

  Type *getResultElementType() const {
    return ResultElementType;
  }

  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    // Note that this is always the same as the pointer operand's address space
    // and that is cheaper to compute, so cheat here.
    return getPointerAddressSpace();
  }
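  // Illustrative sketch (not part of this header's API): an inbounds GEP into
  // the second field of a struct, i.e.
  // `getelementptr inbounds %T, ptr %Base, i32 0, i32 1`. `STy`, `Base`,
  // `Ctx`, and `InsertPt` are assumed to be in scope.
  //
  //   Value *Idx[] = {ConstantInt::get(Type::getInt32Ty(Ctx), 0),
  //                   ConstantInt::get(Type::getInt32Ty(Ctx), 1)};
  //   GetElementPtrInst *GEP = GetElementPtrInst::CreateInBounds(
  //       STy, Base, Idx, "field.addr", InsertPt);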
  /// Returns the result type of a getelementptr with the given source
  /// element type and indexes.
  ///
  /// Null is returned if the indices are invalid for the specified
  /// source element type.
  static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);

  /// Return the type of the element at the given index of an indexable
  /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
  ///
  /// Returns null if the type can't be indexed, or the given index is not
  /// legal for the given type.
  static Type *getTypeAtIndex(Type *Ty, Value *Idx);
  static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);

  inline op_iterator idx_begin() { return op_begin()+1; }
  inline const_op_iterator idx_begin() const { return op_begin()+1; }
  inline op_iterator idx_end() { return op_end(); }
  inline const_op_iterator idx_end() const { return op_end(); }

  inline iterator_range<op_iterator> indices() {
    return make_range(idx_begin(), idx_end());
  }

  inline iterator_range<const_op_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getPointerOperand() {
    return getOperand(0);
  }
  const Value *getPointerOperand() const {
    return getOperand(0);
  }
  static unsigned getPointerOperandIndex() {
    return 0U;    // get index for modifying correct operand.
  }

  /// Method to return the pointer operand as a
  /// PointerType.
  Type *getPointerOperandType() const {
    return getPointerOperand()->getType();
  }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  /// Returns the pointer type returned by the GEP
  /// instruction, which may be a vector of pointers.
  static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
    // Vector GEP
    Type *Ty = Ptr->getType();
    if (Ty->isVectorTy())
      return Ty;

    for (Value *Index : IdxList)
      if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
        ElementCount EltCount = IndexVTy->getElementCount();
        return VectorType::get(Ty, EltCount);
      }
    // Scalar GEP
    return Ty;
  }

  unsigned getNumIndices() const {  // Note: always non-negative
    return getNumOperands() - 1;
  }

  bool hasIndices() const {
    return getNumOperands() > 1;
  }

  /// Return true if all of the indices of this GEP are
  /// zeros. If so, the result pointer and the first operand have the same
  /// value, just potentially different types.
  bool hasAllZeroIndices() const;

  /// Return true if all of the indices of this GEP are
  /// constant integers. If so, the result pointer and the first operand have
  /// a constant offset between them.
  bool hasAllConstantIndices() const;

  /// Set or clear the inbounds flag on this GEP instruction.
  /// See LangRef.html for the meaning of inbounds on a getelementptr.
  void setIsInBounds(bool b = true);

  /// Determine whether the GEP has the inbounds flag.
  bool isInBounds() const;

  /// Accumulate the constant address offset of this GEP if possible.
  ///
  /// This routine accepts an APInt into which it will accumulate the constant
  /// offset of this GEP if the GEP is in fact constant. If the GEP is not
  /// all-constant, it returns false and the value of the offset APInt is
  /// undefined (it is *not* preserved!). The APInt passed into this routine
  /// must be at least as wide as the IntPtr type for the address space of
  /// the base GEP pointer.
  bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
  bool collectOffset(const DataLayout &DL, unsigned BitWidth,
                     MapVector<Value *, APInt> &VariableOffsets,
                     APInt &ConstantOffset) const;
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::GetElementPtr);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<GetElementPtrInst> :
  public VariadicOperandTraits<GetElementPtrInst, 1> {
};

GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     BasicBlock::iterator InsertBefore)
    : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertBefore),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  init(Ptr, IdxList, NameStr);
}

GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     Instruction *InsertBefore)
    : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertBefore),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  init(Ptr, IdxList, NameStr);
}

GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     BasicBlock *InsertAtEnd)
    : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertAtEnd),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  init(Ptr, IdxList, NameStr);
}

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)

//===----------------------------------------------------------------------===//
//                                ICmpInst Class
//===----------------------------------------------------------------------===//

/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on integers or pointers. The operands
/// must be identical types.
/// Represent an integer comparison operator.
class ICmpInst: public CmpInst {
  void AssertOK() {
    assert(isIntPredicate() &&
           "Invalid ICmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
           "Both operands to ICmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
            getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
           "Invalid operand types for ICmp instruction");
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ICmpInst
  ICmpInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  ICmpInst(
    BasicBlock::iterator InsertBefore,  ///< Where to insert
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// Constructor with insert-before-instruction semantics.
  ICmpInst(
    Instruction *InsertBefore,  ///< Where to insert
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// Constructor with insert-at-end semantics.
  ICmpInst(
    BasicBlock *InsertAtEnd, ///< Block to insert into.
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              InsertAtEnd) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// Constructor with no-insertion semantics
  ICmpInst(
    Predicate pred, ///< The predicate to use for the comparison
    Value *LHS,     ///< The left-hand-side of the expression
    Value *RHS,     ///< The right-hand-side of the expression
    const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
  /// @returns the predicate that would be the result if the operand were
  /// regarded as signed.
  /// Return the signed version of the predicate
  Predicate getSignedPredicate() const {
    return getSignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// Return the signed version of the predicate.
  static Predicate getSignedPredicate(Predicate pred);

  /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
  /// @returns the predicate that would be the result if the operand were
  /// regarded as unsigned.
  /// Return the unsigned version of the predicate
  Predicate getUnsignedPredicate() const {
    return getUnsignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// Return the unsigned version of the predicate.
  static Predicate getUnsignedPredicate(Predicate pred);
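  // Illustrative sketch (not part of this header's API): comparing two i32
  // values with an unsigned less-than predicate using the insert-before
  // constructor above. `A`, `B`, and `InsertPt` are assumed to be in scope.
  //
  //   ICmpInst *Cmp =
  //       new ICmpInst(InsertPt, ICmpInst::ICMP_ULT, A, B, "is.less");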
  /// Return true if this predicate is either EQ or NE. This also
  /// tests for commutativity.
  static bool isEquality(Predicate P) {
    return P == ICMP_EQ || P == ICMP_NE;
  }

  /// Return true if this predicate is either EQ or NE. This also
  /// tests for commutativity.
  bool isEquality() const {
    return isEquality(getPredicate());
  }

  /// @returns true if the predicate of this ICmpInst is commutative
  /// Determine if this relation is commutative.
  bool isCommutative() const { return isEquality(); }

  /// Return true if the predicate is relational (not EQ or NE).
  ///
  bool isRelational() const {
    return !isEquality();
  }

  /// Return true if the predicate is relational (not EQ or NE).
  ///
  static bool isRelational(Predicate P) {
    return !isEquality(P);
  }

  /// Return true if the predicate is SGT or UGT.
  ///
  static bool isGT(Predicate P) {
    return P == ICMP_SGT || P == ICMP_UGT;
  }

  /// Return true if the predicate is SLT or ULT.
  ///
  static bool isLT(Predicate P) {
    return P == ICMP_SLT || P == ICMP_ULT;
  }

  /// Return true if the predicate is SGE or UGE.
  ///
  static bool isGE(Predicate P) {
    return P == ICMP_SGE || P == ICMP_UGE;
  }

  /// Return true if the predicate is SLE or ULE.
  ///
  static bool isLE(Predicate P) {
    return P == ICMP_SLE || P == ICMP_ULE;
  }

  /// Returns the sequence of all ICmp predicates.
  ///
  static auto predicates() { return ICmpPredicates(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
  void swapOperands() {
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  /// Return result of `LHS Pred RHS` comparison.
  static bool compare(const APInt &LHS, const APInt &RHS,
                      ICmpInst::Predicate Pred);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ICmp;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                FCmpInst Class
//===----------------------------------------------------------------------===//

/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on floating point values or packed
/// vectors of floating point values. The operands must be identical types.
/// Represents a floating point comparison operator.
class FCmpInst: public CmpInst {
  void AssertOK() {
    assert(isFPPredicate() && "Invalid FCmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
           "Both operands to FCmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
           "Invalid operand types for FCmp instruction");
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FCmpInst
  FCmpInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  FCmpInst(
    BasicBlock::iterator InsertBefore, ///< Where to insert
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
    AssertOK();
  }

  /// Constructor with insert-before-instruction semantics.
  FCmpInst(
    Instruction *InsertBefore, ///< Where to insert
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
    AssertOK();
  }

  /// Constructor with insert-at-end semantics.
  FCmpInst(
    BasicBlock *InsertAtEnd, ///< Block to insert into.
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              InsertAtEnd) {
    AssertOK();
  }

  /// Constructor with no-insertion semantics
  FCmpInst(
    Predicate Pred, ///< The predicate to use for the comparison
    Value *LHS,     ///< The left-hand-side of the expression
    Value *RHS,     ///< The right-hand-side of the expression
    const Twine &NameStr = "", ///< Name of the instruction
    Instruction *FlagsSource = nullptr
  ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
              RHS, NameStr, nullptr, FlagsSource) {
    AssertOK();
  }

  /// @returns true if the predicate of this instruction is EQ or NE.
  /// Determine if this is an equality predicate.
  static bool isEquality(Predicate Pred) {
    return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
           Pred == FCMP_UNE;
  }

  /// @returns true if the predicate of this instruction is EQ or NE.
  /// Determine if this is an equality predicate.
  bool isEquality() const { return isEquality(getPredicate()); }

  /// @returns true if the predicate of this instruction is commutative.
  /// Determine if this is a commutative predicate.
  bool isCommutative() const {
    return isEquality() ||
           getPredicate() == FCMP_FALSE ||
           getPredicate() == FCMP_TRUE ||
           getPredicate() == FCMP_ORD ||
           getPredicate() == FCMP_UNO;
  }

  /// @returns true if the predicate is relational (not EQ or NE).
  /// Determine if this is a relational predicate.
  bool isRelational() const { return !isEquality(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
1537 void swapOperands() { 1538 setPredicate(getSwappedPredicate()); 1539 Op<0>().swap(Op<1>()); 1540 } 1541 1542 /// Returns the sequence of all FCmp predicates. 1543 /// 1544 static auto predicates() { return FCmpPredicates(); } 1545 1546 /// Return result of `LHS Pred RHS` comparison. 1547 static bool compare(const APFloat &LHS, const APFloat &RHS, 1548 FCmpInst::Predicate Pred); 1549 1550 /// Methods for support type inquiry through isa, cast, and dyn_cast: 1551 static bool classof(const Instruction *I) { 1552 return I->getOpcode() == Instruction::FCmp; 1553 } 1554 static bool classof(const Value *V) { 1555 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1556 } 1557 }; 1558 1559 //===----------------------------------------------------------------------===// 1560 /// This class represents a function call, abstracting a target 1561 /// machine's calling convention. This class uses low bit of the SubClassData 1562 /// field to indicate whether or not this is a tail call. The rest of the bits 1563 /// hold the calling convention of the call. 1564 /// 1565 class CallInst : public CallBase { 1566 CallInst(const CallInst &CI); 1567 1568 /// Construct a CallInst from a range of arguments 1569 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1570 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1571 BasicBlock::iterator InsertBefore); 1572 1573 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1574 const Twine &NameStr, BasicBlock::iterator InsertBefore) 1575 : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {} 1576 1577 /// Construct a CallInst given a range of arguments. 1578 /// Construct a CallInst from a range of arguments 1579 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1580 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1581 Instruction *InsertBefore); 1582 1583 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1584 const Twine &NameStr, Instruction *InsertBefore) 1585 : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {} 1586 1587 /// Construct a CallInst given a range of arguments. 1588 /// Construct a CallInst from a range of arguments 1589 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1590 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1591 BasicBlock *InsertAtEnd); 1592 1593 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr, 1594 BasicBlock::iterator InsertBefore); 1595 1596 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr, 1597 Instruction *InsertBefore); 1598 1599 CallInst(FunctionType *ty, Value *F, const Twine &NameStr, 1600 BasicBlock *InsertAtEnd); 1601 1602 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args, 1603 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); 1604 void init(FunctionType *FTy, Value *Func, const Twine &NameStr); 1605 1606 /// Compute the number of operands to allocate. 1607 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { 1608 // We need one operand for the called function, plus the input operand 1609 // counts provided. 1610 return 1 + NumArgs + NumBundleInputs; 1611 } 1612 1613 protected: 1614 // Note: Instruction needs to be a friend here to call cloneImpl. 
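  // Illustrative sketch (the names `Callee`, `Arg0`, `Arg1`, and `InsertPt`
  // are assumptions, not declared here): a call to a function taking two
  // arguments could be created and marked as a tail call with
  //
  //   CallInst *Call = CallInst::Create(Callee->getFunctionType(), Callee,
  //                                     {Arg0, Arg1}, "result", InsertPt);
  //   Call->setTailCall();
  //
  // The FunctionCallee overloads of Create() below bundle the callee and its
  // function type into one argument.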
1615 friend class Instruction; 1616 1617 CallInst *cloneImpl() const; 1618 1619 public: 1620 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr, 1621 BasicBlock::iterator InsertBefore) { 1622 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore); 1623 } 1624 1625 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "", 1626 Instruction *InsertBefore = nullptr) { 1627 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore); 1628 } 1629 1630 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1631 const Twine &NameStr, 1632 BasicBlock::iterator InsertBefore) { 1633 return new (ComputeNumOperands(Args.size())) 1634 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore); 1635 } 1636 1637 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1638 const Twine &NameStr, 1639 Instruction *InsertBefore = nullptr) { 1640 return new (ComputeNumOperands(Args.size())) 1641 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore); 1642 } 1643 1644 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1645 ArrayRef<OperandBundleDef> Bundles, 1646 const Twine &NameStr, 1647 BasicBlock::iterator InsertBefore) { 1648 const int NumOperands = 1649 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 1650 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 1651 1652 return new (NumOperands, DescriptorBytes) 1653 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore); 1654 } 1655 1656 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1657 ArrayRef<OperandBundleDef> Bundles = std::nullopt, 1658 const Twine &NameStr = "", 1659 Instruction *InsertBefore = nullptr) { 1660 const int NumOperands = 1661 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 1662 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 1663 1664 return new (NumOperands, DescriptorBytes) 1665 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore); 1666 } 1667 1668 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr, 1669 BasicBlock *InsertAtEnd) { 1670 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd); 1671 } 1672 1673 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1674 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1675 return new (ComputeNumOperands(Args.size())) 1676 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertAtEnd); 1677 } 1678 1679 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1680 ArrayRef<OperandBundleDef> Bundles, 1681 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1682 const int NumOperands = 1683 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 1684 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 1685 1686 return new (NumOperands, DescriptorBytes) 1687 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd); 1688 } 1689 1690 static CallInst *Create(FunctionCallee Func, const Twine &NameStr, 1691 BasicBlock::iterator InsertBefore) { 1692 return Create(Func.getFunctionType(), Func.getCallee(), NameStr, 1693 InsertBefore); 1694 } 1695 1696 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "", 1697 Instruction *InsertBefore = nullptr) { 1698 return Create(Func.getFunctionType(), Func.getCallee(), NameStr, 1699 InsertBefore); 1700 } 1701 1702 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> 
Args, 1703 ArrayRef<OperandBundleDef> Bundles, 1704 const Twine &NameStr, 1705 BasicBlock::iterator InsertBefore) { 1706 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, 1707 NameStr, InsertBefore); 1708 } 1709 1710 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1711 ArrayRef<OperandBundleDef> Bundles = std::nullopt, 1712 const Twine &NameStr = "", 1713 Instruction *InsertBefore = nullptr) { 1714 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, 1715 NameStr, InsertBefore); 1716 } 1717 1718 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1719 const Twine &NameStr, 1720 BasicBlock::iterator InsertBefore) { 1721 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, 1722 InsertBefore); 1723 } 1724 1725 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1726 const Twine &NameStr, 1727 Instruction *InsertBefore = nullptr) { 1728 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, 1729 InsertBefore); 1730 } 1731 1732 static CallInst *Create(FunctionCallee Func, const Twine &NameStr, 1733 BasicBlock *InsertAtEnd) { 1734 return Create(Func.getFunctionType(), Func.getCallee(), NameStr, 1735 InsertAtEnd); 1736 } 1737 1738 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1739 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1740 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, 1741 InsertAtEnd); 1742 } 1743 1744 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1745 ArrayRef<OperandBundleDef> Bundles, 1746 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1747 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, 1748 NameStr, InsertAtEnd); 1749 } 1750 1751 /// Create a clone of \p CI with a different set of operand bundles and 1752 /// insert it before \p InsertPt. 1753 /// 1754 /// The returned call instruction is identical \p CI in every way except that 1755 /// the operand bundles for the new instruction are set to the operand bundles 1756 /// in \p Bundles. 1757 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles, 1758 BasicBlock::iterator InsertPt); 1759 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles, 1760 Instruction *InsertPt = nullptr); 1761 1762 // Note that 'musttail' implies 'tail'. 1763 enum TailCallKind : unsigned { 1764 TCK_None = 0, 1765 TCK_Tail = 1, 1766 TCK_MustTail = 2, 1767 TCK_NoTail = 3, 1768 TCK_LAST = TCK_NoTail 1769 }; 1770 1771 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>; 1772 static_assert( 1773 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(), 1774 "Bitfields must be contiguous"); 1775 1776 TailCallKind getTailCallKind() const { 1777 return getSubclassData<TailCallKindField>(); 1778 } 1779 1780 bool isTailCall() const { 1781 TailCallKind Kind = getTailCallKind(); 1782 return Kind == TCK_Tail || Kind == TCK_MustTail; 1783 } 1784 1785 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; } 1786 1787 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; } 1788 1789 void setTailCallKind(TailCallKind TCK) { 1790 setSubclassData<TailCallKindField>(TCK); 1791 } 1792 1793 void setTailCall(bool IsTc = true) { 1794 setTailCallKind(IsTc ? 
TCK_Tail : TCK_None); 1795 } 1796 1797 /// Return true if the call can return twice 1798 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); } 1799 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); } 1800 1801 // Methods for support type inquiry through isa, cast, and dyn_cast: 1802 static bool classof(const Instruction *I) { 1803 return I->getOpcode() == Instruction::Call; 1804 } 1805 static bool classof(const Value *V) { 1806 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1807 } 1808 1809 /// Updates profile metadata by scaling it by \p S / \p T. 1810 void updateProfWeight(uint64_t S, uint64_t T); 1811 1812 private: 1813 // Shadow Instruction::setInstructionSubclassData with a private forwarding 1814 // method so that subclasses cannot accidentally use it. 1815 template <typename Bitfield> 1816 void setSubclassData(typename Bitfield::Type Value) { 1817 Instruction::setSubclassData<Bitfield>(Value); 1818 } 1819 }; 1820 1821 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1822 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1823 BasicBlock *InsertAtEnd) 1824 : CallBase(Ty->getReturnType(), Instruction::Call, 1825 OperandTraits<CallBase>::op_end(this) - 1826 (Args.size() + CountBundleInputs(Bundles) + 1), 1827 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), 1828 InsertAtEnd) { 1829 init(Ty, Func, Args, Bundles, NameStr); 1830 } 1831 1832 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1833 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1834 BasicBlock::iterator InsertBefore) 1835 : CallBase(Ty->getReturnType(), Instruction::Call, 1836 OperandTraits<CallBase>::op_end(this) - 1837 (Args.size() + CountBundleInputs(Bundles) + 1), 1838 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), 1839 InsertBefore) { 1840 init(Ty, Func, Args, Bundles, NameStr); 1841 } 1842 1843 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1844 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1845 Instruction *InsertBefore) 1846 : CallBase(Ty->getReturnType(), Instruction::Call, 1847 OperandTraits<CallBase>::op_end(this) - 1848 (Args.size() + CountBundleInputs(Bundles) + 1), 1849 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), 1850 InsertBefore) { 1851 init(Ty, Func, Args, Bundles, NameStr); 1852 } 1853 1854 //===----------------------------------------------------------------------===// 1855 // SelectInst Class 1856 //===----------------------------------------------------------------------===// 1857 1858 /// This class represents the LLVM 'select' instruction. 
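/// For example (illustrative, assuming an existing i1 value \c Cond, i32
/// values \c A and \c B, and an insertion point \c InsertPt):
/// \code
///   SelectInst *Min = SelectInst::Create(Cond, A, B, "min", InsertPt);
/// \endcode
/// The result is \c A when \c Cond is true and \c B otherwise.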
1859 /// 1860 class SelectInst : public Instruction { 1861 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, 1862 BasicBlock::iterator InsertBefore) 1863 : Instruction(S1->getType(), Instruction::Select, &Op<0>(), 3, 1864 InsertBefore) { 1865 init(C, S1, S2); 1866 setName(NameStr); 1867 } 1868 1869 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, 1870 Instruction *InsertBefore) 1871 : Instruction(S1->getType(), Instruction::Select, 1872 &Op<0>(), 3, InsertBefore) { 1873 init(C, S1, S2); 1874 setName(NameStr); 1875 } 1876 1877 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, 1878 BasicBlock *InsertAtEnd) 1879 : Instruction(S1->getType(), Instruction::Select, 1880 &Op<0>(), 3, InsertAtEnd) { 1881 init(C, S1, S2); 1882 setName(NameStr); 1883 } 1884 1885 void init(Value *C, Value *S1, Value *S2) { 1886 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select"); 1887 Op<0>() = C; 1888 Op<1>() = S1; 1889 Op<2>() = S2; 1890 } 1891 1892 protected: 1893 // Note: Instruction needs to be a friend here to call cloneImpl. 1894 friend class Instruction; 1895 1896 SelectInst *cloneImpl() const; 1897 1898 public: 1899 static SelectInst *Create(Value *C, Value *S1, Value *S2, 1900 const Twine &NameStr, 1901 BasicBlock::iterator InsertBefore, 1902 Instruction *MDFrom = nullptr) { 1903 SelectInst *Sel = new (3) SelectInst(C, S1, S2, NameStr, InsertBefore); 1904 if (MDFrom) 1905 Sel->copyMetadata(*MDFrom); 1906 return Sel; 1907 } 1908 1909 static SelectInst *Create(Value *C, Value *S1, Value *S2, 1910 const Twine &NameStr = "", 1911 Instruction *InsertBefore = nullptr, 1912 Instruction *MDFrom = nullptr) { 1913 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore); 1914 if (MDFrom) 1915 Sel->copyMetadata(*MDFrom); 1916 return Sel; 1917 } 1918 1919 static SelectInst *Create(Value *C, Value *S1, Value *S2, 1920 const Twine &NameStr, 1921 BasicBlock *InsertAtEnd) { 1922 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd); 1923 } 1924 1925 const Value *getCondition() const { return Op<0>(); } 1926 const Value *getTrueValue() const { return Op<1>(); } 1927 const Value *getFalseValue() const { return Op<2>(); } 1928 Value *getCondition() { return Op<0>(); } 1929 Value *getTrueValue() { return Op<1>(); } 1930 Value *getFalseValue() { return Op<2>(); } 1931 1932 void setCondition(Value *V) { Op<0>() = V; } 1933 void setTrueValue(Value *V) { Op<1>() = V; } 1934 void setFalseValue(Value *V) { Op<2>() = V; } 1935 1936 /// Swap the true and false values of the select instruction. 1937 /// This doesn't swap prof metadata. 1938 void swapValues() { Op<1>().swap(Op<2>()); } 1939 1940 /// Return a string if the specified operands are invalid 1941 /// for a select operation, otherwise return null. 1942 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False); 1943 1944 /// Transparently provide more efficient getOperand methods. 
1945 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1946 1947 OtherOps getOpcode() const { 1948 return static_cast<OtherOps>(Instruction::getOpcode()); 1949 } 1950 1951 // Methods for support type inquiry through isa, cast, and dyn_cast: 1952 static bool classof(const Instruction *I) { 1953 return I->getOpcode() == Instruction::Select; 1954 } 1955 static bool classof(const Value *V) { 1956 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1957 } 1958 }; 1959 1960 template <> 1961 struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> { 1962 }; 1963 1964 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value) 1965 1966 //===----------------------------------------------------------------------===// 1967 // VAArgInst Class 1968 //===----------------------------------------------------------------------===// 1969 1970 /// This class represents the va_arg llvm instruction, which returns 1971 /// an argument of the specified type given a va_list and increments that list 1972 /// 1973 class VAArgInst : public UnaryInstruction { 1974 protected: 1975 // Note: Instruction needs to be a friend here to call cloneImpl. 1976 friend class Instruction; 1977 1978 VAArgInst *cloneImpl() const; 1979 1980 public: 1981 VAArgInst(Value *List, Type *Ty, const Twine &NameStr, 1982 BasicBlock::iterator InsertBefore) 1983 : UnaryInstruction(Ty, VAArg, List, InsertBefore) { 1984 setName(NameStr); 1985 } 1986 1987 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "", 1988 Instruction *InsertBefore = nullptr) 1989 : UnaryInstruction(Ty, VAArg, List, InsertBefore) { 1990 setName(NameStr); 1991 } 1992 1993 VAArgInst(Value *List, Type *Ty, const Twine &NameStr, 1994 BasicBlock *InsertAtEnd) 1995 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) { 1996 setName(NameStr); 1997 } 1998 1999 Value *getPointerOperand() { return getOperand(0); } 2000 const Value *getPointerOperand() const { return getOperand(0); } 2001 static unsigned getPointerOperandIndex() { return 0U; } 2002 2003 // Methods for support type inquiry through isa, cast, and dyn_cast: 2004 static bool classof(const Instruction *I) { 2005 return I->getOpcode() == VAArg; 2006 } 2007 static bool classof(const Value *V) { 2008 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2009 } 2010 }; 2011 2012 //===----------------------------------------------------------------------===// 2013 // ExtractElementInst Class 2014 //===----------------------------------------------------------------------===// 2015 2016 /// This instruction extracts a single (scalar) 2017 /// element from a VectorType value 2018 /// 2019 class ExtractElementInst : public Instruction { 2020 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr, 2021 BasicBlock::iterator InsertBefore); 2022 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "", 2023 Instruction *InsertBefore = nullptr); 2024 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr, 2025 BasicBlock *InsertAtEnd); 2026 2027 protected: 2028 // Note: Instruction needs to be a friend here to call cloneImpl. 
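  // Illustrative sketch (the names `Vec`, `Idx`, and `InsertPt` are
  // assumptions, not declared here): pulling one lane out of a vector value
  // could be written as
  //
  //   assert(ExtractElementInst::isValidOperands(Vec, Idx));
  //   Value *Elt = ExtractElementInst::Create(Vec, Idx, "elt", InsertPt);
  //
  // isValidOperands() rejects a non-vector first operand or a non-integer
  // index.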
2029 friend class Instruction; 2030 2031 ExtractElementInst *cloneImpl() const; 2032 2033 public: 2034 static ExtractElementInst *Create(Value *Vec, Value *Idx, 2035 const Twine &NameStr, 2036 BasicBlock::iterator InsertBefore) { 2037 return new (2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore); 2038 } 2039 2040 static ExtractElementInst *Create(Value *Vec, Value *Idx, 2041 const Twine &NameStr = "", 2042 Instruction *InsertBefore = nullptr) { 2043 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore); 2044 } 2045 2046 static ExtractElementInst *Create(Value *Vec, Value *Idx, 2047 const Twine &NameStr, 2048 BasicBlock *InsertAtEnd) { 2049 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd); 2050 } 2051 2052 /// Return true if an extractelement instruction can be 2053 /// formed with the specified operands. 2054 static bool isValidOperands(const Value *Vec, const Value *Idx); 2055 2056 Value *getVectorOperand() { return Op<0>(); } 2057 Value *getIndexOperand() { return Op<1>(); } 2058 const Value *getVectorOperand() const { return Op<0>(); } 2059 const Value *getIndexOperand() const { return Op<1>(); } 2060 2061 VectorType *getVectorOperandType() const { 2062 return cast<VectorType>(getVectorOperand()->getType()); 2063 } 2064 2065 /// Transparently provide more efficient getOperand methods. 2066 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2067 2068 // Methods for support type inquiry through isa, cast, and dyn_cast: 2069 static bool classof(const Instruction *I) { 2070 return I->getOpcode() == Instruction::ExtractElement; 2071 } 2072 static bool classof(const Value *V) { 2073 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2074 } 2075 }; 2076 2077 template <> 2078 struct OperandTraits<ExtractElementInst> : 2079 public FixedNumOperandTraits<ExtractElementInst, 2> { 2080 }; 2081 2082 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value) 2083 2084 //===----------------------------------------------------------------------===// 2085 // InsertElementInst Class 2086 //===----------------------------------------------------------------------===// 2087 2088 /// This instruction inserts a single (scalar) 2089 /// element into a VectorType value 2090 /// 2091 class InsertElementInst : public Instruction { 2092 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, 2093 BasicBlock::iterator InsertBefore); 2094 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, 2095 const Twine &NameStr = "", 2096 Instruction *InsertBefore = nullptr); 2097 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, 2098 BasicBlock *InsertAtEnd); 2099 2100 protected: 2101 // Note: Instruction needs to be a friend here to call cloneImpl. 
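  // Illustrative sketch (the names `Vec`, `Scalar`, `IdxVal`, and `InsertPt`
  // are assumptions, not declared here): replacing one lane of a vector value
  // could be written as
  //
  //   Value *Updated =
  //       InsertElementInst::Create(Vec, Scalar, IdxVal, "vec.upd", InsertPt);
  //
  // Unlike extractelement, the result is a whole vector of the same type as
  // `Vec`, with a single lane replaced by `Scalar`.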
2102 friend class Instruction; 2103 2104 InsertElementInst *cloneImpl() const; 2105 2106 public: 2107 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, 2108 const Twine &NameStr, 2109 BasicBlock::iterator InsertBefore) { 2110 return new (3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); 2111 } 2112 2113 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, 2114 const Twine &NameStr = "", 2115 Instruction *InsertBefore = nullptr) { 2116 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); 2117 } 2118 2119 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, 2120 const Twine &NameStr, 2121 BasicBlock *InsertAtEnd) { 2122 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd); 2123 } 2124 2125 /// Return true if an insertelement instruction can be 2126 /// formed with the specified operands. 2127 static bool isValidOperands(const Value *Vec, const Value *NewElt, 2128 const Value *Idx); 2129 2130 /// Overload to return most specific vector type. 2131 /// 2132 VectorType *getType() const { 2133 return cast<VectorType>(Instruction::getType()); 2134 } 2135 2136 /// Transparently provide more efficient getOperand methods. 2137 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2138 2139 // Methods for support type inquiry through isa, cast, and dyn_cast: 2140 static bool classof(const Instruction *I) { 2141 return I->getOpcode() == Instruction::InsertElement; 2142 } 2143 static bool classof(const Value *V) { 2144 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2145 } 2146 }; 2147 2148 template <> 2149 struct OperandTraits<InsertElementInst> : 2150 public FixedNumOperandTraits<InsertElementInst, 3> { 2151 }; 2152 2153 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value) 2154 2155 //===----------------------------------------------------------------------===// 2156 // ShuffleVectorInst Class 2157 //===----------------------------------------------------------------------===// 2158 2159 constexpr int PoisonMaskElem = -1; 2160 2161 /// This instruction constructs a fixed permutation of two 2162 /// input vectors. 2163 /// 2164 /// For each element of the result vector, the shuffle mask selects an element 2165 /// from one of the input vectors to copy to the result. Non-negative elements 2166 /// in the mask represent an index into the concatenated pair of input vectors. 2167 /// PoisonMaskElem (-1) specifies that the result element is poison. 2168 /// 2169 /// For scalable vectors, all the elements of the mask must be 0 or -1. This 2170 /// requirement may be relaxed in the future. 2171 class ShuffleVectorInst : public Instruction { 2172 SmallVector<int, 4> ShuffleMask; 2173 Constant *ShuffleMaskForBitcode; 2174 2175 protected: 2176 // Note: Instruction needs to be a friend here to call cloneImpl. 
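  // Illustrative sketch (the names `A`, `B`, and `InsertPt` are assumptions,
  // not declared here): with <4 x i32> inputs, reversing the elements of `A`
  // could be written as
  //
  //   int RevMask[] = {3, 2, 1, 0};
  //   ShuffleVectorInst *Rev =
  //       new ShuffleVectorInst(A, B, RevMask, "rev", InsertPt);
  //
  // For this mask, Rev->isReverse() is true, and negative mask elements
  // (PoisonMaskElem) would mark the corresponding result lanes as poison.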
2177 friend class Instruction; 2178 2179 ShuffleVectorInst *cloneImpl() const; 2180 2181 public: 2182 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr, 2183 BasicBlock::iterator InsertBefore); 2184 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "", 2185 Instruction *InsertBefore = nullptr); 2186 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr, 2187 BasicBlock *InsertAtEnd); 2188 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr, 2189 BasicBlock::iterator InsertBefore); 2190 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "", 2191 Instruction *InsertBefore = nullptr); 2192 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr, 2193 BasicBlock *InsertAtEnd); 2194 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, const Twine &NameStr, 2195 BasicBlock::iterator InsertBefor); 2196 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 2197 const Twine &NameStr = "", 2198 Instruction *InsertBefor = nullptr); 2199 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 2200 const Twine &NameStr, BasicBlock *InsertAtEnd); 2201 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 2202 const Twine &NameStr, BasicBlock::iterator InsertBefor); 2203 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 2204 const Twine &NameStr = "", 2205 Instruction *InsertBefor = nullptr); 2206 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 2207 const Twine &NameStr, BasicBlock *InsertAtEnd); 2208 2209 void *operator new(size_t S) { return User::operator new(S, 2); } 2210 void operator delete(void *Ptr) { return User::operator delete(Ptr); } 2211 2212 /// Swap the operands and adjust the mask to preserve the semantics 2213 /// of the instruction. 2214 void commute(); 2215 2216 /// Return true if a shufflevector instruction can be 2217 /// formed with the specified operands. 2218 static bool isValidOperands(const Value *V1, const Value *V2, 2219 const Value *Mask); 2220 static bool isValidOperands(const Value *V1, const Value *V2, 2221 ArrayRef<int> Mask); 2222 2223 /// Overload to return most specific vector type. 2224 /// 2225 VectorType *getType() const { 2226 return cast<VectorType>(Instruction::getType()); 2227 } 2228 2229 /// Transparently provide more efficient getOperand methods. 2230 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2231 2232 /// Return the shuffle mask value of this instruction for the given element 2233 /// index. Return PoisonMaskElem if the element is undef. 2234 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; } 2235 2236 /// Convert the input shuffle mask operand to a vector of integers. Undefined 2237 /// elements of the mask are returned as PoisonMaskElem. 2238 static void getShuffleMask(const Constant *Mask, 2239 SmallVectorImpl<int> &Result); 2240 2241 /// Return the mask for this instruction as a vector of integers. Undefined 2242 /// elements of the mask are returned as PoisonMaskElem. 2243 void getShuffleMask(SmallVectorImpl<int> &Result) const { 2244 Result.assign(ShuffleMask.begin(), ShuffleMask.end()); 2245 } 2246 2247 /// Return the mask for this instruction, for use in bitcode. 2248 /// 2249 /// TODO: This is temporary until we decide a new bitcode encoding for 2250 /// shufflevector. 
2251 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; } 2252 2253 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask, 2254 Type *ResultTy); 2255 2256 void setShuffleMask(ArrayRef<int> Mask); 2257 2258 ArrayRef<int> getShuffleMask() const { return ShuffleMask; } 2259 2260 /// Return true if this shuffle returns a vector with a different number of 2261 /// elements than its source vectors. 2262 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3> 2263 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5> 2264 bool changesLength() const { 2265 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) 2266 ->getElementCount() 2267 .getKnownMinValue(); 2268 unsigned NumMaskElts = ShuffleMask.size(); 2269 return NumSourceElts != NumMaskElts; 2270 } 2271 2272 /// Return true if this shuffle returns a vector with a greater number of 2273 /// elements than its source vectors. 2274 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3> 2275 bool increasesLength() const { 2276 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) 2277 ->getElementCount() 2278 .getKnownMinValue(); 2279 unsigned NumMaskElts = ShuffleMask.size(); 2280 return NumSourceElts < NumMaskElts; 2281 } 2282 2283 /// Return true if this shuffle mask chooses elements from exactly one source 2284 /// vector. 2285 /// Example: <7,5,undef,7> 2286 /// This assumes that vector operands (of length \p NumSrcElts) are the same 2287 /// length as the mask. 2288 static bool isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts); 2289 static bool isSingleSourceMask(const Constant *Mask, int NumSrcElts) { 2290 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2291 SmallVector<int, 16> MaskAsInts; 2292 getShuffleMask(Mask, MaskAsInts); 2293 return isSingleSourceMask(MaskAsInts, NumSrcElts); 2294 } 2295 2296 /// Return true if this shuffle chooses elements from exactly one source 2297 /// vector without changing the length of that vector. 2298 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3> 2299 /// TODO: Optionally allow length-changing shuffles. 2300 bool isSingleSource() const { 2301 return !changesLength() && 2302 isSingleSourceMask(ShuffleMask, ShuffleMask.size()); 2303 } 2304 2305 /// Return true if this shuffle mask chooses elements from exactly one source 2306 /// vector without lane crossings. A shuffle using this mask is not 2307 /// necessarily a no-op because it may change the number of elements from its 2308 /// input vectors or it may provide demanded bits knowledge via undef lanes. 2309 /// Example: <undef,undef,2,3> 2310 static bool isIdentityMask(ArrayRef<int> Mask, int NumSrcElts); 2311 static bool isIdentityMask(const Constant *Mask, int NumSrcElts) { 2312 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2313 2314 // Not possible to express a shuffle mask for a scalable vector for this 2315 // case. 2316 if (isa<ScalableVectorType>(Mask->getType())) 2317 return false; 2318 2319 SmallVector<int, 16> MaskAsInts; 2320 getShuffleMask(Mask, MaskAsInts); 2321 return isIdentityMask(MaskAsInts, NumSrcElts); 2322 } 2323 2324 /// Return true if this shuffle chooses elements from exactly one source 2325 /// vector without lane crossings and does not change the number of elements 2326 /// from its input vectors. 2327 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef> 2328 bool isIdentity() const { 2329 // Not possible to express a shuffle mask for a scalable vector for this 2330 // case. 
    if (isa<ScalableVectorType>(getType()))
      return false;

    return !changesLength() && isIdentityMask(ShuffleMask, ShuffleMask.size());
  }

  /// Return true if this shuffle lengthens exactly one source vector with
  /// undefs in the high elements.
  bool isIdentityWithPadding() const;

  /// Return true if this shuffle extracts the first N elements of exactly one
  /// source vector.
  bool isIdentityWithExtract() const;

  /// Return true if this shuffle concatenates its 2 source vectors. This
  /// returns false if either input is undefined. In that case, the shuffle is
  /// better classified as an identity with padding operation.
  bool isConcat() const;

  /// Return true if this shuffle mask chooses elements from its source vectors
  /// without lane crossings. A shuffle using this mask would be
  /// equivalent to a vector select with a constant condition operand.
  /// Example: <4,1,6,undef>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// This assumes that vector operands are the same length as the mask
  /// (a length-changing shuffle can never be equivalent to a vector select).
  static bool isSelectMask(ArrayRef<int> Mask, int NumSrcElts);
  static bool isSelectMask(const Constant *Mask, int NumSrcElts) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSelectMask(MaskAsInts, NumSrcElts);
  }

  /// Return true if this shuffle chooses elements from its source vectors
  /// without lane crossings and all operands have the same number of elements.
  /// In other words, this shuffle is equivalent to a vector select with a
  /// constant condition operand.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// TODO: Optionally allow length-changing shuffles.
  bool isSelect() const {
    return !changesLength() && isSelectMask(ShuffleMask, ShuffleMask.size());
  }

  /// Return true if this shuffle mask swaps the order of elements from exactly
  /// one source vector.
  /// Example: <7,6,undef,4>
  /// This assumes that vector operands (of length \p NumSrcElts) are the same
  /// length as the mask.
  static bool isReverseMask(ArrayRef<int> Mask, int NumSrcElts);
  static bool isReverseMask(const Constant *Mask, int NumSrcElts) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isReverseMask(MaskAsInts, NumSrcElts);
  }

  /// Return true if this shuffle swaps the order of elements from exactly
  /// one source vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
  /// TODO: Optionally allow length-changing shuffles.
  bool isReverse() const {
    return !changesLength() && isReverseMask(ShuffleMask, ShuffleMask.size());
  }

  /// Return true if this shuffle mask chooses all elements with the same value
  /// as the first element of exactly one source vector.
2401 /// Example: <4,undef,undef,4> 2402 /// This assumes that vector operands (of length \p NumSrcElts) are the same 2403 /// length as the mask. 2404 static bool isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts); 2405 static bool isZeroEltSplatMask(const Constant *Mask, int NumSrcElts) { 2406 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2407 SmallVector<int, 16> MaskAsInts; 2408 getShuffleMask(Mask, MaskAsInts); 2409 return isZeroEltSplatMask(MaskAsInts, NumSrcElts); 2410 } 2411 2412 /// Return true if all elements of this shuffle are the same value as the 2413 /// first element of exactly one source vector without changing the length 2414 /// of that vector. 2415 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0> 2416 /// TODO: Optionally allow length-changing shuffles. 2417 /// TODO: Optionally allow splats from other elements. 2418 bool isZeroEltSplat() const { 2419 return !changesLength() && 2420 isZeroEltSplatMask(ShuffleMask, ShuffleMask.size()); 2421 } 2422 2423 /// Return true if this shuffle mask is a transpose mask. 2424 /// Transpose vector masks transpose a 2xn matrix. They read corresponding 2425 /// even- or odd-numbered vector elements from two n-dimensional source 2426 /// vectors and write each result into consecutive elements of an 2427 /// n-dimensional destination vector. Two shuffles are necessary to complete 2428 /// the transpose, one for the even elements and another for the odd elements. 2429 /// This description closely follows how the TRN1 and TRN2 AArch64 2430 /// instructions operate. 2431 /// 2432 /// For example, a simple 2x2 matrix can be transposed with: 2433 /// 2434 /// ; Original matrix 2435 /// m0 = < a, b > 2436 /// m1 = < c, d > 2437 /// 2438 /// ; Transposed matrix 2439 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 > 2440 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 > 2441 /// 2442 /// For matrices having greater than n columns, the resulting nx2 transposed 2443 /// matrix is stored in two result vectors such that one vector contains 2444 /// interleaved elements from all the even-numbered rows and the other vector 2445 /// contains interleaved elements from all the odd-numbered rows. For example, 2446 /// a 2x4 matrix can be transposed with: 2447 /// 2448 /// ; Original matrix 2449 /// m0 = < a, b, c, d > 2450 /// m1 = < e, f, g, h > 2451 /// 2452 /// ; Transposed matrix 2453 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 > 2454 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 > 2455 static bool isTransposeMask(ArrayRef<int> Mask, int NumSrcElts); 2456 static bool isTransposeMask(const Constant *Mask, int NumSrcElts) { 2457 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2458 SmallVector<int, 16> MaskAsInts; 2459 getShuffleMask(Mask, MaskAsInts); 2460 return isTransposeMask(MaskAsInts, NumSrcElts); 2461 } 2462 2463 /// Return true if this shuffle transposes the elements of its inputs without 2464 /// changing the length of the vectors. This operation may also be known as a 2465 /// merge or interleave. See the description for isTransposeMask() for the 2466 /// exact specification. 
2467 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6> 2468 bool isTranspose() const { 2469 return !changesLength() && isTransposeMask(ShuffleMask, ShuffleMask.size()); 2470 } 2471 2472 /// Return true if this shuffle mask is a splice mask, concatenating the two 2473 /// inputs together and then extracts an original width vector starting from 2474 /// the splice index. 2475 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4> 2476 /// This assumes that vector operands (of length \p NumSrcElts) are the same 2477 /// length as the mask. 2478 static bool isSpliceMask(ArrayRef<int> Mask, int NumSrcElts, int &Index); 2479 static bool isSpliceMask(const Constant *Mask, int NumSrcElts, int &Index) { 2480 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2481 SmallVector<int, 16> MaskAsInts; 2482 getShuffleMask(Mask, MaskAsInts); 2483 return isSpliceMask(MaskAsInts, NumSrcElts, Index); 2484 } 2485 2486 /// Return true if this shuffle splices two inputs without changing the length 2487 /// of the vectors. This operation concatenates the two inputs together and 2488 /// then extracts an original width vector starting from the splice index. 2489 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4> 2490 bool isSplice(int &Index) const { 2491 return !changesLength() && 2492 isSpliceMask(ShuffleMask, ShuffleMask.size(), Index); 2493 } 2494 2495 /// Return true if this shuffle mask is an extract subvector mask. 2496 /// A valid extract subvector mask returns a smaller vector from a single 2497 /// source operand. The base extraction index is returned as well. 2498 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, 2499 int &Index); 2500 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts, 2501 int &Index) { 2502 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2503 // Not possible to express a shuffle mask for a scalable vector for this 2504 // case. 2505 if (isa<ScalableVectorType>(Mask->getType())) 2506 return false; 2507 SmallVector<int, 16> MaskAsInts; 2508 getShuffleMask(Mask, MaskAsInts); 2509 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index); 2510 } 2511 2512 /// Return true if this shuffle mask is an extract subvector mask. 2513 bool isExtractSubvectorMask(int &Index) const { 2514 // Not possible to express a shuffle mask for a scalable vector for this 2515 // case. 2516 if (isa<ScalableVectorType>(getType())) 2517 return false; 2518 2519 int NumSrcElts = 2520 cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); 2521 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index); 2522 } 2523 2524 /// Return true if this shuffle mask is an insert subvector mask. 2525 /// A valid insert subvector mask inserts the lowest elements of a second 2526 /// source operand into an in-place first source operand. 2527 /// Both the sub vector width and the insertion index is returned. 2528 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, 2529 int &NumSubElts, int &Index); 2530 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts, 2531 int &NumSubElts, int &Index) { 2532 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2533 // Not possible to express a shuffle mask for a scalable vector for this 2534 // case. 
2535 if (isa<ScalableVectorType>(Mask->getType())) 2536 return false; 2537 SmallVector<int, 16> MaskAsInts; 2538 getShuffleMask(Mask, MaskAsInts); 2539 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index); 2540 } 2541 2542 /// Return true if this shuffle mask is an insert subvector mask. 2543 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const { 2544 // Not possible to express a shuffle mask for a scalable vector for this 2545 // case. 2546 if (isa<ScalableVectorType>(getType())) 2547 return false; 2548 2549 int NumSrcElts = 2550 cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); 2551 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index); 2552 } 2553 2554 /// Return true if this shuffle mask replicates each of the \p VF elements 2555 /// in a vector \p ReplicationFactor times. 2556 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is: 2557 /// <0,0,0,1,1,1,2,2,2,3,3,3> 2558 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor, 2559 int &VF); 2560 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor, 2561 int &VF) { 2562 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2563 // Not possible to express a shuffle mask for a scalable vector for this 2564 // case. 2565 if (isa<ScalableVectorType>(Mask->getType())) 2566 return false; 2567 SmallVector<int, 16> MaskAsInts; 2568 getShuffleMask(Mask, MaskAsInts); 2569 return isReplicationMask(MaskAsInts, ReplicationFactor, VF); 2570 } 2571 2572 /// Return true if this shuffle mask is a replication mask. 2573 bool isReplicationMask(int &ReplicationFactor, int &VF) const; 2574 2575 /// Return true if this shuffle mask represents "clustered" mask of size VF, 2576 /// i.e. each index between [0..VF) is used exactly once in each submask of 2577 /// size VF. 2578 /// For example, the mask for \p VF=4 is: 2579 /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4 2580 /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time. 2581 /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because 2582 /// element 3 is used twice in the second submask 2583 /// (3,3,1,0) and index 2 is not used at all. 2584 static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF); 2585 2586 /// Return true if this shuffle mask is a one-use-single-source("clustered") 2587 /// mask. 2588 bool isOneUseSingleSourceMask(int VF) const; 2589 2590 /// Change values in a shuffle permute mask assuming the two vector operands 2591 /// of length InVecNumElts have swapped position. 2592 static void commuteShuffleMask(MutableArrayRef<int> Mask, 2593 unsigned InVecNumElts) { 2594 for (int &Idx : Mask) { 2595 if (Idx == -1) 2596 continue; 2597 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts; 2598 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 && 2599 "shufflevector mask index out of range"); 2600 } 2601 } 2602 2603 /// Return if this shuffle interleaves its two input vectors together. 2604 bool isInterleave(unsigned Factor); 2605 2606 /// Return true if the mask interleaves one or more input vectors together. 2607 /// 2608 /// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...> 2609 /// E.g. For a Factor of 2 (LaneLen=4): 2610 /// <0, 4, 1, 5, 2, 6, 3, 7> 2611 /// E.g. For a Factor of 3 (LaneLen=4): 2612 /// <4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 12> 2613 /// E.g. 
For a Factor of 4 (LaneLen=2): 2614 /// <0, 2, 6, 4, 1, 3, 7, 5> 2615 /// 2616 /// NumInputElts is the total number of elements in the input vectors. 2617 /// 2618 /// StartIndexes are the first indexes of each vector being interleaved, 2619 /// substituting any indexes that were undef 2620 /// E.g. <4, -1, 2, 5, 1, 3> (Factor=3): StartIndexes=<4, 0, 2> 2621 /// 2622 /// Note that this does not check if the input vectors are consecutive: 2623 /// It will return true for masks such as 2624 /// <0, 4, 6, 1, 5, 7> (Factor=3, LaneLen=2) 2625 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor, 2626 unsigned NumInputElts, 2627 SmallVectorImpl<unsigned> &StartIndexes); 2628 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor, 2629 unsigned NumInputElts) { 2630 SmallVector<unsigned, 8> StartIndexes; 2631 return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes); 2632 } 2633 2634 /// Check if the mask is a DE-interleave mask of the given factor 2635 /// \p Factor like: 2636 /// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor> 2637 static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor, 2638 unsigned &Index); 2639 static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor) { 2640 unsigned Unused; 2641 return isDeInterleaveMaskOfFactor(Mask, Factor, Unused); 2642 } 2643 2644 /// Checks if the shuffle is a bit rotation of the first operand across 2645 /// multiple subelements, e.g: 2646 /// 2647 /// shuffle <8 x i8> %a, <8 x i8> poison, <8 x i32> <1, 0, 3, 2, 5, 4, 7, 6> 2648 /// 2649 /// could be expressed as 2650 /// 2651 /// rotl <4 x i16> %a, 8 2652 /// 2653 /// If it can be expressed as a rotation, returns the number of subelements to 2654 /// group by in NumSubElts and the number of bits to rotate left in RotateAmt. 2655 static bool isBitRotateMask(ArrayRef<int> Mask, unsigned EltSizeInBits, 2656 unsigned MinSubElts, unsigned MaxSubElts, 2657 unsigned &NumSubElts, unsigned &RotateAmt); 2658 2659 // Methods for support type inquiry through isa, cast, and dyn_cast: 2660 static bool classof(const Instruction *I) { 2661 return I->getOpcode() == Instruction::ShuffleVector; 2662 } 2663 static bool classof(const Value *V) { 2664 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2665 } 2666 }; 2667 2668 template <> 2669 struct OperandTraits<ShuffleVectorInst> 2670 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {}; 2671 2672 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value) 2673 2674 //===----------------------------------------------------------------------===// 2675 // ExtractValueInst Class 2676 //===----------------------------------------------------------------------===// 2677 2678 /// This instruction extracts a struct member or array 2679 /// element value from an aggregate value. 2680 /// 2681 class ExtractValueInst : public UnaryInstruction { 2682 SmallVector<unsigned, 4> Indices; 2683 2684 ExtractValueInst(const ExtractValueInst &EVI); 2685 2686 /// Constructors - Create a extractvalue instruction with a base aggregate 2687 /// value and a list of indices. The first and second ctor can optionally 2688 /// insert before an existing instruction, the third appends the new 2689 /// instruction to the specified BasicBlock. 
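  /// Clients normally use the public Create() methods instead; for example
  /// (illustrative, assuming \c StructVal is an existing value of type
  /// {i32, float} and \c InsertPt is a valid insertion point):
  /// \code
  ///   // Extract field 1 (the float) from the aggregate.
  ///   Value *Field = ExtractValueInst::Create(StructVal, {1}, "field", InsertPt);
  /// \endcode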
2690 inline ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs, 2691 const Twine &NameStr, 2692 BasicBlock::iterator InsertBefore); 2693 inline ExtractValueInst(Value *Agg, 2694 ArrayRef<unsigned> Idxs, 2695 const Twine &NameStr, 2696 Instruction *InsertBefore); 2697 inline ExtractValueInst(Value *Agg, 2698 ArrayRef<unsigned> Idxs, 2699 const Twine &NameStr, BasicBlock *InsertAtEnd); 2700 2701 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr); 2702 2703 protected: 2704 // Note: Instruction needs to be a friend here to call cloneImpl. 2705 friend class Instruction; 2706 2707 ExtractValueInst *cloneImpl() const; 2708 2709 public: 2710 static ExtractValueInst *Create(Value *Agg, ArrayRef<unsigned> Idxs, 2711 const Twine &NameStr, 2712 BasicBlock::iterator InsertBefore) { 2713 return new 2714 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore); 2715 } 2716 2717 static ExtractValueInst *Create(Value *Agg, 2718 ArrayRef<unsigned> Idxs, 2719 const Twine &NameStr = "", 2720 Instruction *InsertBefore = nullptr) { 2721 return new 2722 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore); 2723 } 2724 2725 static ExtractValueInst *Create(Value *Agg, 2726 ArrayRef<unsigned> Idxs, 2727 const Twine &NameStr, 2728 BasicBlock *InsertAtEnd) { 2729 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd); 2730 } 2731 2732 /// Returns the type of the element that would be extracted 2733 /// with an extractvalue instruction with the specified parameters. 2734 /// 2735 /// Null is returned if the indices are invalid for the specified type. 2736 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs); 2737 2738 using idx_iterator = const unsigned*; 2739 2740 inline idx_iterator idx_begin() const { return Indices.begin(); } 2741 inline idx_iterator idx_end() const { return Indices.end(); } 2742 inline iterator_range<idx_iterator> indices() const { 2743 return make_range(idx_begin(), idx_end()); 2744 } 2745 2746 Value *getAggregateOperand() { 2747 return getOperand(0); 2748 } 2749 const Value *getAggregateOperand() const { 2750 return getOperand(0); 2751 } 2752 static unsigned getAggregateOperandIndex() { 2753 return 0U; // get index for modifying correct operand 2754 } 2755 2756 ArrayRef<unsigned> getIndices() const { 2757 return Indices; 2758 } 2759 2760 unsigned getNumIndices() const { 2761 return (unsigned)Indices.size(); 2762 } 2763 2764 bool hasIndices() const { 2765 return true; 2766 } 2767 2768 // Methods for support type inquiry through isa, cast, and dyn_cast: 2769 static bool classof(const Instruction *I) { 2770 return I->getOpcode() == Instruction::ExtractValue; 2771 } 2772 static bool classof(const Value *V) { 2773 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2774 } 2775 }; 2776 2777 ExtractValueInst::ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs, 2778 const Twine &NameStr, 2779 BasicBlock::iterator InsertBefore) 2780 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), 2781 ExtractValue, Agg, InsertBefore) { 2782 init(Idxs, NameStr); 2783 } 2784 2785 ExtractValueInst::ExtractValueInst(Value *Agg, 2786 ArrayRef<unsigned> Idxs, 2787 const Twine &NameStr, 2788 Instruction *InsertBefore) 2789 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), 2790 ExtractValue, Agg, InsertBefore) { 2791 init(Idxs, NameStr); 2792 } 2793 2794 ExtractValueInst::ExtractValueInst(Value *Agg, 2795 ArrayRef<unsigned> Idxs, 2796 const Twine &NameStr, 2797 BasicBlock *InsertAtEnd) 2798 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), 
Idxs)), 2799 ExtractValue, Agg, InsertAtEnd) { 2800 init(Idxs, NameStr); 2801 } 2802 2803 //===----------------------------------------------------------------------===// 2804 // InsertValueInst Class 2805 //===----------------------------------------------------------------------===// 2806 2807 /// This instruction inserts a struct field of array element 2808 /// value into an aggregate value. 2809 /// 2810 class InsertValueInst : public Instruction { 2811 SmallVector<unsigned, 4> Indices; 2812 2813 InsertValueInst(const InsertValueInst &IVI); 2814 2815 /// Constructors - Create a insertvalue instruction with a base aggregate 2816 /// value, a value to insert, and a list of indices. The first and second ctor 2817 /// can optionally insert before an existing instruction, the third appends 2818 /// the new instruction to the specified BasicBlock. 2819 inline InsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, 2820 const Twine &NameStr, 2821 BasicBlock::iterator InsertBefore); 2822 inline InsertValueInst(Value *Agg, Value *Val, 2823 ArrayRef<unsigned> Idxs, 2824 const Twine &NameStr, 2825 Instruction *InsertBefore); 2826 inline InsertValueInst(Value *Agg, Value *Val, 2827 ArrayRef<unsigned> Idxs, 2828 const Twine &NameStr, BasicBlock *InsertAtEnd); 2829 2830 /// Constructors - These three constructors are convenience methods because 2831 /// one and two index insertvalue instructions are so common. 2832 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr, 2833 BasicBlock::iterator InsertBefore); 2834 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, 2835 const Twine &NameStr = "", 2836 Instruction *InsertBefore = nullptr); 2837 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr, 2838 BasicBlock *InsertAtEnd); 2839 2840 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, 2841 const Twine &NameStr); 2842 2843 protected: 2844 // Note: Instruction needs to be a friend here to call cloneImpl. 2845 friend class Instruction; 2846 2847 InsertValueInst *cloneImpl() const; 2848 2849 public: 2850 // allocate space for exactly two operands 2851 void *operator new(size_t S) { return User::operator new(S, 2); } 2852 void operator delete(void *Ptr) { User::operator delete(Ptr); } 2853 2854 static InsertValueInst *Create(Value *Agg, Value *Val, 2855 ArrayRef<unsigned> Idxs, const Twine &NameStr, 2856 BasicBlock::iterator InsertBefore) { 2857 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore); 2858 } 2859 2860 static InsertValueInst *Create(Value *Agg, Value *Val, 2861 ArrayRef<unsigned> Idxs, 2862 const Twine &NameStr = "", 2863 Instruction *InsertBefore = nullptr) { 2864 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore); 2865 } 2866 2867 static InsertValueInst *Create(Value *Agg, Value *Val, 2868 ArrayRef<unsigned> Idxs, 2869 const Twine &NameStr, 2870 BasicBlock *InsertAtEnd) { 2871 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd); 2872 } 2873 2874 /// Transparently provide more efficient getOperand methods. 
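  // Illustrative sketch (the names `StructVal`, `F`, and `InsertPt` are
  // assumptions, not declared here): overwriting field 1 of a {i32, float}
  // aggregate could be written as
  //
  //   Value *Updated =
  //       InsertValueInst::Create(StructVal, F, {1}, "updated", InsertPt);
  //
  // The result is a copy of the whole aggregate with element 1 replaced; the
  // original aggregate value is not modified.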
2875 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2876 2877 using idx_iterator = const unsigned*; 2878 2879 inline idx_iterator idx_begin() const { return Indices.begin(); } 2880 inline idx_iterator idx_end() const { return Indices.end(); } 2881 inline iterator_range<idx_iterator> indices() const { 2882 return make_range(idx_begin(), idx_end()); 2883 } 2884 2885 Value *getAggregateOperand() { 2886 return getOperand(0); 2887 } 2888 const Value *getAggregateOperand() const { 2889 return getOperand(0); 2890 } 2891 static unsigned getAggregateOperandIndex() { 2892 return 0U; // get index for modifying correct operand 2893 } 2894 2895 Value *getInsertedValueOperand() { 2896 return getOperand(1); 2897 } 2898 const Value *getInsertedValueOperand() const { 2899 return getOperand(1); 2900 } 2901 static unsigned getInsertedValueOperandIndex() { 2902 return 1U; // get index for modifying correct operand 2903 } 2904 2905 ArrayRef<unsigned> getIndices() const { 2906 return Indices; 2907 } 2908 2909 unsigned getNumIndices() const { 2910 return (unsigned)Indices.size(); 2911 } 2912 2913 bool hasIndices() const { 2914 return true; 2915 } 2916 2917 // Methods for support type inquiry through isa, cast, and dyn_cast: 2918 static bool classof(const Instruction *I) { 2919 return I->getOpcode() == Instruction::InsertValue; 2920 } 2921 static bool classof(const Value *V) { 2922 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2923 } 2924 }; 2925 2926 template <> 2927 struct OperandTraits<InsertValueInst> : 2928 public FixedNumOperandTraits<InsertValueInst, 2> { 2929 }; 2930 2931 InsertValueInst::InsertValueInst(Value *Agg, 2932 Value *Val, 2933 ArrayRef<unsigned> Idxs, 2934 const Twine &NameStr, 2935 BasicBlock::iterator InsertBefore) 2936 : Instruction(Agg->getType(), InsertValue, OperandTraits<InsertValueInst>::op_begin(this), 2937 2, InsertBefore) { 2938 init(Agg, Val, Idxs, NameStr); 2939 } 2940 2941 InsertValueInst::InsertValueInst(Value *Agg, 2942 Value *Val, 2943 ArrayRef<unsigned> Idxs, 2944 const Twine &NameStr, 2945 Instruction *InsertBefore) 2946 : Instruction(Agg->getType(), InsertValue, 2947 OperandTraits<InsertValueInst>::op_begin(this), 2948 2, InsertBefore) { 2949 init(Agg, Val, Idxs, NameStr); 2950 } 2951 2952 InsertValueInst::InsertValueInst(Value *Agg, 2953 Value *Val, 2954 ArrayRef<unsigned> Idxs, 2955 const Twine &NameStr, 2956 BasicBlock *InsertAtEnd) 2957 : Instruction(Agg->getType(), InsertValue, 2958 OperandTraits<InsertValueInst>::op_begin(this), 2959 2, InsertAtEnd) { 2960 init(Agg, Val, Idxs, NameStr); 2961 } 2962 2963 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value) 2964 2965 //===----------------------------------------------------------------------===// 2966 // PHINode Class 2967 //===----------------------------------------------------------------------===// 2968 2969 // PHINode - The PHINode class is used to represent the magical mystical PHI 2970 // node, that can not exist in nature, but can be synthesized in a computer 2971 // scientist's overactive imagination. 2972 // 2973 class PHINode : public Instruction { 2974 /// The number of operands actually allocated. NumOperands is 2975 /// the number actually in use. 
2976 unsigned ReservedSpace; 2977 2978 PHINode(const PHINode &PN); 2979 2980 explicit PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, 2981 BasicBlock::iterator InsertBefore) 2982 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore), 2983 ReservedSpace(NumReservedValues) { 2984 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!"); 2985 setName(NameStr); 2986 allocHungoffUses(ReservedSpace); 2987 } 2988 2989 explicit PHINode(Type *Ty, unsigned NumReservedValues, 2990 const Twine &NameStr = "", 2991 Instruction *InsertBefore = nullptr) 2992 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore), 2993 ReservedSpace(NumReservedValues) { 2994 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!"); 2995 setName(NameStr); 2996 allocHungoffUses(ReservedSpace); 2997 } 2998 2999 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, 3000 BasicBlock *InsertAtEnd) 3001 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd), 3002 ReservedSpace(NumReservedValues) { 3003 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!"); 3004 setName(NameStr); 3005 allocHungoffUses(ReservedSpace); 3006 } 3007 3008 protected: 3009 // Note: Instruction needs to be a friend here to call cloneImpl. 3010 friend class Instruction; 3011 3012 PHINode *cloneImpl() const; 3013 3014 // allocHungoffUses - this is more complicated than the generic 3015 // User::allocHungoffUses, because we have to allocate Uses for the incoming 3016 // values and pointers to the incoming blocks, all in one allocation. 3017 void allocHungoffUses(unsigned N) { 3018 User::allocHungoffUses(N, /* IsPhi */ true); 3019 } 3020 3021 public: 3022 /// Constructors - NumReservedValues is a hint for the number of incoming 3023 /// edges that this phi node will have (use 0 if you really have no idea). 3024 static PHINode *Create(Type *Ty, unsigned NumReservedValues, 3025 const Twine &NameStr, 3026 BasicBlock::iterator InsertBefore) { 3027 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore); 3028 } 3029 3030 static PHINode *Create(Type *Ty, unsigned NumReservedValues, 3031 const Twine &NameStr = "", 3032 Instruction *InsertBefore = nullptr) { 3033 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore); 3034 } 3035 3036 static PHINode *Create(Type *Ty, unsigned NumReservedValues, 3037 const Twine &NameStr, BasicBlock *InsertAtEnd) { 3038 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd); 3039 } 3040 3041 /// Provide fast operand accessors 3042 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3043 3044 // Block iterator interface. This provides access to the list of incoming 3045 // basic blocks, which parallels the list of incoming values. 3046 // Please note that we are not providing non-const iterators for blocks to 3047 // force all updates go through an interface function. 
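// A hedged usage sketch (names such as 'MergeBB', 'ThenBB', 'ElseBB', 'ThenV'
// and 'ElseV' are assumed, not defined here): incoming values and incoming
// blocks form parallel lists, so they are added and walked together.
//
//   PHINode *PN = PHINode::Create(ThenV->getType(), /*NumReservedValues=*/2,
//                                 "merge", MergeBB->begin());
//   PN->addIncoming(ThenV, ThenBB);
//   PN->addIncoming(ElseV, ElseBB);
//   for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I)
//     assert(PN->getIncomingBlock(I)); // block I pairs with value I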
3048 3049 using block_iterator = BasicBlock **; 3050 using const_block_iterator = BasicBlock * const *; 3051 3052 const_block_iterator block_begin() const { 3053 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace); 3054 } 3055 3056 const_block_iterator block_end() const { 3057 return block_begin() + getNumOperands(); 3058 } 3059 3060 iterator_range<const_block_iterator> blocks() const { 3061 return make_range(block_begin(), block_end()); 3062 } 3063 3064 op_range incoming_values() { return operands(); } 3065 3066 const_op_range incoming_values() const { return operands(); } 3067 3068 /// Return the number of incoming edges 3069 /// 3070 unsigned getNumIncomingValues() const { return getNumOperands(); } 3071 3072 /// Return incoming value number x 3073 /// 3074 Value *getIncomingValue(unsigned i) const { 3075 return getOperand(i); 3076 } 3077 void setIncomingValue(unsigned i, Value *V) { 3078 assert(V && "PHI node got a null value!"); 3079 assert(getType() == V->getType() && 3080 "All operands to PHI node must be the same type as the PHI node!"); 3081 setOperand(i, V); 3082 } 3083 3084 static unsigned getOperandNumForIncomingValue(unsigned i) { 3085 return i; 3086 } 3087 3088 static unsigned getIncomingValueNumForOperand(unsigned i) { 3089 return i; 3090 } 3091 3092 /// Return incoming basic block number @p i. 3093 /// 3094 BasicBlock *getIncomingBlock(unsigned i) const { 3095 return block_begin()[i]; 3096 } 3097 3098 /// Return incoming basic block corresponding 3099 /// to an operand of the PHI. 3100 /// 3101 BasicBlock *getIncomingBlock(const Use &U) const { 3102 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?"); 3103 return getIncomingBlock(unsigned(&U - op_begin())); 3104 } 3105 3106 /// Return incoming basic block corresponding 3107 /// to value use iterator. 3108 /// 3109 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const { 3110 return getIncomingBlock(I.getUse()); 3111 } 3112 3113 void setIncomingBlock(unsigned i, BasicBlock *BB) { 3114 const_cast<block_iterator>(block_begin())[i] = BB; 3115 } 3116 3117 /// Copies the basic blocks from \p BBRange to the incoming basic block list 3118 /// of this PHINode, starting at \p ToIdx. 3119 void copyIncomingBlocks(iterator_range<const_block_iterator> BBRange, 3120 uint32_t ToIdx = 0) { 3121 copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx); 3122 } 3123 3124 /// Replace every incoming basic block \p Old to basic block \p New. 3125 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) { 3126 assert(New && Old && "PHI node got a null basic block!"); 3127 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) 3128 if (getIncomingBlock(Op) == Old) 3129 setIncomingBlock(Op, New); 3130 } 3131 3132 /// Add an incoming value to the end of the PHI list 3133 /// 3134 void addIncoming(Value *V, BasicBlock *BB) { 3135 if (getNumOperands() == ReservedSpace) 3136 growOperands(); // Get more space! 3137 // Initialize some new operands. 3138 setNumHungOffUseOperands(getNumOperands() + 1); 3139 setIncomingValue(getNumOperands() - 1, V); 3140 setIncomingBlock(getNumOperands() - 1, BB); 3141 } 3142 3143 /// Remove an incoming value. This is useful if a 3144 /// predecessor basic block is deleted. The value removed is returned. 3145 /// 3146 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty 3147 /// is true), the PHI node is destroyed and any uses of it are replaced with 3148 /// dummy values. 
The only time there should be zero incoming values to a PHI 3149 /// node is when the block is dead, so this strategy is sound. 3150 /// 3151 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true); 3152 3153 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) { 3154 int Idx = getBasicBlockIndex(BB); 3155 assert(Idx >= 0 && "Invalid basic block argument to remove!"); 3156 return removeIncomingValue(Idx, DeletePHIIfEmpty); 3157 } 3158 3159 /// Remove all incoming values for which the predicate returns true. 3160 /// The predicate accepts the incoming value index. 3161 void removeIncomingValueIf(function_ref<bool(unsigned)> Predicate, 3162 bool DeletePHIIfEmpty = true); 3163 3164 /// Return the first index of the specified basic 3165 /// block in the value list for this PHI. Returns -1 if no instance. 3166 /// 3167 int getBasicBlockIndex(const BasicBlock *BB) const { 3168 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) 3169 if (block_begin()[i] == BB) 3170 return i; 3171 return -1; 3172 } 3173 3174 Value *getIncomingValueForBlock(const BasicBlock *BB) const { 3175 int Idx = getBasicBlockIndex(BB); 3176 assert(Idx >= 0 && "Invalid basic block argument!"); 3177 return getIncomingValue(Idx); 3178 } 3179 3180 /// Set every incoming value(s) for block \p BB to \p V. 3181 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) { 3182 assert(BB && "PHI node got a null basic block!"); 3183 bool Found = false; 3184 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) 3185 if (getIncomingBlock(Op) == BB) { 3186 Found = true; 3187 setIncomingValue(Op, V); 3188 } 3189 (void)Found; 3190 assert(Found && "Invalid basic block argument to set!"); 3191 } 3192 3193 /// If the specified PHI node always merges together the 3194 /// same value, return the value, otherwise return null. 3195 Value *hasConstantValue() const; 3196 3197 /// Whether the specified PHI node always merges 3198 /// together the same value, assuming undefs are equal to a unique 3199 /// non-undef value. 3200 bool hasConstantOrUndefValue() const; 3201 3202 /// If the PHI node is complete which means all of its parent's predecessors 3203 /// have incoming value in this PHI, return true, otherwise return false. 3204 bool isComplete() const { 3205 return llvm::all_of(predecessors(getParent()), 3206 [this](const BasicBlock *Pred) { 3207 return getBasicBlockIndex(Pred) >= 0; 3208 }); 3209 } 3210 3211 /// Methods for support type inquiry through isa, cast, and dyn_cast: 3212 static bool classof(const Instruction *I) { 3213 return I->getOpcode() == Instruction::PHI; 3214 } 3215 static bool classof(const Value *V) { 3216 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3217 } 3218 3219 private: 3220 void growOperands(); 3221 }; 3222 3223 template <> 3224 struct OperandTraits<PHINode> : public HungoffOperandTraits<2> { 3225 }; 3226 3227 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value) 3228 3229 //===----------------------------------------------------------------------===// 3230 // LandingPadInst Class 3231 //===----------------------------------------------------------------------===// 3232 3233 //===--------------------------------------------------------------------------- 3234 /// The landingpad instruction holds all of the information 3235 /// necessary to generate correct exception handling. The landingpad instruction 3236 /// cannot be moved from the top of a landing pad block, which itself is 3237 /// accessible only from the 'unwind' edge of an invoke. 
This uses the 3238 /// SubclassData field in Value to store whether or not the landingpad is a 3239 /// cleanup. 3240 /// 3241 class LandingPadInst : public Instruction { 3242 using CleanupField = BoolBitfieldElementT<0>; 3243 3244 /// The number of operands actually allocated. NumOperands is 3245 /// the number actually in use. 3246 unsigned ReservedSpace; 3247 3248 LandingPadInst(const LandingPadInst &LP); 3249 3250 public: 3251 enum ClauseType { Catch, Filter }; 3252 3253 private: 3254 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, 3255 const Twine &NameStr, 3256 BasicBlock::iterator InsertBefore); 3257 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, 3258 const Twine &NameStr, Instruction *InsertBefore); 3259 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, 3260 const Twine &NameStr, BasicBlock *InsertAtEnd); 3261 3262 // Allocate space for exactly zero operands. 3263 void *operator new(size_t S) { return User::operator new(S); } 3264 3265 void growOperands(unsigned Size); 3266 void init(unsigned NumReservedValues, const Twine &NameStr); 3267 3268 protected: 3269 // Note: Instruction needs to be a friend here to call cloneImpl. 3270 friend class Instruction; 3271 3272 LandingPadInst *cloneImpl() const; 3273 3274 public: 3275 void operator delete(void *Ptr) { User::operator delete(Ptr); } 3276 3277 /// Constructors - NumReservedClauses is a hint for the number of incoming 3278 /// clauses that this landingpad will have (use 0 if you really have no idea). 3279 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, 3280 const Twine &NameStr, 3281 BasicBlock::iterator InsertBefore); 3282 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, 3283 const Twine &NameStr = "", 3284 Instruction *InsertBefore = nullptr); 3285 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, 3286 const Twine &NameStr, BasicBlock *InsertAtEnd); 3287 3288 /// Provide fast operand accessors 3289 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3290 3291 /// Return 'true' if this landingpad instruction is a 3292 /// cleanup. I.e., it should be run when unwinding even if its landing pad 3293 /// doesn't catch the exception. 3294 bool isCleanup() const { return getSubclassData<CleanupField>(); } 3295 3296 /// Indicate that this landingpad instruction is a cleanup. 3297 void setCleanup(bool V) { setSubclassData<CleanupField>(V); } 3298 3299 /// Add a catch or filter clause to the landing pad. 3300 void addClause(Constant *ClauseVal); 3301 3302 /// Get the value of the clause at index Idx. Use isCatch/isFilter to 3303 /// determine what type of clause this is. 3304 Constant *getClause(unsigned Idx) const { 3305 return cast<Constant>(getOperandList()[Idx]); 3306 } 3307 3308 /// Return 'true' if the clause and index Idx is a catch clause. 3309 bool isCatch(unsigned Idx) const { 3310 return !isa<ArrayType>(getOperandList()[Idx]->getType()); 3311 } 3312 3313 /// Return 'true' if the clause and index Idx is a filter clause. 3314 bool isFilter(unsigned Idx) const { 3315 return isa<ArrayType>(getOperandList()[Idx]->getType()); 3316 } 3317 3318 /// Get the number of clauses for this landing pad. 3319 unsigned getNumClauses() const { return getNumOperands(); } 3320 3321 /// Grow the size of the operand list to accommodate the new 3322 /// number of clauses. 
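///
/// A hedged sketch of typical use when copying clauses from another landing
/// pad ('LP' and 'OtherLP' are assumed to exist elsewhere, e.g. during
/// inlining):
/// \code
///   LP->reserveClauses(OtherLP->getNumClauses());
///   for (unsigned I = 0, E = OtherLP->getNumClauses(); I != E; ++I)
///     LP->addClause(OtherLP->getClause(I));
/// \endcode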
3323 void reserveClauses(unsigned Size) { growOperands(Size); } 3324 3325 // Methods for support type inquiry through isa, cast, and dyn_cast: 3326 static bool classof(const Instruction *I) { 3327 return I->getOpcode() == Instruction::LandingPad; 3328 } 3329 static bool classof(const Value *V) { 3330 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3331 } 3332 }; 3333 3334 template <> 3335 struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> { 3336 }; 3337 3338 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value) 3339 3340 //===----------------------------------------------------------------------===// 3341 // ReturnInst Class 3342 //===----------------------------------------------------------------------===// 3343 3344 //===--------------------------------------------------------------------------- 3345 /// Return a value (possibly void), from a function. Execution 3346 /// does not continue in this function any longer. 3347 /// 3348 class ReturnInst : public Instruction { 3349 ReturnInst(const ReturnInst &RI); 3350 3351 private: 3352 // ReturnInst constructors: 3353 // ReturnInst() - 'ret void' instruction 3354 // ReturnInst( null) - 'ret void' instruction 3355 // ReturnInst(Value* X) - 'ret X' instruction 3356 // ReturnInst(null, Iterator It) - 'ret void' instruction, insert before I 3357 // ReturnInst(Value* X, Iterator It) - 'ret X' instruction, insert before I 3358 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I 3359 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I 3360 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B 3361 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B 3362 // 3363 // NOTE: If the Value* passed is of type void then the constructor behaves as 3364 // if it was passed NULL. 3365 explicit ReturnInst(LLVMContext &C, Value *retVal, 3366 BasicBlock::iterator InsertBefore); 3367 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr, 3368 Instruction *InsertBefore = nullptr); 3369 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd); 3370 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd); 3371 3372 protected: 3373 // Note: Instruction needs to be a friend here to call cloneImpl. 3374 friend class Instruction; 3375 3376 ReturnInst *cloneImpl() const; 3377 3378 public: 3379 static ReturnInst *Create(LLVMContext &C, Value *retVal, 3380 BasicBlock::iterator InsertBefore) { 3381 return new (!!retVal) ReturnInst(C, retVal, InsertBefore); 3382 } 3383 3384 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr, 3385 Instruction *InsertBefore = nullptr) { 3386 return new(!!retVal) ReturnInst(C, retVal, InsertBefore); 3387 } 3388 3389 static ReturnInst* Create(LLVMContext &C, Value *retVal, 3390 BasicBlock *InsertAtEnd) { 3391 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd); 3392 } 3393 3394 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) { 3395 return new(0) ReturnInst(C, InsertAtEnd); 3396 } 3397 3398 /// Provide fast operand accessors 3399 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3400 3401 /// Convenience accessor. Returns null if there is no return value. 3402 Value *getReturnValue() const { 3403 return getNumOperands() != 0 ? 
getOperand(0) : nullptr; 3404 } 3405 3406 unsigned getNumSuccessors() const { return 0; } 3407 3408 // Methods for support type inquiry through isa, cast, and dyn_cast: 3409 static bool classof(const Instruction *I) { 3410 return (I->getOpcode() == Instruction::Ret); 3411 } 3412 static bool classof(const Value *V) { 3413 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3414 } 3415 3416 private: 3417 BasicBlock *getSuccessor(unsigned idx) const { 3418 llvm_unreachable("ReturnInst has no successors!"); 3419 } 3420 3421 void setSuccessor(unsigned idx, BasicBlock *B) { 3422 llvm_unreachable("ReturnInst has no successors!"); 3423 } 3424 }; 3425 3426 template <> 3427 struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> { 3428 }; 3429 3430 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value) 3431 3432 //===----------------------------------------------------------------------===// 3433 // BranchInst Class 3434 //===----------------------------------------------------------------------===// 3435 3436 //===--------------------------------------------------------------------------- 3437 /// Conditional or Unconditional Branch instruction. 3438 /// 3439 class BranchInst : public Instruction { 3440 /// Ops list - Branches are strange. The operands are ordered: 3441 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because 3442 /// they don't have to check for cond/uncond branchness. These are mostly 3443 /// accessed relative from op_end(). 3444 BranchInst(const BranchInst &BI); 3445 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition): 3446 // BranchInst(BB *B) - 'br B' 3447 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F' 3448 // BranchInst(BB* B, Iter It) - 'br B' insert before I 3449 // BranchInst(BB* T, BB *F, Value *C, Iter It) - 'br C, T, F', insert before I 3450 // BranchInst(BB* B, Inst *I) - 'br B' insert before I 3451 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I 3452 // BranchInst(BB* B, BB *I) - 'br B' insert at end 3453 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end 3454 explicit BranchInst(BasicBlock *IfTrue, BasicBlock::iterator InsertBefore); 3455 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, 3456 BasicBlock::iterator InsertBefore); 3457 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr); 3458 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, 3459 Instruction *InsertBefore = nullptr); 3460 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd); 3461 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, 3462 BasicBlock *InsertAtEnd); 3463 3464 void AssertOK(); 3465 3466 protected: 3467 // Note: Instruction needs to be a friend here to call cloneImpl. 3468 friend class Instruction; 3469 3470 BranchInst *cloneImpl() const; 3471 3472 public: 3473 /// Iterator type that casts an operand to a basic block. 3474 /// 3475 /// This only makes sense because the successors are stored as adjacent 3476 /// operands for branch instructions. 
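///
/// Illustrative sketch (a BranchInst 'Br' is assumed): the successors() range
/// declared further below is built from this iterator type.
/// \code
///   for (BasicBlock *Succ : Br->successors())
///     errs() << Succ->getName() << "\n";
/// \endcode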
3477 struct succ_op_iterator 3478 : iterator_adaptor_base<succ_op_iterator, value_op_iterator, 3479 std::random_access_iterator_tag, BasicBlock *, 3480 ptrdiff_t, BasicBlock *, BasicBlock *> { 3481 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} 3482 3483 BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 3484 BasicBlock *operator->() const { return operator*(); } 3485 }; 3486 3487 /// The const version of `succ_op_iterator`. 3488 struct const_succ_op_iterator 3489 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, 3490 std::random_access_iterator_tag, 3491 const BasicBlock *, ptrdiff_t, const BasicBlock *, 3492 const BasicBlock *> { 3493 explicit const_succ_op_iterator(const_value_op_iterator I) 3494 : iterator_adaptor_base(I) {} 3495 3496 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 3497 const BasicBlock *operator->() const { return operator*(); } 3498 }; 3499 3500 static BranchInst *Create(BasicBlock *IfTrue, 3501 BasicBlock::iterator InsertBefore) { 3502 return new(1) BranchInst(IfTrue, InsertBefore); 3503 } 3504 3505 static BranchInst *Create(BasicBlock *IfTrue, 3506 Instruction *InsertBefore = nullptr) { 3507 return new(1) BranchInst(IfTrue, InsertBefore); 3508 } 3509 3510 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, 3511 Value *Cond, BasicBlock::iterator InsertBefore) { 3512 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore); 3513 } 3514 3515 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, 3516 Value *Cond, Instruction *InsertBefore = nullptr) { 3517 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore); 3518 } 3519 3520 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) { 3521 return new(1) BranchInst(IfTrue, InsertAtEnd); 3522 } 3523 3524 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, 3525 Value *Cond, BasicBlock *InsertAtEnd) { 3526 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd); 3527 } 3528 3529 /// Transparently provide more efficient getOperand methods. 3530 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3531 3532 bool isUnconditional() const { return getNumOperands() == 1; } 3533 bool isConditional() const { return getNumOperands() == 3; } 3534 3535 Value *getCondition() const { 3536 assert(isConditional() && "Cannot get condition of an uncond branch!"); 3537 return Op<-3>(); 3538 } 3539 3540 void setCondition(Value *V) { 3541 assert(isConditional() && "Cannot set condition of unconditional branch!"); 3542 Op<-3>() = V; 3543 } 3544 3545 unsigned getNumSuccessors() const { return 1+isConditional(); } 3546 3547 BasicBlock *getSuccessor(unsigned i) const { 3548 assert(i < getNumSuccessors() && "Successor # out of range for Branch!"); 3549 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get()); 3550 } 3551 3552 void setSuccessor(unsigned idx, BasicBlock *NewSucc) { 3553 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!"); 3554 *(&Op<-1>() - idx) = NewSucc; 3555 } 3556 3557 /// Swap the successors of this branch instruction. 3558 /// 3559 /// Swaps the successors of the branch instruction. This also swaps any 3560 /// branch weight metadata associated with the instruction so that it 3561 /// continues to map correctly to each operand. 3562 void swapSuccessors(); 3563 3564 iterator_range<succ_op_iterator> successors() { 3565 return make_range( 3566 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 
1 : 0)), 3567 succ_op_iterator(value_op_end())); 3568 } 3569 3570 iterator_range<const_succ_op_iterator> successors() const { 3571 return make_range(const_succ_op_iterator( 3572 std::next(value_op_begin(), isConditional() ? 1 : 0)), 3573 const_succ_op_iterator(value_op_end())); 3574 } 3575 3576 // Methods for support type inquiry through isa, cast, and dyn_cast: 3577 static bool classof(const Instruction *I) { 3578 return (I->getOpcode() == Instruction::Br); 3579 } 3580 static bool classof(const Value *V) { 3581 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3582 } 3583 }; 3584 3585 template <> 3586 struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> { 3587 }; 3588 3589 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value) 3590 3591 //===----------------------------------------------------------------------===// 3592 // SwitchInst Class 3593 //===----------------------------------------------------------------------===// 3594 3595 //===--------------------------------------------------------------------------- 3596 /// Multiway switch 3597 /// 3598 class SwitchInst : public Instruction { 3599 unsigned ReservedSpace; 3600 3601 // Operand[0] = Value to switch on 3602 // Operand[1] = Default basic block destination 3603 // Operand[2n ] = Value to match 3604 // Operand[2n+1] = BasicBlock to go to on match 3605 SwitchInst(const SwitchInst &SI); 3606 3607 /// Create a new switch instruction, specifying a value to switch on and a 3608 /// default destination. The number of additional cases can be specified here 3609 /// to make memory allocation more efficient. This constructor can also 3610 /// auto-insert before another instruction. 3611 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3612 BasicBlock::iterator InsertBefore); 3613 3614 /// Create a new switch instruction, specifying a value to switch on and a 3615 /// default destination. The number of additional cases can be specified here 3616 /// to make memory allocation more efficient. This constructor can also 3617 /// auto-insert before another instruction. 3618 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3619 Instruction *InsertBefore); 3620 3621 /// Create a new switch instruction, specifying a value to switch on and a 3622 /// default destination. The number of additional cases can be specified here 3623 /// to make memory allocation more efficient. This constructor also 3624 /// auto-inserts at the end of the specified BasicBlock. 3625 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3626 BasicBlock *InsertAtEnd); 3627 3628 // allocate space for exactly zero operands 3629 void *operator new(size_t S) { return User::operator new(S); } 3630 3631 void init(Value *Value, BasicBlock *Default, unsigned NumReserved); 3632 void growOperands(); 3633 3634 protected: 3635 // Note: Instruction needs to be a friend here to call cloneImpl. 3636 friend class Instruction; 3637 3638 SwitchInst *cloneImpl() const; 3639 3640 public: 3641 void operator delete(void *Ptr) { User::operator delete(Ptr); } 3642 3643 // -2 3644 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1); 3645 3646 template <typename CaseHandleT> class CaseIteratorImpl; 3647 3648 /// A handle to a particular switch case. It exposes a convenient interface 3649 /// to both the case value and the successor block. 3650 /// 3651 /// We define this as a template and instantiate it to form both a const and 3652 /// non-const handle. 
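///
/// Hedged usage sketch ('SI' is assumed to be a SwitchInst* and 'Key' a
/// ConstantInt* defined elsewhere); handles are normally reached through the
/// case iterators declared below.
/// \code
///   SwitchInst::CaseIt CI = SI->findCaseValue(Key);
///   BasicBlock *Target = CI->getCaseSuccessor(); // default dest if Key is not a case
/// \endcode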
3653 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT> 3654 class CaseHandleImpl { 3655 // Directly befriend both const and non-const iterators. 3656 friend class SwitchInst::CaseIteratorImpl< 3657 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>; 3658 3659 protected: 3660 // Expose the switch type we're parameterized with to the iterator. 3661 using SwitchInstType = SwitchInstT; 3662 3663 SwitchInstT *SI; 3664 ptrdiff_t Index; 3665 3666 CaseHandleImpl() = default; 3667 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {} 3668 3669 public: 3670 /// Resolves case value for current case. 3671 ConstantIntT *getCaseValue() const { 3672 assert((unsigned)Index < SI->getNumCases() && 3673 "Index out the number of cases."); 3674 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2)); 3675 } 3676 3677 /// Resolves successor for current case. 3678 BasicBlockT *getCaseSuccessor() const { 3679 assert(((unsigned)Index < SI->getNumCases() || 3680 (unsigned)Index == DefaultPseudoIndex) && 3681 "Index out the number of cases."); 3682 return SI->getSuccessor(getSuccessorIndex()); 3683 } 3684 3685 /// Returns number of current case. 3686 unsigned getCaseIndex() const { return Index; } 3687 3688 /// Returns successor index for current case successor. 3689 unsigned getSuccessorIndex() const { 3690 assert(((unsigned)Index == DefaultPseudoIndex || 3691 (unsigned)Index < SI->getNumCases()) && 3692 "Index out the number of cases."); 3693 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0; 3694 } 3695 3696 bool operator==(const CaseHandleImpl &RHS) const { 3697 assert(SI == RHS.SI && "Incompatible operators."); 3698 return Index == RHS.Index; 3699 } 3700 }; 3701 3702 using ConstCaseHandle = 3703 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>; 3704 3705 class CaseHandle 3706 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> { 3707 friend class SwitchInst::CaseIteratorImpl<CaseHandle>; 3708 3709 public: 3710 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {} 3711 3712 /// Sets the new value for current case. 3713 void setValue(ConstantInt *V) const { 3714 assert((unsigned)Index < SI->getNumCases() && 3715 "Index out the number of cases."); 3716 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V)); 3717 } 3718 3719 /// Sets the new successor for current case. 3720 void setSuccessor(BasicBlock *S) const { 3721 SI->setSuccessor(getSuccessorIndex(), S); 3722 } 3723 }; 3724 3725 template <typename CaseHandleT> 3726 class CaseIteratorImpl 3727 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>, 3728 std::random_access_iterator_tag, 3729 const CaseHandleT> { 3730 using SwitchInstT = typename CaseHandleT::SwitchInstType; 3731 3732 CaseHandleT Case; 3733 3734 public: 3735 /// Default constructed iterator is in an invalid state until assigned to 3736 /// a case for a particular switch. 3737 CaseIteratorImpl() = default; 3738 3739 /// Initializes case iterator for given SwitchInst and for given 3740 /// case number. 3741 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {} 3742 3743 /// Initializes case iterator for given SwitchInst and for given 3744 /// successor index. 3745 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI, 3746 unsigned SuccessorIndex) { 3747 assert(SuccessorIndex < SI->getNumSuccessors() && 3748 "Successor index # out of range!"); 3749 return SuccessorIndex != 0 ? 
CaseIteratorImpl(SI, SuccessorIndex - 1) 3750 : CaseIteratorImpl(SI, DefaultPseudoIndex); 3751 } 3752 3753 /// Support converting to the const variant. This will be a no-op for const 3754 /// variant. 3755 operator CaseIteratorImpl<ConstCaseHandle>() const { 3756 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index); 3757 } 3758 3759 CaseIteratorImpl &operator+=(ptrdiff_t N) { 3760 // Check index correctness after addition. 3761 // Note: Index == getNumCases() means end(). 3762 assert(Case.Index + N >= 0 && 3763 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && 3764 "Case.Index out the number of cases."); 3765 Case.Index += N; 3766 return *this; 3767 } 3768 CaseIteratorImpl &operator-=(ptrdiff_t N) { 3769 // Check index correctness after subtraction. 3770 // Note: Case.Index == getNumCases() means end(). 3771 assert(Case.Index - N >= 0 && 3772 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && 3773 "Case.Index out the number of cases."); 3774 Case.Index -= N; 3775 return *this; 3776 } 3777 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const { 3778 assert(Case.SI == RHS.Case.SI && "Incompatible operators."); 3779 return Case.Index - RHS.Case.Index; 3780 } 3781 bool operator==(const CaseIteratorImpl &RHS) const { 3782 return Case == RHS.Case; 3783 } 3784 bool operator<(const CaseIteratorImpl &RHS) const { 3785 assert(Case.SI == RHS.Case.SI && "Incompatible operators."); 3786 return Case.Index < RHS.Case.Index; 3787 } 3788 const CaseHandleT &operator*() const { return Case; } 3789 }; 3790 3791 using CaseIt = CaseIteratorImpl<CaseHandle>; 3792 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>; 3793 3794 static SwitchInst *Create(Value *Value, BasicBlock *Default, 3795 unsigned NumCases, 3796 BasicBlock::iterator InsertBefore) { 3797 return new SwitchInst(Value, Default, NumCases, InsertBefore); 3798 } 3799 3800 static SwitchInst *Create(Value *Value, BasicBlock *Default, 3801 unsigned NumCases, 3802 Instruction *InsertBefore = nullptr) { 3803 return new SwitchInst(Value, Default, NumCases, InsertBefore); 3804 } 3805 3806 static SwitchInst *Create(Value *Value, BasicBlock *Default, 3807 unsigned NumCases, BasicBlock *InsertAtEnd) { 3808 return new SwitchInst(Value, Default, NumCases, InsertAtEnd); 3809 } 3810 3811 /// Provide fast operand accessors 3812 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3813 3814 // Accessor Methods for Switch stmt 3815 Value *getCondition() const { return getOperand(0); } 3816 void setCondition(Value *V) { setOperand(0, V); } 3817 3818 BasicBlock *getDefaultDest() const { 3819 return cast<BasicBlock>(getOperand(1)); 3820 } 3821 3822 /// Returns true if the default branch must result in immediate undefined 3823 /// behavior, false otherwise. 3824 bool defaultDestUndefined() const { 3825 return isa<UnreachableInst>(getDefaultDest()->getFirstNonPHIOrDbg()); 3826 } 3827 3828 void setDefaultDest(BasicBlock *DefaultCase) { 3829 setOperand(1, reinterpret_cast<Value*>(DefaultCase)); 3830 } 3831 3832 /// Return the number of 'cases' in this switch instruction, excluding the 3833 /// default case. 3834 unsigned getNumCases() const { 3835 return getNumOperands()/2 - 1; 3836 } 3837 3838 /// Returns a read/write iterator that points to the first case in the 3839 /// SwitchInst. 3840 CaseIt case_begin() { 3841 return CaseIt(this, 0); 3842 } 3843 3844 /// Returns a read-only iterator that points to the first case in the 3845 /// SwitchInst. 
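///
/// A minimal read-only iteration sketch, assuming a 'const SwitchInst &SI'
/// from elsewhere; the default destination is not part of this range.
/// \code
///   for (auto CI = SI.case_begin(), CE = SI.case_end(); CI != CE; ++CI)
///     if (CI->getCaseValue()->isZero())
///       errs() << "case 0 -> " << CI->getCaseSuccessor()->getName() << "\n";
/// \endcode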
3846 ConstCaseIt case_begin() const { 3847 return ConstCaseIt(this, 0); 3848 } 3849 3850 /// Returns a read/write iterator that points one past the last in the 3851 /// SwitchInst. 3852 CaseIt case_end() { 3853 return CaseIt(this, getNumCases()); 3854 } 3855 3856 /// Returns a read-only iterator that points one past the last in the 3857 /// SwitchInst. 3858 ConstCaseIt case_end() const { 3859 return ConstCaseIt(this, getNumCases()); 3860 } 3861 3862 /// Iteration adapter for range-for loops. 3863 iterator_range<CaseIt> cases() { 3864 return make_range(case_begin(), case_end()); 3865 } 3866 3867 /// Constant iteration adapter for range-for loops. 3868 iterator_range<ConstCaseIt> cases() const { 3869 return make_range(case_begin(), case_end()); 3870 } 3871 3872 /// Returns an iterator that points to the default case. 3873 /// Note: this iterator allows to resolve successor only. Attempt 3874 /// to resolve case value causes an assertion. 3875 /// Also note, that increment and decrement also causes an assertion and 3876 /// makes iterator invalid. 3877 CaseIt case_default() { 3878 return CaseIt(this, DefaultPseudoIndex); 3879 } 3880 ConstCaseIt case_default() const { 3881 return ConstCaseIt(this, DefaultPseudoIndex); 3882 } 3883 3884 /// Search all of the case values for the specified constant. If it is 3885 /// explicitly handled, return the case iterator of it, otherwise return 3886 /// default case iterator to indicate that it is handled by the default 3887 /// handler. 3888 CaseIt findCaseValue(const ConstantInt *C) { 3889 return CaseIt( 3890 this, 3891 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex()); 3892 } 3893 ConstCaseIt findCaseValue(const ConstantInt *C) const { 3894 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) { 3895 return Case.getCaseValue() == C; 3896 }); 3897 if (I != case_end()) 3898 return I; 3899 3900 return case_default(); 3901 } 3902 3903 /// Finds the unique case value for a given successor. Returns null if the 3904 /// successor is not found, not unique, or is the default case. 3905 ConstantInt *findCaseDest(BasicBlock *BB) { 3906 if (BB == getDefaultDest()) 3907 return nullptr; 3908 3909 ConstantInt *CI = nullptr; 3910 for (auto Case : cases()) { 3911 if (Case.getCaseSuccessor() != BB) 3912 continue; 3913 3914 if (CI) 3915 return nullptr; // Multiple cases lead to BB. 3916 3917 CI = Case.getCaseValue(); 3918 } 3919 3920 return CI; 3921 } 3922 3923 /// Add an entry to the switch instruction. 3924 /// Note: 3925 /// This action invalidates case_end(). Old case_end() iterator will 3926 /// point to the added case. 3927 void addCase(ConstantInt *OnVal, BasicBlock *Dest); 3928 3929 /// This method removes the specified case and its successor from the switch 3930 /// instruction. Note that this operation may reorder the remaining cases at 3931 /// index idx and above. 3932 /// Note: 3933 /// This action invalidates iterators for all cases following the one removed, 3934 /// including the case_end() iterator. It returns an iterator for the next 3935 /// case. 
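///
/// Because of the invalidation rule above, removal loops should continue from
/// the returned iterator. A hedged sketch ('SI' and 'DeadBB' are assumed):
/// \code
///   for (auto CI = SI->case_begin(); CI != SI->case_end();) {
///     if (CI->getCaseSuccessor() == DeadBB)
///       CI = SI->removeCase(CI);
///     else
///       ++CI;
///   }
/// \endcode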
3936 CaseIt removeCase(CaseIt I); 3937 3938 unsigned getNumSuccessors() const { return getNumOperands()/2; } 3939 BasicBlock *getSuccessor(unsigned idx) const { 3940 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!"); 3941 return cast<BasicBlock>(getOperand(idx*2+1)); 3942 } 3943 void setSuccessor(unsigned idx, BasicBlock *NewSucc) { 3944 assert(idx < getNumSuccessors() && "Successor # out of range for switch!"); 3945 setOperand(idx * 2 + 1, NewSucc); 3946 } 3947 3948 // Methods for support type inquiry through isa, cast, and dyn_cast: 3949 static bool classof(const Instruction *I) { 3950 return I->getOpcode() == Instruction::Switch; 3951 } 3952 static bool classof(const Value *V) { 3953 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3954 } 3955 }; 3956 3957 /// A wrapper class to simplify modification of SwitchInst cases along with 3958 /// their prof branch_weights metadata. 3959 class SwitchInstProfUpdateWrapper { 3960 SwitchInst &SI; 3961 std::optional<SmallVector<uint32_t, 8>> Weights; 3962 bool Changed = false; 3963 3964 protected: 3965 MDNode *buildProfBranchWeightsMD(); 3966 3967 void init(); 3968 3969 public: 3970 using CaseWeightOpt = std::optional<uint32_t>; 3971 SwitchInst *operator->() { return &SI; } 3972 SwitchInst &operator*() { return SI; } 3973 operator SwitchInst *() { return &SI; } 3974 3975 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); } 3976 3977 ~SwitchInstProfUpdateWrapper() { 3978 if (Changed) 3979 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD()); 3980 } 3981 3982 /// Delegate the call to the underlying SwitchInst::removeCase() and remove 3983 /// correspondent branch weight. 3984 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I); 3985 3986 /// Delegate the call to the underlying SwitchInst::addCase() and set the 3987 /// specified branch weight for the added case. 3988 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W); 3989 3990 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark 3991 /// this object to not touch the underlying SwitchInst in destructor. 3992 Instruction::InstListType::iterator eraseFromParent(); 3993 3994 void setSuccessorWeight(unsigned idx, CaseWeightOpt W); 3995 CaseWeightOpt getSuccessorWeight(unsigned idx); 3996 3997 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx); 3998 }; 3999 4000 template <> 4001 struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> { 4002 }; 4003 4004 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value) 4005 4006 //===----------------------------------------------------------------------===// 4007 // IndirectBrInst Class 4008 //===----------------------------------------------------------------------===// 4009 4010 //===--------------------------------------------------------------------------- 4011 /// Indirect Branch Instruction. 4012 /// 4013 class IndirectBrInst : public Instruction { 4014 unsigned ReservedSpace; 4015 4016 // Operand[0] = Address to jump to 4017 // Operand[n+1] = n-th destination 4018 IndirectBrInst(const IndirectBrInst &IBI); 4019 4020 /// Create a new indirectbr instruction, specifying an 4021 /// Address to jump to. The number of expected destinations can be specified 4022 /// here to make memory allocation more efficient. This constructor can also 4023 /// autoinsert before another instruction. 
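///
/// Construction normally goes through the public Create methods declared
/// below; a hedged sketch ('Addr', 'BB1', 'BB2' and 'InsertAtEndBB' are
/// assumed):
/// \code
///   IndirectBrInst *IBI =
///       IndirectBrInst::Create(Addr, /*NumDests=*/2, InsertAtEndBB);
///   IBI->addDestination(BB1);
///   IBI->addDestination(BB2);
/// \endcode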
4024 IndirectBrInst(Value *Address, unsigned NumDests, 4025 BasicBlock::iterator InsertBefore); 4026 4027 /// Create a new indirectbr instruction, specifying an 4028 /// Address to jump to. The number of expected destinations can be specified 4029 /// here to make memory allocation more efficient. This constructor can also 4030 /// autoinsert before another instruction. 4031 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore); 4032 4033 /// Create a new indirectbr instruction, specifying an 4034 /// Address to jump to. The number of expected destinations can be specified 4035 /// here to make memory allocation more efficient. This constructor also 4036 /// autoinserts at the end of the specified BasicBlock. 4037 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd); 4038 4039 // allocate space for exactly zero operands 4040 void *operator new(size_t S) { return User::operator new(S); } 4041 4042 void init(Value *Address, unsigned NumDests); 4043 void growOperands(); 4044 4045 protected: 4046 // Note: Instruction needs to be a friend here to call cloneImpl. 4047 friend class Instruction; 4048 4049 IndirectBrInst *cloneImpl() const; 4050 4051 public: 4052 void operator delete(void *Ptr) { User::operator delete(Ptr); } 4053 4054 /// Iterator type that casts an operand to a basic block. 4055 /// 4056 /// This only makes sense because the successors are stored as adjacent 4057 /// operands for indirectbr instructions. 4058 struct succ_op_iterator 4059 : iterator_adaptor_base<succ_op_iterator, value_op_iterator, 4060 std::random_access_iterator_tag, BasicBlock *, 4061 ptrdiff_t, BasicBlock *, BasicBlock *> { 4062 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} 4063 4064 BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 4065 BasicBlock *operator->() const { return operator*(); } 4066 }; 4067 4068 /// The const version of `succ_op_iterator`. 4069 struct const_succ_op_iterator 4070 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, 4071 std::random_access_iterator_tag, 4072 const BasicBlock *, ptrdiff_t, const BasicBlock *, 4073 const BasicBlock *> { 4074 explicit const_succ_op_iterator(const_value_op_iterator I) 4075 : iterator_adaptor_base(I) {} 4076 4077 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 4078 const BasicBlock *operator->() const { return operator*(); } 4079 }; 4080 4081 static IndirectBrInst *Create(Value *Address, unsigned NumDests, 4082 BasicBlock::iterator InsertBefore) { 4083 return new IndirectBrInst(Address, NumDests, InsertBefore); 4084 } 4085 4086 static IndirectBrInst *Create(Value *Address, unsigned NumDests, 4087 Instruction *InsertBefore = nullptr) { 4088 return new IndirectBrInst(Address, NumDests, InsertBefore); 4089 } 4090 4091 static IndirectBrInst *Create(Value *Address, unsigned NumDests, 4092 BasicBlock *InsertAtEnd) { 4093 return new IndirectBrInst(Address, NumDests, InsertAtEnd); 4094 } 4095 4096 /// Provide fast operand accessors. 4097 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4098 4099 // Accessor Methods for IndirectBrInst instruction. 4100 Value *getAddress() { return getOperand(0); } 4101 const Value *getAddress() const { return getOperand(0); } 4102 void setAddress(Value *V) { setOperand(0, V); } 4103 4104 /// return the number of possible destinations in this 4105 /// indirectbr instruction. 4106 unsigned getNumDestinations() const { return getNumOperands()-1; } 4107 4108 /// Return the specified destination. 
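///
/// Hedged example (an IndirectBrInst 'IBI' is assumed): destinations are the
/// operands after the address, indexed from 0 to getNumDestinations()-1.
/// \code
///   for (unsigned I = 0, E = IBI->getNumDestinations(); I != E; ++I)
///     errs() << IBI->getDestination(I)->getName() << "\n";
/// \endcode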
4109 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); } 4110 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); } 4111 4112 /// Add a destination. 4113 /// 4114 void addDestination(BasicBlock *Dest); 4115 4116 /// This method removes the specified successor from the 4117 /// indirectbr instruction. 4118 void removeDestination(unsigned i); 4119 4120 unsigned getNumSuccessors() const { return getNumOperands()-1; } 4121 BasicBlock *getSuccessor(unsigned i) const { 4122 return cast<BasicBlock>(getOperand(i+1)); 4123 } 4124 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 4125 setOperand(i + 1, NewSucc); 4126 } 4127 4128 iterator_range<succ_op_iterator> successors() { 4129 return make_range(succ_op_iterator(std::next(value_op_begin())), 4130 succ_op_iterator(value_op_end())); 4131 } 4132 4133 iterator_range<const_succ_op_iterator> successors() const { 4134 return make_range(const_succ_op_iterator(std::next(value_op_begin())), 4135 const_succ_op_iterator(value_op_end())); 4136 } 4137 4138 // Methods for support type inquiry through isa, cast, and dyn_cast: 4139 static bool classof(const Instruction *I) { 4140 return I->getOpcode() == Instruction::IndirectBr; 4141 } 4142 static bool classof(const Value *V) { 4143 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4144 } 4145 }; 4146 4147 template <> 4148 struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> { 4149 }; 4150 4151 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value) 4152 4153 //===----------------------------------------------------------------------===// 4154 // InvokeInst Class 4155 //===----------------------------------------------------------------------===// 4156 4157 /// Invoke instruction. The SubclassData field is used to hold the 4158 /// calling convention of the call. 4159 /// 4160 class InvokeInst : public CallBase { 4161 /// The number of operands for this call beyond the called function, 4162 /// arguments, and operand bundles. 4163 static constexpr int NumExtraOperands = 2; 4164 4165 /// The index from the end of the operand array to the normal destination. 4166 static constexpr int NormalDestOpEndIdx = -3; 4167 4168 /// The index from the end of the operand array to the unwind destination. 4169 static constexpr int UnwindDestOpEndIdx = -2; 4170 4171 InvokeInst(const InvokeInst &BI); 4172 4173 /// Construct an InvokeInst given a range of arguments. 4174 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4175 BasicBlock *IfException, ArrayRef<Value *> Args, 4176 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4177 const Twine &NameStr, BasicBlock::iterator InsertBefore); 4178 4179 /// Construct an InvokeInst given a range of arguments. 
4180 /// 4181 /// Construct an InvokeInst from a range of arguments 4182 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4183 BasicBlock *IfException, ArrayRef<Value *> Args, 4184 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4185 const Twine &NameStr, Instruction *InsertBefore); 4186 4187 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4188 BasicBlock *IfException, ArrayRef<Value *> Args, 4189 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4190 const Twine &NameStr, BasicBlock *InsertAtEnd); 4191 4192 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4193 BasicBlock *IfException, ArrayRef<Value *> Args, 4194 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); 4195 4196 /// Compute the number of operands to allocate. 4197 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { 4198 // We need one operand for the called function, plus our extra operands and 4199 // the input operand counts provided. 4200 return 1 + NumExtraOperands + NumArgs + NumBundleInputs; 4201 } 4202 4203 protected: 4204 // Note: Instruction needs to be a friend here to call cloneImpl. 4205 friend class Instruction; 4206 4207 InvokeInst *cloneImpl() const; 4208 4209 public: 4210 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4211 BasicBlock *IfException, ArrayRef<Value *> Args, 4212 const Twine &NameStr, 4213 BasicBlock::iterator InsertBefore) { 4214 int NumOperands = ComputeNumOperands(Args.size()); 4215 return new (NumOperands) 4216 InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt, 4217 NumOperands, NameStr, InsertBefore); 4218 } 4219 4220 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4221 BasicBlock *IfException, ArrayRef<Value *> Args, 4222 const Twine &NameStr, 4223 Instruction *InsertBefore = nullptr) { 4224 int NumOperands = ComputeNumOperands(Args.size()); 4225 return new (NumOperands) 4226 InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt, 4227 NumOperands, NameStr, InsertBefore); 4228 } 4229 4230 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4231 BasicBlock *IfException, ArrayRef<Value *> Args, 4232 ArrayRef<OperandBundleDef> Bundles, 4233 const Twine &NameStr, 4234 BasicBlock::iterator InsertBefore) { 4235 int NumOperands = 4236 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 4237 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 4238 4239 return new (NumOperands, DescriptorBytes) 4240 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, 4241 NameStr, InsertBefore); 4242 } 4243 4244 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4245 BasicBlock *IfException, ArrayRef<Value *> Args, 4246 ArrayRef<OperandBundleDef> Bundles = std::nullopt, 4247 const Twine &NameStr = "", 4248 Instruction *InsertBefore = nullptr) { 4249 int NumOperands = 4250 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 4251 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 4252 4253 return new (NumOperands, DescriptorBytes) 4254 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, 4255 NameStr, InsertBefore); 4256 } 4257 4258 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4259 BasicBlock *IfException, ArrayRef<Value *> Args, 4260 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4261 int NumOperands = ComputeNumOperands(Args.size()); 4262 return new (NumOperands) 4263 
InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt, 4264 NumOperands, NameStr, InsertAtEnd); 4265 } 4266 4267 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4268 BasicBlock *IfException, ArrayRef<Value *> Args, 4269 ArrayRef<OperandBundleDef> Bundles, 4270 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4271 int NumOperands = 4272 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 4273 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 4274 4275 return new (NumOperands, DescriptorBytes) 4276 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, 4277 NameStr, InsertAtEnd); 4278 } 4279 4280 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 4281 BasicBlock *IfException, ArrayRef<Value *> Args, 4282 const Twine &NameStr, 4283 BasicBlock::iterator InsertBefore) { 4284 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 4285 IfException, Args, std::nullopt, NameStr, InsertBefore); 4286 } 4287 4288 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 4289 BasicBlock *IfException, ArrayRef<Value *> Args, 4290 const Twine &NameStr, 4291 Instruction *InsertBefore = nullptr) { 4292 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 4293 IfException, Args, std::nullopt, NameStr, InsertBefore); 4294 } 4295 4296 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 4297 BasicBlock *IfException, ArrayRef<Value *> Args, 4298 ArrayRef<OperandBundleDef> Bundles, 4299 const Twine &NameStr, 4300 BasicBlock::iterator InsertBefore) { 4301 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 4302 IfException, Args, Bundles, NameStr, InsertBefore); 4303 } 4304 4305 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 4306 BasicBlock *IfException, ArrayRef<Value *> Args, 4307 ArrayRef<OperandBundleDef> Bundles = std::nullopt, 4308 const Twine &NameStr = "", 4309 Instruction *InsertBefore = nullptr) { 4310 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 4311 IfException, Args, Bundles, NameStr, InsertBefore); 4312 } 4313 4314 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 4315 BasicBlock *IfException, ArrayRef<Value *> Args, 4316 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4317 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 4318 IfException, Args, NameStr, InsertAtEnd); 4319 } 4320 4321 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 4322 BasicBlock *IfException, ArrayRef<Value *> Args, 4323 ArrayRef<OperandBundleDef> Bundles, 4324 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4325 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 4326 IfException, Args, Bundles, NameStr, InsertAtEnd); 4327 } 4328 4329 /// Create a clone of \p II with a different set of operand bundles and 4330 /// insert it before \p InsertPt. 4331 /// 4332 /// The returned invoke instruction is identical to \p II in every way except 4333 /// that the operand bundles for the new instruction are set to the operand 4334 /// bundles in \p Bundles. 4335 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles, 4336 BasicBlock::iterator InsertPt); 4337 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles, 4338 Instruction *InsertPt = nullptr); 4339 4340 // get*Dest - Return the destination basic blocks... 
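// A hedged creation sketch ('Callee' is a FunctionCallee; 'NormalBB',
// 'UnwindBB', 'Args' and 'BB' are assumed to exist elsewhere):
//
//   InvokeInst *II = InvokeInst::Create(Callee, NormalBB, UnwindBB, Args,
//                                       "call", /*InsertAtEnd=*/BB);
//   assert(II->getNormalDest() == NormalBB && II->getUnwindDest() == UnwindBB);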
4341 BasicBlock *getNormalDest() const { 4342 return cast<BasicBlock>(Op<NormalDestOpEndIdx>()); 4343 } 4344 BasicBlock *getUnwindDest() const { 4345 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>()); 4346 } 4347 void setNormalDest(BasicBlock *B) { 4348 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B); 4349 } 4350 void setUnwindDest(BasicBlock *B) { 4351 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B); 4352 } 4353 4354 /// Get the landingpad instruction from the landing pad 4355 /// block (the unwind destination). 4356 LandingPadInst *getLandingPadInst() const; 4357 4358 BasicBlock *getSuccessor(unsigned i) const { 4359 assert(i < 2 && "Successor # out of range for invoke!"); 4360 return i == 0 ? getNormalDest() : getUnwindDest(); 4361 } 4362 4363 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 4364 assert(i < 2 && "Successor # out of range for invoke!"); 4365 if (i == 0) 4366 setNormalDest(NewSucc); 4367 else 4368 setUnwindDest(NewSucc); 4369 } 4370 4371 unsigned getNumSuccessors() const { return 2; } 4372 4373 /// Updates profile metadata by scaling it by \p S / \p T. 4374 void updateProfWeight(uint64_t S, uint64_t T); 4375 4376 // Methods for support type inquiry through isa, cast, and dyn_cast: 4377 static bool classof(const Instruction *I) { 4378 return (I->getOpcode() == Instruction::Invoke); 4379 } 4380 static bool classof(const Value *V) { 4381 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4382 } 4383 4384 private: 4385 // Shadow Instruction::setInstructionSubclassData with a private forwarding 4386 // method so that subclasses cannot accidentally use it. 4387 template <typename Bitfield> 4388 void setSubclassData(typename Bitfield::Type Value) { 4389 Instruction::setSubclassData<Bitfield>(Value); 4390 } 4391 }; 4392 4393 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4394 BasicBlock *IfException, ArrayRef<Value *> Args, 4395 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4396 const Twine &NameStr, BasicBlock::iterator InsertBefore) 4397 : CallBase(Ty->getReturnType(), Instruction::Invoke, 4398 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4399 InsertBefore) { 4400 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); 4401 } 4402 4403 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4404 BasicBlock *IfException, ArrayRef<Value *> Args, 4405 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4406 const Twine &NameStr, Instruction *InsertBefore) 4407 : CallBase(Ty->getReturnType(), Instruction::Invoke, 4408 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4409 InsertBefore) { 4410 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); 4411 } 4412 4413 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4414 BasicBlock *IfException, ArrayRef<Value *> Args, 4415 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4416 const Twine &NameStr, BasicBlock *InsertAtEnd) 4417 : CallBase(Ty->getReturnType(), Instruction::Invoke, 4418 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4419 InsertAtEnd) { 4420 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); 4421 } 4422 4423 //===----------------------------------------------------------------------===// 4424 // CallBrInst Class 4425 //===----------------------------------------------------------------------===// 4426 4427 /// CallBr instruction, tracking function calls that may not return control but 4428 /// instead transfer it to a third 
location. The SubclassData field is used to 4429 /// hold the calling convention of the call. 4430 /// 4431 class CallBrInst : public CallBase { 4432 4433 unsigned NumIndirectDests; 4434 4435 CallBrInst(const CallBrInst &BI); 4436 4437 /// Construct a CallBrInst given a range of arguments. 4438 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4439 ArrayRef<BasicBlock *> IndirectDests, 4440 ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles, 4441 int NumOperands, const Twine &NameStr, 4442 BasicBlock::iterator InsertBefore); 4443 4444 /// Construct a CallBrInst given a range of arguments. 4445 /// 4446 /// This overload autoinserts before the given instruction. 4447 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4448 ArrayRef<BasicBlock *> IndirectDests, 4449 ArrayRef<Value *> Args, 4450 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4451 const Twine &NameStr, Instruction *InsertBefore); 4452 4453 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4454 ArrayRef<BasicBlock *> IndirectDests, 4455 ArrayRef<Value *> Args, 4456 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4457 const Twine &NameStr, BasicBlock *InsertAtEnd); 4458 4459 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest, 4460 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, 4461 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); 4462 4463 /// Compute the number of operands to allocate. 4464 static int ComputeNumOperands(int NumArgs, int NumIndirectDests, 4465 int NumBundleInputs = 0) { 4466 // We need one operand for the called function and one for the default destination, 4467 // plus one per indirect destination and the provided argument and bundle operand counts. 4468 return 2 + NumIndirectDests + NumArgs + NumBundleInputs; 4469 } 4470 4471 protected: 4472 // Note: Instruction needs to be a friend here to call cloneImpl.
4473 friend class Instruction; 4474 4475 CallBrInst *cloneImpl() const; 4476 4477 public: 4478 static CallBrInst *Create(FunctionType *Ty, Value *Func, 4479 BasicBlock *DefaultDest, 4480 ArrayRef<BasicBlock *> IndirectDests, 4481 ArrayRef<Value *> Args, const Twine &NameStr, 4482 BasicBlock::iterator InsertBefore) { 4483 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); 4484 return new (NumOperands) 4485 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt, 4486 NumOperands, NameStr, InsertBefore); 4487 } 4488 4489 static CallBrInst *Create(FunctionType *Ty, Value *Func, 4490 BasicBlock *DefaultDest, 4491 ArrayRef<BasicBlock *> IndirectDests, 4492 ArrayRef<Value *> Args, const Twine &NameStr, 4493 Instruction *InsertBefore = nullptr) { 4494 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); 4495 return new (NumOperands) 4496 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt, 4497 NumOperands, NameStr, InsertBefore); 4498 } 4499 4500 static CallBrInst * 4501 Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4502 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, 4503 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 4504 BasicBlock::iterator InsertBefore) { 4505 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), 4506 CountBundleInputs(Bundles)); 4507 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 4508 4509 return new (NumOperands, DescriptorBytes) 4510 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, 4511 NumOperands, NameStr, InsertBefore); 4512 } 4513 4514 static CallBrInst * 4515 Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4516 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, 4517 ArrayRef<OperandBundleDef> Bundles = std::nullopt, 4518 const Twine &NameStr = "", Instruction *InsertBefore = nullptr) { 4519 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), 4520 CountBundleInputs(Bundles)); 4521 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 4522 4523 return new (NumOperands, DescriptorBytes) 4524 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, 4525 NumOperands, NameStr, InsertBefore); 4526 } 4527 4528 static CallBrInst *Create(FunctionType *Ty, Value *Func, 4529 BasicBlock *DefaultDest, 4530 ArrayRef<BasicBlock *> IndirectDests, 4531 ArrayRef<Value *> Args, const Twine &NameStr, 4532 BasicBlock *InsertAtEnd) { 4533 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); 4534 return new (NumOperands) 4535 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt, 4536 NumOperands, NameStr, InsertAtEnd); 4537 } 4538 4539 static CallBrInst *Create(FunctionType *Ty, Value *Func, 4540 BasicBlock *DefaultDest, 4541 ArrayRef<BasicBlock *> IndirectDests, 4542 ArrayRef<Value *> Args, 4543 ArrayRef<OperandBundleDef> Bundles, 4544 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4545 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), 4546 CountBundleInputs(Bundles)); 4547 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 4548 4549 return new (NumOperands, DescriptorBytes) 4550 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, 4551 NumOperands, NameStr, InsertAtEnd); 4552 } 4553 4554 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 4555 ArrayRef<BasicBlock *> IndirectDests, 4556 ArrayRef<Value *> Args, const Twine &NameStr, 4557 BasicBlock::iterator 
InsertBefore) { 4558 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4559 IndirectDests, Args, NameStr, InsertBefore); 4560 } 4561 4562 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 4563 ArrayRef<BasicBlock *> IndirectDests, 4564 ArrayRef<Value *> Args, const Twine &NameStr, 4565 Instruction *InsertBefore = nullptr) { 4566 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4567 IndirectDests, Args, NameStr, InsertBefore); 4568 } 4569 4570 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 4571 ArrayRef<BasicBlock *> IndirectDests, 4572 ArrayRef<Value *> Args, 4573 ArrayRef<OperandBundleDef> Bundles, 4574 const Twine &NameStr, 4575 BasicBlock::iterator InsertBefore) { 4576 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4577 IndirectDests, Args, Bundles, NameStr, InsertBefore); 4578 } 4579 4580 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 4581 ArrayRef<BasicBlock *> IndirectDests, 4582 ArrayRef<Value *> Args, 4583 ArrayRef<OperandBundleDef> Bundles = std::nullopt, 4584 const Twine &NameStr = "", 4585 Instruction *InsertBefore = nullptr) { 4586 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4587 IndirectDests, Args, Bundles, NameStr, InsertBefore); 4588 } 4589 4590 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 4591 ArrayRef<BasicBlock *> IndirectDests, 4592 ArrayRef<Value *> Args, const Twine &NameStr, 4593 BasicBlock *InsertAtEnd) { 4594 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4595 IndirectDests, Args, NameStr, InsertAtEnd); 4596 } 4597 4598 static CallBrInst *Create(FunctionCallee Func, 4599 BasicBlock *DefaultDest, 4600 ArrayRef<BasicBlock *> IndirectDests, 4601 ArrayRef<Value *> Args, 4602 ArrayRef<OperandBundleDef> Bundles, 4603 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4604 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4605 IndirectDests, Args, Bundles, NameStr, InsertAtEnd); 4606 } 4607 4608 /// Create a clone of \p CBI with a different set of operand bundles and 4609 /// insert it before \p InsertPt. 4610 /// 4611 /// The returned callbr instruction is identical to \p CBI in every way 4612 /// except that the operand bundles for the new instruction are set to the 4613 /// operand bundles in \p Bundles. 4614 static CallBrInst *Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> Bundles, 4615 BasicBlock::iterator InsertPt); 4616 static CallBrInst *Create(CallBrInst *CBI, 4617 ArrayRef<OperandBundleDef> Bundles, 4618 Instruction *InsertPt = nullptr); 4619 4620 /// Return the number of callbr indirect dest labels. 4621 /// 4622 unsigned getNumIndirectDests() const { return NumIndirectDests; } 4623 4624 /// getIndirectDestLabel - Return the i-th indirect dest label. 4625 /// 4626 Value *getIndirectDestLabel(unsigned i) const { 4627 assert(i < getNumIndirectDests() && "Out of bounds!"); 4628 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1); 4629 } 4630 4631 Value *getIndirectDestLabelUse(unsigned i) const { 4632 assert(i < getNumIndirectDests() && "Out of bounds!"); 4633 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1); 4634 } 4635 4636 // Return the destination basic blocks... 
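  // Example usage (an illustrative sketch only, as produced for inline asm
  // with "goto" labels; `AsmCallee`, `FallthroughBB`, `LabelBB` and `BB` are
  // hypothetical):
  //
  //   CallBrInst *CBI =
  //       CallBrInst::Create(AsmCallee, /*DefaultDest=*/FallthroughBB,
  //                          /*IndirectDests=*/{LabelBB}, /*Args=*/{}, "", BB);
  //
  //   // Each indirect destination is also a successor, numbered after the
  //   // default destination (see getSuccessor/getIndirectDest below).
  //   unsigned NumTargets = CBI->getNumSuccessors(); // indirect dests + 1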
4637 BasicBlock *getDefaultDest() const { 4638 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); 4639 } 4640 BasicBlock *getIndirectDest(unsigned i) const { 4641 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); 4642 } 4643 SmallVector<BasicBlock *, 16> getIndirectDests() const { 4644 SmallVector<BasicBlock *, 16> IndirectDests; 4645 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) 4646 IndirectDests.push_back(getIndirectDest(i)); 4647 return IndirectDests; 4648 } 4649 void setDefaultDest(BasicBlock *B) { 4650 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); 4651 } 4652 void setIndirectDest(unsigned i, BasicBlock *B) { 4653 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); 4654 } 4655 4656 BasicBlock *getSuccessor(unsigned i) const { 4657 assert(i < getNumSuccessors() && 4658 "Successor # out of range for callbr!"); 4659 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1); 4660 } 4661 4662 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 4663 assert(i < getNumSuccessors() && 4664 "Successor # out of range for callbr!"); 4665 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc); 4666 } 4667 4668 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } 4669 4670 // Methods for support type inquiry through isa, cast, and dyn_cast: 4671 static bool classof(const Instruction *I) { 4672 return (I->getOpcode() == Instruction::CallBr); 4673 } 4674 static bool classof(const Value *V) { 4675 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4676 } 4677 4678 private: 4679 // Shadow Instruction::setInstructionSubclassData with a private forwarding 4680 // method so that subclasses cannot accidentally use it.
4681 template <typename Bitfield> 4682 void setSubclassData(typename Bitfield::Type Value) { 4683 Instruction::setSubclassData<Bitfield>(Value); 4684 } 4685 }; 4686 4687 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4688 ArrayRef<BasicBlock *> IndirectDests, 4689 ArrayRef<Value *> Args, 4690 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4691 const Twine &NameStr, BasicBlock::iterator InsertBefore) 4692 : CallBase(Ty->getReturnType(), Instruction::CallBr, 4693 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4694 InsertBefore) { 4695 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 4696 } 4697 4698 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4699 ArrayRef<BasicBlock *> IndirectDests, 4700 ArrayRef<Value *> Args, 4701 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4702 const Twine &NameStr, Instruction *InsertBefore) 4703 : CallBase(Ty->getReturnType(), Instruction::CallBr, 4704 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4705 InsertBefore) { 4706 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 4707 } 4708 4709 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4710 ArrayRef<BasicBlock *> IndirectDests, 4711 ArrayRef<Value *> Args, 4712 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4713 const Twine &NameStr, BasicBlock *InsertAtEnd) 4714 : CallBase(Ty->getReturnType(), Instruction::CallBr, 4715 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4716 InsertAtEnd) { 4717 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 4718 } 4719 4720 //===----------------------------------------------------------------------===// 4721 // ResumeInst Class 4722 //===----------------------------------------------------------------------===// 4723 4724 //===--------------------------------------------------------------------------- 4725 /// Resume the propagation of an exception. 4726 /// 4727 class ResumeInst : public Instruction { 4728 ResumeInst(const ResumeInst &RI); 4729 4730 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr); 4731 explicit ResumeInst(Value *Exn, BasicBlock::iterator InsertBefore); 4732 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd); 4733 4734 protected: 4735 // Note: Instruction needs to be a friend here to call cloneImpl. 4736 friend class Instruction; 4737 4738 ResumeInst *cloneImpl() const; 4739 4740 public: 4741 static ResumeInst *Create(Value *Exn, BasicBlock::iterator InsertBefore) { 4742 return new (1) ResumeInst(Exn, InsertBefore); 4743 } 4744 4745 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) { 4746 return new(1) ResumeInst(Exn, InsertBefore); 4747 } 4748 4749 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) { 4750 return new(1) ResumeInst(Exn, InsertAtEnd); 4751 } 4752 4753 /// Provide fast operand accessors 4754 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4755 4756 /// Convenience accessor. 
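  // Example usage (an illustrative sketch only; `ExnVal` is assumed to be the
  // aggregate produced by a landingpad, and `BB` the block being terminated):
  //
  //   // Re-raise an in-flight exception whose state is described by ExnVal.
  //   ResumeInst *RI = ResumeInst::Create(ExnVal, /*InsertAtEnd=*/BB);
  //   Value *State = RI->getValue(); // the single operand, accessor below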
4757 Value *getValue() const { return Op<0>(); } 4758 4759 unsigned getNumSuccessors() const { return 0; } 4760 4761 // Methods for support type inquiry through isa, cast, and dyn_cast: 4762 static bool classof(const Instruction *I) { 4763 return I->getOpcode() == Instruction::Resume; 4764 } 4765 static bool classof(const Value *V) { 4766 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4767 } 4768 4769 private: 4770 BasicBlock *getSuccessor(unsigned idx) const { 4771 llvm_unreachable("ResumeInst has no successors!"); 4772 } 4773 4774 void setSuccessor(unsigned idx, BasicBlock *NewSucc) { 4775 llvm_unreachable("ResumeInst has no successors!"); 4776 } 4777 }; 4778 4779 template <> 4780 struct OperandTraits<ResumeInst> : 4781 public FixedNumOperandTraits<ResumeInst, 1> { 4782 }; 4783 4784 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value) 4785 4786 //===----------------------------------------------------------------------===// 4787 // CatchSwitchInst Class 4788 //===----------------------------------------------------------------------===// 4789 class CatchSwitchInst : public Instruction { 4790 using UnwindDestField = BoolBitfieldElementT<0>; 4791 4792 /// The number of operands actually allocated. NumOperands is 4793 /// the number actually in use. 4794 unsigned ReservedSpace; 4795 4796 // Operand[0] = Outer scope 4797 // Operand[1] = Unwind block destination 4798 // Operand[n] = BasicBlock to go to on match 4799 CatchSwitchInst(const CatchSwitchInst &CSI); 4800 4801 /// Create a new switch instruction, specifying a 4802 /// default destination. The number of additional handlers can be specified 4803 /// here to make memory allocation more efficient. 4804 /// This constructor can also autoinsert before another instruction. 4805 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 4806 unsigned NumHandlers, const Twine &NameStr, 4807 BasicBlock::iterator InsertBefore); 4808 4809 /// Create a new switch instruction, specifying a 4810 /// default destination. The number of additional handlers can be specified 4811 /// here to make memory allocation more efficient. 4812 /// This constructor can also autoinsert before another instruction. 4813 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 4814 unsigned NumHandlers, const Twine &NameStr, 4815 Instruction *InsertBefore); 4816 4817 /// Create a new switch instruction, specifying a 4818 /// default destination. The number of additional handlers can be specified 4819 /// here to make memory allocation more efficient. 4820 /// This constructor also autoinserts at the end of the specified BasicBlock. 4821 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 4822 unsigned NumHandlers, const Twine &NameStr, 4823 BasicBlock *InsertAtEnd); 4824 4825 // allocate space for exactly zero operands 4826 void *operator new(size_t S) { return User::operator new(S); } 4827 4828 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved); 4829 void growOperands(unsigned Size); 4830 4831 protected: 4832 // Note: Instruction needs to be a friend here to call cloneImpl. 
4833 friend class Instruction; 4834 4835 CatchSwitchInst *cloneImpl() const; 4836 4837 public: 4838 void operator delete(void *Ptr) { return User::operator delete(Ptr); } 4839 4840 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, 4841 unsigned NumHandlers, const Twine &NameStr, 4842 BasicBlock::iterator InsertBefore) { 4843 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, 4844 InsertBefore); 4845 } 4846 4847 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, 4848 unsigned NumHandlers, 4849 const Twine &NameStr = "", 4850 Instruction *InsertBefore = nullptr) { 4851 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, 4852 InsertBefore); 4853 } 4854 4855 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, 4856 unsigned NumHandlers, const Twine &NameStr, 4857 BasicBlock *InsertAtEnd) { 4858 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, 4859 InsertAtEnd); 4860 } 4861 4862 /// Provide fast operand accessors 4863 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4864 4865 // Accessor Methods for CatchSwitch stmt 4866 Value *getParentPad() const { return getOperand(0); } 4867 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); } 4868 4869 // Accessor Methods for CatchSwitch stmt 4870 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } 4871 bool unwindsToCaller() const { return !hasUnwindDest(); } 4872 BasicBlock *getUnwindDest() const { 4873 if (hasUnwindDest()) 4874 return cast<BasicBlock>(getOperand(1)); 4875 return nullptr; 4876 } 4877 void setUnwindDest(BasicBlock *UnwindDest) { 4878 assert(UnwindDest); 4879 assert(hasUnwindDest()); 4880 setOperand(1, UnwindDest); 4881 } 4882 4883 /// return the number of 'handlers' in this catchswitch 4884 /// instruction, except the default handler 4885 unsigned getNumHandlers() const { 4886 if (hasUnwindDest()) 4887 return getNumOperands() - 2; 4888 return getNumOperands() - 1; 4889 } 4890 4891 private: 4892 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); } 4893 static const BasicBlock *handler_helper(const Value *V) { 4894 return cast<BasicBlock>(V); 4895 } 4896 4897 public: 4898 using DerefFnTy = BasicBlock *(*)(Value *); 4899 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>; 4900 using handler_range = iterator_range<handler_iterator>; 4901 using ConstDerefFnTy = const BasicBlock *(*)(const Value *); 4902 using const_handler_iterator = 4903 mapped_iterator<const_op_iterator, ConstDerefFnTy>; 4904 using const_handler_range = iterator_range<const_handler_iterator>; 4905 4906 /// Returns an iterator that points to the first handler in CatchSwitchInst. 4907 handler_iterator handler_begin() { 4908 op_iterator It = op_begin() + 1; 4909 if (hasUnwindDest()) 4910 ++It; 4911 return handler_iterator(It, DerefFnTy(handler_helper)); 4912 } 4913 4914 /// Returns an iterator that points to the first handler in the 4915 /// CatchSwitchInst. 4916 const_handler_iterator handler_begin() const { 4917 const_op_iterator It = op_begin() + 1; 4918 if (hasUnwindDest()) 4919 ++It; 4920 return const_handler_iterator(It, ConstDerefFnTy(handler_helper)); 4921 } 4922 4923 /// Returns a read-only iterator that points one past the last 4924 /// handler in the CatchSwitchInst. 
4925 handler_iterator handler_end() { 4926 return handler_iterator(op_end(), DerefFnTy(handler_helper)); 4927 } 4928 4929 /// Returns an iterator that points one past the last handler in the 4930 /// CatchSwitchInst. 4931 const_handler_iterator handler_end() const { 4932 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper)); 4933 } 4934 4935 /// iteration adapter for range-for loops. 4936 handler_range handlers() { 4937 return make_range(handler_begin(), handler_end()); 4938 } 4939 4940 /// iteration adapter for range-for loops. 4941 const_handler_range handlers() const { 4942 return make_range(handler_begin(), handler_end()); 4943 } 4944 4945 /// Add an entry to the switch instruction... 4946 /// Note: 4947 /// This action invalidates handler_end(). Old handler_end() iterator will 4948 /// point to the added handler. 4949 void addHandler(BasicBlock *Dest); 4950 4951 void removeHandler(handler_iterator HI); 4952 4953 unsigned getNumSuccessors() const { return getNumOperands() - 1; } 4954 BasicBlock *getSuccessor(unsigned Idx) const { 4955 assert(Idx < getNumSuccessors() && 4956 "Successor # out of range for catchswitch!"); 4957 return cast<BasicBlock>(getOperand(Idx + 1)); 4958 } 4959 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) { 4960 assert(Idx < getNumSuccessors() && 4961 "Successor # out of range for catchswitch!"); 4962 setOperand(Idx + 1, NewSucc); 4963 } 4964 4965 // Methods for support type inquiry through isa, cast, and dyn_cast: 4966 static bool classof(const Instruction *I) { 4967 return I->getOpcode() == Instruction::CatchSwitch; 4968 } 4969 static bool classof(const Value *V) { 4970 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4971 } 4972 }; 4973 4974 template <> 4975 struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {}; 4976 4977 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value) 4978 4979 //===----------------------------------------------------------------------===// 4980 // CleanupPadInst Class 4981 //===----------------------------------------------------------------------===// 4982 class CleanupPadInst : public FuncletPadInst { 4983 private: 4984 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, 4985 unsigned Values, const Twine &NameStr, 4986 BasicBlock::iterator InsertBefore) 4987 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, 4988 NameStr, InsertBefore) {} 4989 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, 4990 unsigned Values, const Twine &NameStr, 4991 Instruction *InsertBefore) 4992 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, 4993 NameStr, InsertBefore) {} 4994 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, 4995 unsigned Values, const Twine &NameStr, 4996 BasicBlock *InsertAtEnd) 4997 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, 4998 NameStr, InsertAtEnd) {} 4999 5000 public: 5001 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args, 5002 const Twine &NameStr, 5003 BasicBlock::iterator InsertBefore) { 5004 unsigned Values = 1 + Args.size(); 5005 return new (Values) 5006 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore); 5007 } 5008 5009 static CleanupPadInst *Create(Value *ParentPad, 5010 ArrayRef<Value *> Args = std::nullopt, 5011 const Twine &NameStr = "", 5012 Instruction *InsertBefore = nullptr) { 5013 unsigned Values = 1 + Args.size(); 5014 return new (Values) 5015 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore); 5016 } 5017 
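  // Example usage (an illustrative sketch only; `ParentPad` is typically
  // ConstantTokenNone for a function-level cleanup, and `CleanupBB` and
  // `UnwindBB` are hypothetical blocks):
  //
  //   CleanupPadInst *CP =
  //       CleanupPadInst::Create(ParentPad, /*Args=*/{}, "cleanup", CleanupBB);
  //   // ... emit the cleanup code in CleanupBB, then terminate the block with
  //   // a cleanupret that continues unwinding (see CleanupReturnInst below):
  //   CleanupReturnInst::Create(CP, UnwindBB, CleanupBB);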
5018 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args, 5019 const Twine &NameStr, BasicBlock *InsertAtEnd) { 5020 unsigned Values = 1 + Args.size(); 5021 return new (Values) 5022 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd); 5023 } 5024 5025 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5026 static bool classof(const Instruction *I) { 5027 return I->getOpcode() == Instruction::CleanupPad; 5028 } 5029 static bool classof(const Value *V) { 5030 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5031 } 5032 }; 5033 5034 //===----------------------------------------------------------------------===// 5035 // CatchPadInst Class 5036 //===----------------------------------------------------------------------===// 5037 class CatchPadInst : public FuncletPadInst { 5038 private: 5039 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, 5040 unsigned Values, const Twine &NameStr, 5041 BasicBlock::iterator InsertBefore) 5042 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, 5043 NameStr, InsertBefore) {} 5044 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, 5045 unsigned Values, const Twine &NameStr, 5046 Instruction *InsertBefore) 5047 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, 5048 NameStr, InsertBefore) {} 5049 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, 5050 unsigned Values, const Twine &NameStr, 5051 BasicBlock *InsertAtEnd) 5052 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, 5053 NameStr, InsertAtEnd) {} 5054 5055 public: 5056 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, 5057 const Twine &NameStr, 5058 BasicBlock::iterator InsertBefore) { 5059 unsigned Values = 1 + Args.size(); 5060 return new (Values) 5061 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore); 5062 } 5063 5064 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, 5065 const Twine &NameStr = "", 5066 Instruction *InsertBefore = nullptr) { 5067 unsigned Values = 1 + Args.size(); 5068 return new (Values) 5069 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore); 5070 } 5071 5072 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, 5073 const Twine &NameStr, BasicBlock *InsertAtEnd) { 5074 unsigned Values = 1 + Args.size(); 5075 return new (Values) 5076 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd); 5077 } 5078 5079 /// Convenience accessors 5080 CatchSwitchInst *getCatchSwitch() const { 5081 return cast<CatchSwitchInst>(Op<-1>()); 5082 } 5083 void setCatchSwitch(Value *CatchSwitch) { 5084 assert(CatchSwitch); 5085 Op<-1>() = CatchSwitch; 5086 } 5087 5088 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5089 static bool classof(const Instruction *I) { 5090 return I->getOpcode() == Instruction::CatchPad; 5091 } 5092 static bool classof(const Value *V) { 5093 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5094 } 5095 }; 5096 5097 //===----------------------------------------------------------------------===// 5098 // CatchReturnInst Class 5099 //===----------------------------------------------------------------------===// 5100 5101 class CatchReturnInst : public Instruction { 5102 CatchReturnInst(const CatchReturnInst &RI); 5103 CatchReturnInst(Value *CatchPad, BasicBlock *BB, 5104 BasicBlock::iterator InsertBefore); 5105 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore); 5106 
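  // Example usage tying the EH classes above together (an illustrative sketch
  // only; `ParentPad`, `SwitchBB`, `CatchBB`, `ContinueBB` and `TypeInfo` are
  // hypothetical):
  //
  //   CatchSwitchInst *CS = CatchSwitchInst::Create(
  //       ParentPad, /*UnwindDest=*/nullptr, /*NumHandlers=*/1, "cs", SwitchBB);
  //   CS->addHandler(CatchBB);
  //
  //   CatchPadInst *CP = CatchPadInst::Create(CS, {TypeInfo}, "catch", CatchBB);
  //   // Leave the handler and resume normal control flow in ContinueBB:
  //   CatchReturnInst::Create(CP, ContinueBB, CatchBB);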
CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd); 5107 5108 void init(Value *CatchPad, BasicBlock *BB); 5109 5110 protected: 5111 // Note: Instruction needs to be a friend here to call cloneImpl. 5112 friend class Instruction; 5113 5114 CatchReturnInst *cloneImpl() const; 5115 5116 public: 5117 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, 5118 BasicBlock::iterator InsertBefore) { 5119 assert(CatchPad); 5120 assert(BB); 5121 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore); 5122 } 5123 5124 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, 5125 Instruction *InsertBefore = nullptr) { 5126 assert(CatchPad); 5127 assert(BB); 5128 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore); 5129 } 5130 5131 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, 5132 BasicBlock *InsertAtEnd) { 5133 assert(CatchPad); 5134 assert(BB); 5135 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd); 5136 } 5137 5138 /// Provide fast operand accessors 5139 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 5140 5141 /// Convenience accessors. 5142 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); } 5143 void setCatchPad(CatchPadInst *CatchPad) { 5144 assert(CatchPad); 5145 Op<0>() = CatchPad; 5146 } 5147 5148 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); } 5149 void setSuccessor(BasicBlock *NewSucc) { 5150 assert(NewSucc); 5151 Op<1>() = NewSucc; 5152 } 5153 unsigned getNumSuccessors() const { return 1; } 5154 5155 /// Get the parentPad of this catchret's catchpad's catchswitch. 5156 /// The successor block is implicitly a member of this funclet. 5157 Value *getCatchSwitchParentPad() const { 5158 return getCatchPad()->getCatchSwitch()->getParentPad(); 5159 } 5160 5161 // Methods for support type inquiry through isa, cast, and dyn_cast: 5162 static bool classof(const Instruction *I) { 5163 return (I->getOpcode() == Instruction::CatchRet); 5164 } 5165 static bool classof(const Value *V) { 5166 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5167 } 5168 5169 private: 5170 BasicBlock *getSuccessor(unsigned Idx) const { 5171 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!"); 5172 return getSuccessor(); 5173 } 5174 5175 void setSuccessor(unsigned Idx, BasicBlock *B) { 5176 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!"); 5177 setSuccessor(B); 5178 } 5179 }; 5180 5181 template <> 5182 struct OperandTraits<CatchReturnInst> 5183 : public FixedNumOperandTraits<CatchReturnInst, 2> {}; 5184 5185 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value) 5186 5187 //===----------------------------------------------------------------------===// 5188 // CleanupReturnInst Class 5189 //===----------------------------------------------------------------------===// 5190 5191 class CleanupReturnInst : public Instruction { 5192 using UnwindDestField = BoolBitfieldElementT<0>; 5193 5194 private: 5195 CleanupReturnInst(const CleanupReturnInst &RI); 5196 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, 5197 BasicBlock::iterator InsertBefore); 5198 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, 5199 Instruction *InsertBefore = nullptr); 5200 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, 5201 BasicBlock *InsertAtEnd); 5202 5203 void init(Value *CleanupPad, BasicBlock *UnwindBB); 5204 5205 protected: 5206 // Note: Instruction needs to be a friend here to call 
cloneImpl. 5207 friend class Instruction; 5208 5209 CleanupReturnInst *cloneImpl() const; 5210 5211 public: 5212 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB, 5213 BasicBlock::iterator InsertBefore) { 5214 assert(CleanupPad); 5215 unsigned Values = 1; 5216 if (UnwindBB) 5217 ++Values; 5218 return new (Values) 5219 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore); 5220 } 5221 5222 static CleanupReturnInst *Create(Value *CleanupPad, 5223 BasicBlock *UnwindBB = nullptr, 5224 Instruction *InsertBefore = nullptr) { 5225 assert(CleanupPad); 5226 unsigned Values = 1; 5227 if (UnwindBB) 5228 ++Values; 5229 return new (Values) 5230 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore); 5231 } 5232 5233 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB, 5234 BasicBlock *InsertAtEnd) { 5235 assert(CleanupPad); 5236 unsigned Values = 1; 5237 if (UnwindBB) 5238 ++Values; 5239 return new (Values) 5240 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd); 5241 } 5242 5243 /// Provide fast operand accessors 5244 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 5245 5246 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } 5247 bool unwindsToCaller() const { return !hasUnwindDest(); } 5248 5249 /// Convenience accessor. 5250 CleanupPadInst *getCleanupPad() const { 5251 return cast<CleanupPadInst>(Op<0>()); 5252 } 5253 void setCleanupPad(CleanupPadInst *CleanupPad) { 5254 assert(CleanupPad); 5255 Op<0>() = CleanupPad; 5256 } 5257 5258 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; } 5259 5260 BasicBlock *getUnwindDest() const { 5261 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr; 5262 } 5263 void setUnwindDest(BasicBlock *NewDest) { 5264 assert(NewDest); 5265 assert(hasUnwindDest()); 5266 Op<1>() = NewDest; 5267 } 5268 5269 // Methods for support type inquiry through isa, cast, and dyn_cast: 5270 static bool classof(const Instruction *I) { 5271 return (I->getOpcode() == Instruction::CleanupRet); 5272 } 5273 static bool classof(const Value *V) { 5274 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5275 } 5276 5277 private: 5278 BasicBlock *getSuccessor(unsigned Idx) const { 5279 assert(Idx == 0); 5280 return getUnwindDest(); 5281 } 5282 5283 void setSuccessor(unsigned Idx, BasicBlock *B) { 5284 assert(Idx == 0); 5285 setUnwindDest(B); 5286 } 5287 5288 // Shadow Instruction::setInstructionSubclassData with a private forwarding 5289 // method so that subclasses cannot accidentally use it. 5290 template <typename Bitfield> 5291 void setSubclassData(typename Bitfield::Type Value) { 5292 Instruction::setSubclassData<Bitfield>(Value); 5293 } 5294 }; 5295 5296 template <> 5297 struct OperandTraits<CleanupReturnInst> 5298 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {}; 5299 5300 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value) 5301 5302 //===----------------------------------------------------------------------===// 5303 // UnreachableInst Class 5304 //===----------------------------------------------------------------------===// 5305 5306 //===--------------------------------------------------------------------------- 5307 /// This function has undefined behavior. In particular, the 5308 /// presence of this instruction indicates some higher level knowledge that the 5309 /// end of the block cannot be reached. 
5310 /// 5311 class UnreachableInst : public Instruction { 5312 protected: 5313 // Note: Instruction needs to be a friend here to call cloneImpl. 5314 friend class Instruction; 5315 5316 UnreachableInst *cloneImpl() const; 5317 5318 public: 5319 explicit UnreachableInst(LLVMContext &C, BasicBlock::iterator InsertBefore); 5320 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr); 5321 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd); 5322 5323 // allocate space for exactly zero operands 5324 void *operator new(size_t S) { return User::operator new(S, 0); } 5325 void operator delete(void *Ptr) { User::operator delete(Ptr); } 5326 5327 unsigned getNumSuccessors() const { return 0; } 5328 5329 // Methods for support type inquiry through isa, cast, and dyn_cast: 5330 static bool classof(const Instruction *I) { 5331 return I->getOpcode() == Instruction::Unreachable; 5332 } 5333 static bool classof(const Value *V) { 5334 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5335 } 5336 5337 private: 5338 BasicBlock *getSuccessor(unsigned idx) const { 5339 llvm_unreachable("UnreachableInst has no successors!"); 5340 } 5341 5342 void setSuccessor(unsigned idx, BasicBlock *B) { 5343 llvm_unreachable("UnreachableInst has no successors!"); 5344 } 5345 }; 5346 5347 //===----------------------------------------------------------------------===// 5348 // TruncInst Class 5349 //===----------------------------------------------------------------------===// 5350 5351 /// This class represents a truncation of integer types. 5352 class TruncInst : public CastInst { 5353 protected: 5354 // Note: Instruction needs to be a friend here to call cloneImpl. 5355 friend class Instruction; 5356 5357 /// Clone an identical TruncInst 5358 TruncInst *cloneImpl() const; 5359 5360 public: 5361 enum { AnyWrap = 0, NoUnsignedWrap = (1 << 0), NoSignedWrap = (1 << 1) }; 5362 5363 /// Constructor with insert-before-instruction semantics 5364 TruncInst( 5365 Value *S, ///< The value to be truncated 5366 Type *Ty, ///< The (smaller) type to truncate to 5367 const Twine &NameStr, ///< A name for the new instruction 5368 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5369 ); 5370 5371 /// Constructor with insert-before-instruction semantics 5372 TruncInst( 5373 Value *S, ///< The value to be truncated 5374 Type *Ty, ///< The (smaller) type to truncate to 5375 const Twine &NameStr = "", ///< A name for the new instruction 5376 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5377 ); 5378 5379 /// Constructor with insert-at-end-of-block semantics 5380 TruncInst( 5381 Value *S, ///< The value to be truncated 5382 Type *Ty, ///< The (smaller) type to truncate to 5383 const Twine &NameStr, ///< A name for the new instruction 5384 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5385 ); 5386 5387 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5388 static bool classof(const Instruction *I) { 5389 return I->getOpcode() == Trunc; 5390 } 5391 static bool classof(const Value *V) { 5392 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5393 } 5394 5395 void setHasNoUnsignedWrap(bool B) { 5396 SubclassOptionalData = 5397 (SubclassOptionalData & ~NoUnsignedWrap) | (B * NoUnsignedWrap); 5398 } 5399 void setHasNoSignedWrap(bool B) { 5400 SubclassOptionalData = 5401 (SubclassOptionalData & ~NoSignedWrap) | (B * NoSignedWrap); 5402 } 5403 5404 /// Test whether this operation is known to never 
5405 /// undergo unsigned overflow, aka the nuw property. 5406 bool hasNoUnsignedWrap() const { 5407 return SubclassOptionalData & NoUnsignedWrap; 5408 } 5409 5410 /// Test whether this operation is known to never 5411 /// undergo signed overflow, aka the nsw property. 5412 bool hasNoSignedWrap() const { 5413 return (SubclassOptionalData & NoSignedWrap) != 0; 5414 } 5415 5416 /// Returns the no-wrap kind of the operation. 5417 unsigned getNoWrapKind() const { 5418 unsigned NoWrapKind = 0; 5419 if (hasNoUnsignedWrap()) 5420 NoWrapKind |= NoUnsignedWrap; 5421 5422 if (hasNoSignedWrap()) 5423 NoWrapKind |= NoSignedWrap; 5424 5425 return NoWrapKind; 5426 } 5427 }; 5428 5429 //===----------------------------------------------------------------------===// 5430 // ZExtInst Class 5431 //===----------------------------------------------------------------------===// 5432 5433 /// This class represents zero extension of integer types. 5434 class ZExtInst : public CastInst { 5435 protected: 5436 // Note: Instruction needs to be a friend here to call cloneImpl. 5437 friend class Instruction; 5438 5439 /// Clone an identical ZExtInst 5440 ZExtInst *cloneImpl() const; 5441 5442 public: 5443 /// Constructor with insert-before-instruction semantics 5444 ZExtInst( 5445 Value *S, ///< The value to be zero extended 5446 Type *Ty, ///< The type to zero extend to 5447 const Twine &NameStr, ///< A name for the new instruction 5448 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5449 ); 5450 5451 /// Constructor with insert-before-instruction semantics 5452 ZExtInst( 5453 Value *S, ///< The value to be zero extended 5454 Type *Ty, ///< The type to zero extend to 5455 const Twine &NameStr = "", ///< A name for the new instruction 5456 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5457 ); 5458 5459 /// Constructor with insert-at-end semantics. 5460 ZExtInst( 5461 Value *S, ///< The value to be zero extended 5462 Type *Ty, ///< The type to zero extend to 5463 const Twine &NameStr, ///< A name for the new instruction 5464 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5465 ); 5466 5467 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5468 static bool classof(const Instruction *I) { 5469 return I->getOpcode() == ZExt; 5470 } 5471 static bool classof(const Value *V) { 5472 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5473 } 5474 }; 5475 5476 //===----------------------------------------------------------------------===// 5477 // SExtInst Class 5478 //===----------------------------------------------------------------------===// 5479 5480 /// This class represents a sign extension of integer types. 5481 class SExtInst : public CastInst { 5482 protected: 5483 // Note: Instruction needs to be a friend here to call cloneImpl. 
5484 friend class Instruction; 5485 5486 /// Clone an identical SExtInst 5487 SExtInst *cloneImpl() const; 5488 5489 public: 5490 /// Constructor with insert-before-instruction semantics 5491 SExtInst( 5492 Value *S, ///< The value to be sign extended 5493 Type *Ty, ///< The type to sign extend to 5494 const Twine &NameStr, ///< A name for the new instruction 5495 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5496 ); 5497 5498 /// Constructor with insert-before-instruction semantics 5499 SExtInst( 5500 Value *S, ///< The value to be sign extended 5501 Type *Ty, ///< The type to sign extend to 5502 const Twine &NameStr = "", ///< A name for the new instruction 5503 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5504 ); 5505 5506 /// Constructor with insert-at-end-of-block semantics 5507 SExtInst( 5508 Value *S, ///< The value to be sign extended 5509 Type *Ty, ///< The type to sign extend to 5510 const Twine &NameStr, ///< A name for the new instruction 5511 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5512 ); 5513 5514 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5515 static bool classof(const Instruction *I) { 5516 return I->getOpcode() == SExt; 5517 } 5518 static bool classof(const Value *V) { 5519 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5520 } 5521 }; 5522 5523 //===----------------------------------------------------------------------===// 5524 // FPTruncInst Class 5525 //===----------------------------------------------------------------------===// 5526 5527 /// This class represents a truncation of floating point types. 5528 class FPTruncInst : public CastInst { 5529 protected: 5530 // Note: Instruction needs to be a friend here to call cloneImpl. 5531 friend class Instruction; 5532 5533 /// Clone an identical FPTruncInst 5534 FPTruncInst *cloneImpl() const; 5535 5536 public: 5537 /// Constructor with insert-before-instruction semantics 5538 FPTruncInst( 5539 Value *S, ///< The value to be truncated 5540 Type *Ty, ///< The type to truncate to 5541 const Twine &NameStr, ///< A name for the new instruction 5542 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5543 ); 5544 5545 /// Constructor with insert-before-instruction semantics 5546 FPTruncInst( 5547 Value *S, ///< The value to be truncated 5548 Type *Ty, ///< The type to truncate to 5549 const Twine &NameStr = "", ///< A name for the new instruction 5550 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5551 ); 5552 5553 /// Constructor with insert-before-instruction semantics 5554 FPTruncInst( 5555 Value *S, ///< The value to be truncated 5556 Type *Ty, ///< The type to truncate to 5557 const Twine &NameStr, ///< A name for the new instruction 5558 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5559 ); 5560 5561 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5562 static bool classof(const Instruction *I) { 5563 return I->getOpcode() == FPTrunc; 5564 } 5565 static bool classof(const Value *V) { 5566 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5567 } 5568 }; 5569 5570 //===----------------------------------------------------------------------===// 5571 // FPExtInst Class 5572 //===----------------------------------------------------------------------===// 5573 5574 /// This class represents an extension of floating point types. 
5575 class FPExtInst : public CastInst { 5576 protected: 5577 // Note: Instruction needs to be a friend here to call cloneImpl. 5578 friend class Instruction; 5579 5580 /// Clone an identical FPExtInst 5581 FPExtInst *cloneImpl() const; 5582 5583 public: 5584 /// Constructor with insert-before-instruction semantics 5585 FPExtInst( 5586 Value *S, ///< The value to be extended 5587 Type *Ty, ///< The type to extend to 5588 const Twine &NameStr, ///< A name for the new instruction 5589 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5590 ); 5591 5592 /// Constructor with insert-before-instruction semantics 5593 FPExtInst( 5594 Value *S, ///< The value to be extended 5595 Type *Ty, ///< The type to extend to 5596 const Twine &NameStr = "", ///< A name for the new instruction 5597 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5598 ); 5599 5600 /// Constructor with insert-at-end-of-block semantics 5601 FPExtInst( 5602 Value *S, ///< The value to be extended 5603 Type *Ty, ///< The type to extend to 5604 const Twine &NameStr, ///< A name for the new instruction 5605 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5606 ); 5607 5608 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5609 static bool classof(const Instruction *I) { 5610 return I->getOpcode() == FPExt; 5611 } 5612 static bool classof(const Value *V) { 5613 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5614 } 5615 }; 5616 5617 //===----------------------------------------------------------------------===// 5618 // UIToFPInst Class 5619 //===----------------------------------------------------------------------===// 5620 5621 /// This class represents a cast unsigned integer to floating point. 5622 class UIToFPInst : public CastInst { 5623 protected: 5624 // Note: Instruction needs to be a friend here to call cloneImpl. 
5625 friend class Instruction; 5626 5627 /// Clone an identical UIToFPInst 5628 UIToFPInst *cloneImpl() const; 5629 5630 public: 5631 /// Constructor with insert-before-instruction semantics 5632 UIToFPInst( 5633 Value *S, ///< The value to be converted 5634 Type *Ty, ///< The type to convert to 5635 const Twine &NameStr, ///< A name for the new instruction 5636 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5637 ); 5638 5639 /// Constructor with insert-before-instruction semantics 5640 UIToFPInst( 5641 Value *S, ///< The value to be converted 5642 Type *Ty, ///< The type to convert to 5643 const Twine &NameStr = "", ///< A name for the new instruction 5644 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5645 ); 5646 5647 /// Constructor with insert-at-end-of-block semantics 5648 UIToFPInst( 5649 Value *S, ///< The value to be converted 5650 Type *Ty, ///< The type to convert to 5651 const Twine &NameStr, ///< A name for the new instruction 5652 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5653 ); 5654 5655 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5656 static bool classof(const Instruction *I) { 5657 return I->getOpcode() == UIToFP; 5658 } 5659 static bool classof(const Value *V) { 5660 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5661 } 5662 }; 5663 5664 //===----------------------------------------------------------------------===// 5665 // SIToFPInst Class 5666 //===----------------------------------------------------------------------===// 5667 5668 /// This class represents a cast from signed integer to floating point. 5669 class SIToFPInst : public CastInst { 5670 protected: 5671 // Note: Instruction needs to be a friend here to call cloneImpl. 
5672 friend class Instruction; 5673 5674 /// Clone an identical SIToFPInst 5675 SIToFPInst *cloneImpl() const; 5676 5677 public: 5678 /// Constructor with insert-before-instruction semantics 5679 SIToFPInst( 5680 Value *S, ///< The value to be converted 5681 Type *Ty, ///< The type to convert to 5682 const Twine &NameStr, ///< A name for the new instruction 5683 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5684 ); 5685 5686 /// Constructor with insert-before-instruction semantics 5687 SIToFPInst( 5688 Value *S, ///< The value to be converted 5689 Type *Ty, ///< The type to convert to 5690 const Twine &NameStr = "", ///< A name for the new instruction 5691 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5692 ); 5693 5694 /// Constructor with insert-at-end-of-block semantics 5695 SIToFPInst( 5696 Value *S, ///< The value to be converted 5697 Type *Ty, ///< The type to convert to 5698 const Twine &NameStr, ///< A name for the new instruction 5699 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5700 ); 5701 5702 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5703 static bool classof(const Instruction *I) { 5704 return I->getOpcode() == SIToFP; 5705 } 5706 static bool classof(const Value *V) { 5707 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5708 } 5709 }; 5710 5711 //===----------------------------------------------------------------------===// 5712 // FPToUIInst Class 5713 //===----------------------------------------------------------------------===// 5714 5715 /// This class represents a cast from floating point to unsigned integer 5716 class FPToUIInst : public CastInst { 5717 protected: 5718 // Note: Instruction needs to be a friend here to call cloneImpl. 5719 friend class Instruction; 5720 5721 /// Clone an identical FPToUIInst 5722 FPToUIInst *cloneImpl() const; 5723 5724 public: 5725 /// Constructor with insert-before-instruction semantics 5726 FPToUIInst( 5727 Value *S, ///< The value to be converted 5728 Type *Ty, ///< The type to convert to 5729 const Twine &NameStr, ///< A name for the new instruction 5730 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5731 ); 5732 5733 /// Constructor with insert-before-instruction semantics 5734 FPToUIInst( 5735 Value *S, ///< The value to be converted 5736 Type *Ty, ///< The type to convert to 5737 const Twine &NameStr = "", ///< A name for the new instruction 5738 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5739 ); 5740 5741 /// Constructor with insert-at-end-of-block semantics 5742 FPToUIInst( 5743 Value *S, ///< The value to be converted 5744 Type *Ty, ///< The type to convert to 5745 const Twine &NameStr, ///< A name for the new instruction 5746 BasicBlock *InsertAtEnd ///< Where to insert the new instruction 5747 ); 5748 5749 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5750 static bool classof(const Instruction *I) { 5751 return I->getOpcode() == FPToUI; 5752 } 5753 static bool classof(const Value *V) { 5754 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5755 } 5756 }; 5757 5758 //===----------------------------------------------------------------------===// 5759 // FPToSIInst Class 5760 //===----------------------------------------------------------------------===// 5761 5762 /// This class represents a cast from floating point to signed integer. 
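// Example usage of the scalar conversion casts in this section (an
// illustrative sketch only; `IntVal`, `Ctx` and `BB` are hypothetical):
//
//   Value *FP  = new SIToFPInst(IntVal, Type::getDoubleTy(Ctx), "fp", BB);
//   Value *Int = new FPToSIInst(FP, Type::getInt32Ty(Ctx), "int", BB);
//
// The generic CastInst::Create factory declared in InstrTypes.h can build the
// same instructions from an opcode when the cast kind is chosen at runtime.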
5763 class FPToSIInst : public CastInst { 5764 protected: 5765 // Note: Instruction needs to be a friend here to call cloneImpl. 5766 friend class Instruction; 5767 5768 /// Clone an identical FPToSIInst 5769 FPToSIInst *cloneImpl() const; 5770 5771 public: 5772 /// Constructor with insert-before-instruction semantics 5773 FPToSIInst( 5774 Value *S, ///< The value to be converted 5775 Type *Ty, ///< The type to convert to 5776 const Twine &NameStr, ///< A name for the new instruction 5777 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5778 ); 5779 5780 /// Constructor with insert-before-instruction semantics 5781 FPToSIInst( 5782 Value *S, ///< The value to be converted 5783 Type *Ty, ///< The type to convert to 5784 const Twine &NameStr = "", ///< A name for the new instruction 5785 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5786 ); 5787 5788 /// Constructor with insert-at-end-of-block semantics 5789 FPToSIInst( 5790 Value *S, ///< The value to be converted 5791 Type *Ty, ///< The type to convert to 5792 const Twine &NameStr, ///< A name for the new instruction 5793 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5794 ); 5795 5796 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5797 static bool classof(const Instruction *I) { 5798 return I->getOpcode() == FPToSI; 5799 } 5800 static bool classof(const Value *V) { 5801 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5802 } 5803 }; 5804 5805 //===----------------------------------------------------------------------===// 5806 // IntToPtrInst Class 5807 //===----------------------------------------------------------------------===// 5808 5809 /// This class represents a cast from an integer to a pointer. 5810 class IntToPtrInst : public CastInst { 5811 public: 5812 // Note: Instruction needs to be a friend here to call cloneImpl. 5813 friend class Instruction; 5814 5815 /// Constructor with insert-before-instruction semantics 5816 IntToPtrInst( 5817 Value *S, ///< The value to be converted 5818 Type *Ty, ///< The type to convert to 5819 const Twine &NameStr, ///< A name for the new instruction 5820 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5821 ); 5822 5823 /// Constructor with insert-before-instruction semantics 5824 IntToPtrInst( 5825 Value *S, ///< The value to be converted 5826 Type *Ty, ///< The type to convert to 5827 const Twine &NameStr = "", ///< A name for the new instruction 5828 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5829 ); 5830 5831 /// Constructor with insert-at-end-of-block semantics 5832 IntToPtrInst( 5833 Value *S, ///< The value to be converted 5834 Type *Ty, ///< The type to convert to 5835 const Twine &NameStr, ///< A name for the new instruction 5836 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5837 ); 5838 5839 /// Clone an identical IntToPtrInst. 5840 IntToPtrInst *cloneImpl() const; 5841 5842 /// Returns the address space of this instruction's pointer type. 
5843 unsigned getAddressSpace() const { 5844 return getType()->getPointerAddressSpace(); 5845 } 5846 5847 // Methods for support type inquiry through isa, cast, and dyn_cast: 5848 static bool classof(const Instruction *I) { 5849 return I->getOpcode() == IntToPtr; 5850 } 5851 static bool classof(const Value *V) { 5852 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5853 } 5854 }; 5855 5856 //===----------------------------------------------------------------------===// 5857 // PtrToIntInst Class 5858 //===----------------------------------------------------------------------===// 5859 5860 /// This class represents a cast from a pointer to an integer. 5861 class PtrToIntInst : public CastInst { 5862 protected: 5863 // Note: Instruction needs to be a friend here to call cloneImpl. 5864 friend class Instruction; 5865 5866 /// Clone an identical PtrToIntInst. 5867 PtrToIntInst *cloneImpl() const; 5868 5869 public: 5870 /// Constructor with insert-before-instruction semantics 5871 PtrToIntInst( 5872 Value *S, ///< The value to be converted 5873 Type *Ty, ///< The type to convert to 5874 const Twine &NameStr, ///< A name for the new instruction 5875 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5876 ); 5877 5878 /// Constructor with insert-before-instruction semantics 5879 PtrToIntInst( 5880 Value *S, ///< The value to be converted 5881 Type *Ty, ///< The type to convert to 5882 const Twine &NameStr = "", ///< A name for the new instruction 5883 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5884 ); 5885 5886 /// Constructor with insert-at-end-of-block semantics 5887 PtrToIntInst( 5888 Value *S, ///< The value to be converted 5889 Type *Ty, ///< The type to convert to 5890 const Twine &NameStr, ///< A name for the new instruction 5891 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5892 ); 5893 5894 /// Gets the pointer operand. 5895 Value *getPointerOperand() { return getOperand(0); } 5896 /// Gets the pointer operand. 5897 const Value *getPointerOperand() const { return getOperand(0); } 5898 /// Gets the operand index of the pointer operand. 5899 static unsigned getPointerOperandIndex() { return 0U; } 5900 5901 /// Returns the address space of the pointer operand. 5902 unsigned getPointerAddressSpace() const { 5903 return getPointerOperand()->getType()->getPointerAddressSpace(); 5904 } 5905 5906 // Methods for support type inquiry through isa, cast, and dyn_cast: 5907 static bool classof(const Instruction *I) { 5908 return I->getOpcode() == PtrToInt; 5909 } 5910 static bool classof(const Value *V) { 5911 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5912 } 5913 }; 5914 5915 //===----------------------------------------------------------------------===// 5916 // BitCastInst Class 5917 //===----------------------------------------------------------------------===// 5918 5919 /// This class represents a no-op cast from one type to another. 5920 class BitCastInst : public CastInst { 5921 protected: 5922 // Note: Instruction needs to be a friend here to call cloneImpl. 5923 friend class Instruction; 5924 5925 /// Clone an identical BitCastInst. 
//===----------------------------------------------------------------------===//
// BitCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a no-op cast from one type to another.
class BitCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical BitCastInst.
  BitCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  BitCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr,               ///< A name for the new instruction
    BasicBlock::iterator InsertBefore   ///< Where to insert the new instruction
  );

  /// Constructor with insert-before-instruction semantics
  BitCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  BitCastInst(
    Value *S,                     ///< The value to be cast
    Type *Ty,                     ///< The type to cast to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == BitCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
// AddrSpaceCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a conversion between pointers from one address space
/// to another.
class AddrSpaceCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical AddrSpaceCastInst.
  AddrSpaceCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  AddrSpaceCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr,               ///< A name for the new instruction
    BasicBlock::iterator InsertBefore   ///< Where to insert the new instruction
  );

  /// Constructor with insert-before-instruction semantics
  AddrSpaceCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  AddrSpaceCastInst(
    Value *S,                     ///< The value to be cast
    Type *Ty,                     ///< The type to cast to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == AddrSpaceCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Gets the pointer operand.
  Value *getPointerOperand() {
    return getOperand(0);
  }

  /// Gets the pointer operand.
  const Value *getPointerOperand() const {
    return getOperand(0);
  }

  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() {
    return 0U;
  }

  /// Returns the address space of the pointer operand.
  unsigned getSrcAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the address space of the result.
  unsigned getDestAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }
};
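// Illustrative sketch, not part of the LLVM API: getSrcAddressSpace() and
// getDestAddressSpace() are typically read as a pair. The helper name and the
// AS parameter are hypothetical.
inline bool castsIntoAddrSpace(const AddrSpaceCastInst *ASC, unsigned AS) {
  // The source space comes from the pointer operand's type, the destination
  // space from the instruction's own (result) type.
  return ASC->getDestAddressSpace() == AS && ASC->getSrcAddressSpace() != AS;
}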
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

/// A helper function that returns the pointer operand of a load or store
/// instruction. Returns nullptr if it is not a load or store.
inline const Value *getLoadStorePointerOperand(const Value *V) {
  if (auto *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerOperand();
  if (auto *Store = dyn_cast<StoreInst>(V))
    return Store->getPointerOperand();
  return nullptr;
}
inline Value *getLoadStorePointerOperand(Value *V) {
  return const_cast<Value *>(
      getLoadStorePointerOperand(static_cast<const Value *>(V)));
}

/// A helper function that returns the pointer operand of a load, store
/// or GEP instruction. Returns nullptr if it is not a load, store, or GEP.
inline const Value *getPointerOperand(const Value *V) {
  if (auto *Ptr = getLoadStorePointerOperand(V))
    return Ptr;
  if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
    return Gep->getPointerOperand();
  return nullptr;
}
inline Value *getPointerOperand(Value *V) {
  return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
}

/// A helper function that returns the alignment of a load or store instruction.
inline Align getLoadStoreAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlign();
  return cast<StoreInst>(I)->getAlign();
}

/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
inline unsigned getLoadStoreAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}

/// A helper function that returns the type of a load or store instruction.
inline Type *getLoadStoreType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}
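// Illustrative sketch, not part of the LLVM API: the helpers above compose
// naturally when loads and stores should be handled uniformly. The helper name
// is hypothetical.
inline bool isAccessOfType(Value *I, Type *Ty) {
  // getLoadStorePointerOperand returns nullptr for anything that is not a
  // load or store, so it doubles as the guard before calling the asserting
  // helper below.
  if (!getLoadStorePointerOperand(I))
    return false;
  return getLoadStoreType(I) == Ty;
}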
/// A helper function that returns an atomic operation's sync scope; returns
/// std::nullopt if it is not an atomic operation.
inline std::optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) {
  if (!I->isAtomic())
    return std::nullopt;
  if (auto *AI = dyn_cast<LoadInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<StoreInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<FenceInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<AtomicRMWInst>(I))
    return AI->getSyncScopeID();
  llvm_unreachable("unhandled atomic operation");
}

//===----------------------------------------------------------------------===//
// FreezeInst Class
//===----------------------------------------------------------------------===//

/// This class represents a freeze instruction, which returns an arbitrary
/// concrete value if its operand is either a poison value or an undef value.
class FreezeInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FreezeInst
  FreezeInst *cloneImpl() const;

public:
  explicit FreezeInst(Value *S, const Twine &NameStr,
                      BasicBlock::iterator InsertBefore);
  explicit FreezeInst(Value *S,
                      const Twine &NameStr = "",
                      Instruction *InsertBefore = nullptr);
  FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Freeze;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

} // end namespace llvm

#endif // LLVM_IR_INSTRUCTIONS_H