1 //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file exposes the class definitions of all of the subclasses of the 10 // Instruction class. This is meant to be an easy way to get access to all 11 // instruction subclasses. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #ifndef LLVM_IR_INSTRUCTIONS_H 16 #define LLVM_IR_INSTRUCTIONS_H 17 18 #include "llvm/ADT/ArrayRef.h" 19 #include "llvm/ADT/Bitfields.h" 20 #include "llvm/ADT/MapVector.h" 21 #include "llvm/ADT/STLExtras.h" 22 #include "llvm/ADT/SmallVector.h" 23 #include "llvm/ADT/Twine.h" 24 #include "llvm/ADT/iterator.h" 25 #include "llvm/ADT/iterator_range.h" 26 #include "llvm/IR/CFG.h" 27 #include "llvm/IR/Constant.h" 28 #include "llvm/IR/DerivedTypes.h" 29 #include "llvm/IR/InstrTypes.h" 30 #include "llvm/IR/Instruction.h" 31 #include "llvm/IR/OperandTraits.h" 32 #include "llvm/IR/Use.h" 33 #include "llvm/IR/User.h" 34 #include "llvm/Support/AtomicOrdering.h" 35 #include "llvm/Support/ErrorHandling.h" 36 #include <cassert> 37 #include <cstddef> 38 #include <cstdint> 39 #include <iterator> 40 #include <optional> 41 42 namespace llvm { 43 44 class APFloat; 45 class APInt; 46 class BasicBlock; 47 class ConstantInt; 48 class DataLayout; 49 class StringRef; 50 class Type; 51 class Value; 52 class UnreachableInst; 53 54 //===----------------------------------------------------------------------===// 55 // AllocaInst Class 56 //===----------------------------------------------------------------------===// 57 58 /// an instruction to allocate memory on the stack 59 class AllocaInst : public UnaryInstruction { 60 Type *AllocatedType; 61 62 using AlignmentField = AlignmentBitfieldElementT<0>; 63 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>; 64 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>; 65 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField, 66 SwiftErrorField>(), 67 "Bitfields must be contiguous"); 68 69 protected: 70 // Note: Instruction needs to be a friend here to call cloneImpl. 
71 friend class Instruction; 72 73 AllocaInst *cloneImpl() const; 74 75 public: 76 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 77 const Twine &Name, BasicBlock::iterator InsertBefore); 78 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 79 const Twine &Name, Instruction *InsertBefore); 80 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 81 const Twine &Name, BasicBlock *InsertAtEnd); 82 83 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name, 84 BasicBlock::iterator InsertBefore); 85 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name, 86 Instruction *InsertBefore); 87 AllocaInst(Type *Ty, unsigned AddrSpace, 88 const Twine &Name, BasicBlock *InsertAtEnd); 89 90 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, 91 const Twine &Name, BasicBlock::iterator); 92 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, 93 const Twine &Name = "", Instruction *InsertBefore = nullptr); 94 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, 95 const Twine &Name, BasicBlock *InsertAtEnd); 96 97 /// Return true if there is an allocation size parameter to the allocation 98 /// instruction that is not 1. 99 bool isArrayAllocation() const; 100 101 /// Get the number of elements allocated. For a simple allocation of a single 102 /// element, this will return a constant 1 value. getArraySize()103 const Value *getArraySize() const { return getOperand(0); } getArraySize()104 Value *getArraySize() { return getOperand(0); } 105 106 /// Overload to return most specific pointer type. getType()107 PointerType *getType() const { 108 return cast<PointerType>(Instruction::getType()); 109 } 110 111 /// Return the address space for the allocation. getAddressSpace()112 unsigned getAddressSpace() const { 113 return getType()->getAddressSpace(); 114 } 115 116 /// Get allocation size in bytes. Returns std::nullopt if size can't be 117 /// determined, e.g. in case of a VLA. 118 std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const; 119 120 /// Get allocation size in bits. Returns std::nullopt if size can't be 121 /// determined, e.g. in case of a VLA. 122 std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const; 123 124 /// Return the type that is being allocated by the instruction. getAllocatedType()125 Type *getAllocatedType() const { return AllocatedType; } 126 /// for use only in special circumstances that need to generically 127 /// transform a whole instruction (eg: IR linking and vectorization). setAllocatedType(Type * Ty)128 void setAllocatedType(Type *Ty) { AllocatedType = Ty; } 129 130 /// Return the alignment of the memory that is being allocated by the 131 /// instruction. getAlign()132 Align getAlign() const { 133 return Align(1ULL << getSubclassData<AlignmentField>()); 134 } 135 setAlignment(Align Align)136 void setAlignment(Align Align) { 137 setSubclassData<AlignmentField>(Log2(Align)); 138 } 139 140 /// Return true if this alloca is in the entry block of the function and is a 141 /// constant size. If so, the code generator will fold it into the 142 /// prolog/epilog code, so it is basically free. 143 bool isStaticAlloca() const; 144 145 /// Return true if this alloca is used as an inalloca argument to a call. Such 146 /// allocas are never considered static even if they are in the entry block. 
isUsedWithInAlloca()147 bool isUsedWithInAlloca() const { 148 return getSubclassData<UsedWithInAllocaField>(); 149 } 150 151 /// Specify whether this alloca is used to represent the arguments to a call. setUsedWithInAlloca(bool V)152 void setUsedWithInAlloca(bool V) { 153 setSubclassData<UsedWithInAllocaField>(V); 154 } 155 156 /// Return true if this alloca is used as a swifterror argument to a call. isSwiftError()157 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); } 158 /// Specify whether this alloca is used to represent a swifterror. setSwiftError(bool V)159 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); } 160 161 // Methods for support type inquiry through isa, cast, and dyn_cast: classof(const Instruction * I)162 static bool classof(const Instruction *I) { 163 return (I->getOpcode() == Instruction::Alloca); 164 } classof(const Value * V)165 static bool classof(const Value *V) { 166 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 167 } 168 169 private: 170 // Shadow Instruction::setInstructionSubclassData with a private forwarding 171 // method so that subclasses cannot accidentally use it. 172 template <typename Bitfield> setSubclassData(typename Bitfield::Type Value)173 void setSubclassData(typename Bitfield::Type Value) { 174 Instruction::setSubclassData<Bitfield>(Value); 175 } 176 }; 177 178 //===----------------------------------------------------------------------===// 179 // LoadInst Class 180 //===----------------------------------------------------------------------===// 181 182 /// An instruction for reading from memory. This uses the SubclassData field in 183 /// Value to store whether or not the load is volatile. 184 class LoadInst : public UnaryInstruction { 185 using VolatileField = BoolBitfieldElementT<0>; 186 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; 187 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; 188 static_assert( 189 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), 190 "Bitfields must be contiguous"); 191 192 void AssertOK(); 193 194 protected: 195 // Note: Instruction needs to be a friend here to call cloneImpl. 
196 friend class Instruction; 197 198 LoadInst *cloneImpl() const; 199 200 public: 201 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, 202 BasicBlock::iterator InsertBefore); 203 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, 204 Instruction *InsertBefore); 205 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd); 206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 207 BasicBlock::iterator InsertBefore); 208 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 209 Instruction *InsertBefore); 210 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 211 BasicBlock *InsertAtEnd); 212 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 213 Align Align, BasicBlock::iterator InsertBefore); 214 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 215 Align Align, Instruction *InsertBefore = nullptr); 216 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 217 Align Align, BasicBlock *InsertAtEnd); 218 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 219 Align Align, AtomicOrdering Order, SyncScope::ID SSID, 220 BasicBlock::iterator InsertBefore); 221 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 222 Align Align, AtomicOrdering Order, 223 SyncScope::ID SSID = SyncScope::System, 224 Instruction *InsertBefore = nullptr); 225 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 226 Align Align, AtomicOrdering Order, SyncScope::ID SSID, 227 BasicBlock *InsertAtEnd); 228 229 /// Return true if this is a load from a volatile memory location. isVolatile()230 bool isVolatile() const { return getSubclassData<VolatileField>(); } 231 232 /// Specify whether this is a volatile load or not. setVolatile(bool V)233 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 234 235 /// Return the alignment of the access that is being performed. getAlign()236 Align getAlign() const { 237 return Align(1ULL << (getSubclassData<AlignmentField>())); 238 } 239 setAlignment(Align Align)240 void setAlignment(Align Align) { 241 setSubclassData<AlignmentField>(Log2(Align)); 242 } 243 244 /// Returns the ordering constraint of this load instruction. getOrdering()245 AtomicOrdering getOrdering() const { 246 return getSubclassData<OrderingField>(); 247 } 248 /// Sets the ordering constraint of this load instruction. May not be Release 249 /// or AcquireRelease. setOrdering(AtomicOrdering Ordering)250 void setOrdering(AtomicOrdering Ordering) { 251 setSubclassData<OrderingField>(Ordering); 252 } 253 254 /// Returns the synchronization scope ID of this load instruction. getSyncScopeID()255 SyncScope::ID getSyncScopeID() const { 256 return SSID; 257 } 258 259 /// Sets the synchronization scope ID of this load instruction. setSyncScopeID(SyncScope::ID SSID)260 void setSyncScopeID(SyncScope::ID SSID) { 261 this->SSID = SSID; 262 } 263 264 /// Sets the ordering constraint and the synchronization scope ID of this load 265 /// instruction. 
266 void setAtomic(AtomicOrdering Ordering, 267 SyncScope::ID SSID = SyncScope::System) { 268 setOrdering(Ordering); 269 setSyncScopeID(SSID); 270 } 271 isSimple()272 bool isSimple() const { return !isAtomic() && !isVolatile(); } 273 isUnordered()274 bool isUnordered() const { 275 return (getOrdering() == AtomicOrdering::NotAtomic || 276 getOrdering() == AtomicOrdering::Unordered) && 277 !isVolatile(); 278 } 279 getPointerOperand()280 Value *getPointerOperand() { return getOperand(0); } getPointerOperand()281 const Value *getPointerOperand() const { return getOperand(0); } getPointerOperandIndex()282 static unsigned getPointerOperandIndex() { return 0U; } getPointerOperandType()283 Type *getPointerOperandType() const { return getPointerOperand()->getType(); } 284 285 /// Returns the address space of the pointer operand. getPointerAddressSpace()286 unsigned getPointerAddressSpace() const { 287 return getPointerOperandType()->getPointerAddressSpace(); 288 } 289 290 // Methods for support type inquiry through isa, cast, and dyn_cast: classof(const Instruction * I)291 static bool classof(const Instruction *I) { 292 return I->getOpcode() == Instruction::Load; 293 } classof(const Value * V)294 static bool classof(const Value *V) { 295 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 296 } 297 298 private: 299 // Shadow Instruction::setInstructionSubclassData with a private forwarding 300 // method so that subclasses cannot accidentally use it. 301 template <typename Bitfield> setSubclassData(typename Bitfield::Type Value)302 void setSubclassData(typename Bitfield::Type Value) { 303 Instruction::setSubclassData<Bitfield>(Value); 304 } 305 306 /// The synchronization scope ID of this load instruction. Not quite enough 307 /// room in SubClassData for everything, so synchronization scope ID gets its 308 /// own field. 309 SyncScope::ID SSID; 310 }; 311 312 //===----------------------------------------------------------------------===// 313 // StoreInst Class 314 //===----------------------------------------------------------------------===// 315 316 /// An instruction for storing to memory. 317 class StoreInst : public Instruction { 318 using VolatileField = BoolBitfieldElementT<0>; 319 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; 320 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; 321 static_assert( 322 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), 323 "Bitfields must be contiguous"); 324 325 void AssertOK(); 326 327 protected: 328 // Note: Instruction needs to be a friend here to call cloneImpl. 
329 friend class Instruction; 330 331 StoreInst *cloneImpl() const; 332 333 public: 334 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore); 335 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd); 336 StoreInst(Value *Val, Value *Ptr, BasicBlock::iterator InsertBefore); 337 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore); 338 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd); 339 StoreInst(Value *Val, Value *Ptr, bool isVolatile, 340 BasicBlock::iterator InsertBefore); 341 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 342 Instruction *InsertBefore = nullptr); 343 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 344 BasicBlock *InsertAtEnd); 345 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 346 BasicBlock::iterator InsertBefore); 347 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 348 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System, 349 Instruction *InsertBefore = nullptr); 350 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 351 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd); 352 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 353 AtomicOrdering Order, SyncScope::ID SSID, 354 BasicBlock::iterator InsertBefore); 355 356 // allocate space for exactly two operands new(size_t S)357 void *operator new(size_t S) { return User::operator new(S, 2); } delete(void * Ptr)358 void operator delete(void *Ptr) { User::operator delete(Ptr); } 359 360 /// Return true if this is a store to a volatile memory location. isVolatile()361 bool isVolatile() const { return getSubclassData<VolatileField>(); } 362 363 /// Specify whether this is a volatile store or not. setVolatile(bool V)364 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 365 366 /// Transparently provide more efficient getOperand methods. 367 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 368 getAlign()369 Align getAlign() const { 370 return Align(1ULL << (getSubclassData<AlignmentField>())); 371 } 372 setAlignment(Align Align)373 void setAlignment(Align Align) { 374 setSubclassData<AlignmentField>(Log2(Align)); 375 } 376 377 /// Returns the ordering constraint of this store instruction. getOrdering()378 AtomicOrdering getOrdering() const { 379 return getSubclassData<OrderingField>(); 380 } 381 382 /// Sets the ordering constraint of this store instruction. May not be 383 /// Acquire or AcquireRelease. setOrdering(AtomicOrdering Ordering)384 void setOrdering(AtomicOrdering Ordering) { 385 setSubclassData<OrderingField>(Ordering); 386 } 387 388 /// Returns the synchronization scope ID of this store instruction. getSyncScopeID()389 SyncScope::ID getSyncScopeID() const { 390 return SSID; 391 } 392 393 /// Sets the synchronization scope ID of this store instruction. setSyncScopeID(SyncScope::ID SSID)394 void setSyncScopeID(SyncScope::ID SSID) { 395 this->SSID = SSID; 396 } 397 398 /// Sets the ordering constraint and the synchronization scope ID of this 399 /// store instruction. 
400 void setAtomic(AtomicOrdering Ordering, 401 SyncScope::ID SSID = SyncScope::System) { 402 setOrdering(Ordering); 403 setSyncScopeID(SSID); 404 } 405 isSimple()406 bool isSimple() const { return !isAtomic() && !isVolatile(); } 407 isUnordered()408 bool isUnordered() const { 409 return (getOrdering() == AtomicOrdering::NotAtomic || 410 getOrdering() == AtomicOrdering::Unordered) && 411 !isVolatile(); 412 } 413 getValueOperand()414 Value *getValueOperand() { return getOperand(0); } getValueOperand()415 const Value *getValueOperand() const { return getOperand(0); } 416 getPointerOperand()417 Value *getPointerOperand() { return getOperand(1); } getPointerOperand()418 const Value *getPointerOperand() const { return getOperand(1); } getPointerOperandIndex()419 static unsigned getPointerOperandIndex() { return 1U; } getPointerOperandType()420 Type *getPointerOperandType() const { return getPointerOperand()->getType(); } 421 422 /// Returns the address space of the pointer operand. getPointerAddressSpace()423 unsigned getPointerAddressSpace() const { 424 return getPointerOperandType()->getPointerAddressSpace(); 425 } 426 427 // Methods for support type inquiry through isa, cast, and dyn_cast: classof(const Instruction * I)428 static bool classof(const Instruction *I) { 429 return I->getOpcode() == Instruction::Store; 430 } classof(const Value * V)431 static bool classof(const Value *V) { 432 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 433 } 434 435 private: 436 // Shadow Instruction::setInstructionSubclassData with a private forwarding 437 // method so that subclasses cannot accidentally use it. 438 template <typename Bitfield> setSubclassData(typename Bitfield::Type Value)439 void setSubclassData(typename Bitfield::Type Value) { 440 Instruction::setSubclassData<Bitfield>(Value); 441 } 442 443 /// The synchronization scope ID of this store instruction. Not quite enough 444 /// room in SubClassData for everything, so synchronization scope ID gets its 445 /// own field. 446 SyncScope::ID SSID; 447 }; 448 449 template <> 450 struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> { 451 }; 452 453 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value) 454 455 //===----------------------------------------------------------------------===// 456 // FenceInst Class 457 //===----------------------------------------------------------------------===// 458 459 /// An instruction for ordering other memory operations. 460 class FenceInst : public Instruction { 461 using OrderingField = AtomicOrderingBitfieldElementT<0>; 462 463 void Init(AtomicOrdering Ordering, SyncScope::ID SSID); 464 465 protected: 466 // Note: Instruction needs to be a friend here to call cloneImpl. 467 friend class Instruction; 468 469 FenceInst *cloneImpl() const; 470 471 public: 472 // Ordering may only be Acquire, Release, AcquireRelease, or 473 // SequentiallyConsistent. 474 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, 475 BasicBlock::iterator InsertBefore); 476 FenceInst(LLVMContext &C, AtomicOrdering Ordering, 477 SyncScope::ID SSID = SyncScope::System, 478 Instruction *InsertBefore = nullptr); 479 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, 480 BasicBlock *InsertAtEnd); 481 482 // allocate space for exactly zero operands 483 void *operator new(size_t S) { return User::operator new(S, 0); } 484 void operator delete(void *Ptr) { User::operator delete(Ptr); } 485 486 /// Returns the ordering constraint of this fence instruction. 
487 AtomicOrdering getOrdering() const { 488 return getSubclassData<OrderingField>(); 489 } 490 491 /// Sets the ordering constraint of this fence instruction. May only be 492 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent. 493 void setOrdering(AtomicOrdering Ordering) { 494 setSubclassData<OrderingField>(Ordering); 495 } 496 497 /// Returns the synchronization scope ID of this fence instruction. 498 SyncScope::ID getSyncScopeID() const { 499 return SSID; 500 } 501 502 /// Sets the synchronization scope ID of this fence instruction. 503 void setSyncScopeID(SyncScope::ID SSID) { 504 this->SSID = SSID; 505 } 506 507 // Methods for support type inquiry through isa, cast, and dyn_cast: 508 static bool classof(const Instruction *I) { 509 return I->getOpcode() == Instruction::Fence; 510 } 511 static bool classof(const Value *V) { 512 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 513 } 514 515 private: 516 // Shadow Instruction::setInstructionSubclassData with a private forwarding 517 // method so that subclasses cannot accidentally use it. 518 template <typename Bitfield> 519 void setSubclassData(typename Bitfield::Type Value) { 520 Instruction::setSubclassData<Bitfield>(Value); 521 } 522 523 /// The synchronization scope ID of this fence instruction. Not quite enough 524 /// room in SubClassData for everything, so synchronization scope ID gets its 525 /// own field. 526 SyncScope::ID SSID; 527 }; 528 529 //===----------------------------------------------------------------------===// 530 // AtomicCmpXchgInst Class 531 //===----------------------------------------------------------------------===// 532 533 /// An instruction that atomically checks whether a 534 /// specified value is in a memory location, and, if it is, stores a new value 535 /// there. The value returned by this instruction is a pair containing the 536 /// original value as first element, and an i1 indicating success (true) or 537 /// failure (false) as second element. 538 /// 539 class AtomicCmpXchgInst : public Instruction { 540 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align, 541 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, 542 SyncScope::ID SSID); 543 544 template <unsigned Offset> 545 using AtomicOrderingBitfieldElement = 546 typename Bitfield::Element<AtomicOrdering, Offset, 3, 547 AtomicOrdering::LAST>; 548 549 protected: 550 // Note: Instruction needs to be a friend here to call cloneImpl. 
551 friend class Instruction; 552 553 AtomicCmpXchgInst *cloneImpl() const; 554 555 public: 556 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, 557 AtomicOrdering SuccessOrdering, 558 AtomicOrdering FailureOrdering, SyncScope::ID SSID, 559 BasicBlock::iterator InsertBefore); 560 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, 561 AtomicOrdering SuccessOrdering, 562 AtomicOrdering FailureOrdering, SyncScope::ID SSID, 563 Instruction *InsertBefore = nullptr); 564 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, 565 AtomicOrdering SuccessOrdering, 566 AtomicOrdering FailureOrdering, SyncScope::ID SSID, 567 BasicBlock *InsertAtEnd); 568 569 // allocate space for exactly three operands 570 void *operator new(size_t S) { return User::operator new(S, 3); } 571 void operator delete(void *Ptr) { User::operator delete(Ptr); } 572 573 using VolatileField = BoolBitfieldElementT<0>; 574 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>; 575 using SuccessOrderingField = 576 AtomicOrderingBitfieldElementT<WeakField::NextBit>; 577 using FailureOrderingField = 578 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>; 579 using AlignmentField = 580 AlignmentBitfieldElementT<FailureOrderingField::NextBit>; 581 static_assert( 582 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField, 583 FailureOrderingField, AlignmentField>(), 584 "Bitfields must be contiguous"); 585 586 /// Return the alignment of the memory that is being allocated by the 587 /// instruction. 588 Align getAlign() const { 589 return Align(1ULL << getSubclassData<AlignmentField>()); 590 } 591 592 void setAlignment(Align Align) { 593 setSubclassData<AlignmentField>(Log2(Align)); 594 } 595 596 /// Return true if this is a cmpxchg from a volatile memory 597 /// location. 598 /// 599 bool isVolatile() const { return getSubclassData<VolatileField>(); } 600 601 /// Specify whether this is a volatile cmpxchg. 602 /// 603 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 604 605 /// Return true if this cmpxchg may spuriously fail. 606 bool isWeak() const { return getSubclassData<WeakField>(); } 607 608 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); } 609 610 /// Transparently provide more efficient getOperand methods. 611 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 612 613 static bool isValidSuccessOrdering(AtomicOrdering Ordering) { 614 return Ordering != AtomicOrdering::NotAtomic && 615 Ordering != AtomicOrdering::Unordered; 616 } 617 618 static bool isValidFailureOrdering(AtomicOrdering Ordering) { 619 return Ordering != AtomicOrdering::NotAtomic && 620 Ordering != AtomicOrdering::Unordered && 621 Ordering != AtomicOrdering::AcquireRelease && 622 Ordering != AtomicOrdering::Release; 623 } 624 625 /// Returns the success ordering constraint of this cmpxchg instruction. 626 AtomicOrdering getSuccessOrdering() const { 627 return getSubclassData<SuccessOrderingField>(); 628 } 629 630 /// Sets the success ordering constraint of this cmpxchg instruction. 631 void setSuccessOrdering(AtomicOrdering Ordering) { 632 assert(isValidSuccessOrdering(Ordering) && 633 "invalid CmpXchg success ordering"); 634 setSubclassData<SuccessOrderingField>(Ordering); 635 } 636 637 /// Returns the failure ordering constraint of this cmpxchg instruction. 
638 AtomicOrdering getFailureOrdering() const { 639 return getSubclassData<FailureOrderingField>(); 640 } 641 642 /// Sets the failure ordering constraint of this cmpxchg instruction. 643 void setFailureOrdering(AtomicOrdering Ordering) { 644 assert(isValidFailureOrdering(Ordering) && 645 "invalid CmpXchg failure ordering"); 646 setSubclassData<FailureOrderingField>(Ordering); 647 } 648 649 /// Returns a single ordering which is at least as strong as both the 650 /// success and failure orderings for this cmpxchg. 651 AtomicOrdering getMergedOrdering() const { 652 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent) 653 return AtomicOrdering::SequentiallyConsistent; 654 if (getFailureOrdering() == AtomicOrdering::Acquire) { 655 if (getSuccessOrdering() == AtomicOrdering::Monotonic) 656 return AtomicOrdering::Acquire; 657 if (getSuccessOrdering() == AtomicOrdering::Release) 658 return AtomicOrdering::AcquireRelease; 659 } 660 return getSuccessOrdering(); 661 } 662 663 /// Returns the synchronization scope ID of this cmpxchg instruction. 664 SyncScope::ID getSyncScopeID() const { 665 return SSID; 666 } 667 668 /// Sets the synchronization scope ID of this cmpxchg instruction. 669 void setSyncScopeID(SyncScope::ID SSID) { 670 this->SSID = SSID; 671 } 672 673 Value *getPointerOperand() { return getOperand(0); } 674 const Value *getPointerOperand() const { return getOperand(0); } 675 static unsigned getPointerOperandIndex() { return 0U; } 676 677 Value *getCompareOperand() { return getOperand(1); } 678 const Value *getCompareOperand() const { return getOperand(1); } 679 680 Value *getNewValOperand() { return getOperand(2); } 681 const Value *getNewValOperand() const { return getOperand(2); } 682 683 /// Returns the address space of the pointer operand. 684 unsigned getPointerAddressSpace() const { 685 return getPointerOperand()->getType()->getPointerAddressSpace(); 686 } 687 688 /// Returns the strongest permitted ordering on failure, given the 689 /// desired ordering on success. 690 /// 691 /// If the comparison in a cmpxchg operation fails, there is no atomic store 692 /// so release semantics cannot be provided. So this function drops explicit 693 /// Release requests from the AtomicOrdering. A SequentiallyConsistent 694 /// operation would remain SequentiallyConsistent. 695 static AtomicOrdering 696 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) { 697 switch (SuccessOrdering) { 698 default: 699 llvm_unreachable("invalid cmpxchg success ordering"); 700 case AtomicOrdering::Release: 701 case AtomicOrdering::Monotonic: 702 return AtomicOrdering::Monotonic; 703 case AtomicOrdering::AcquireRelease: 704 case AtomicOrdering::Acquire: 705 return AtomicOrdering::Acquire; 706 case AtomicOrdering::SequentiallyConsistent: 707 return AtomicOrdering::SequentiallyConsistent; 708 } 709 } 710 711 // Methods for support type inquiry through isa, cast, and dyn_cast: 712 static bool classof(const Instruction *I) { 713 return I->getOpcode() == Instruction::AtomicCmpXchg; 714 } 715 static bool classof(const Value *V) { 716 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 717 } 718 719 private: 720 // Shadow Instruction::setInstructionSubclassData with a private forwarding 721 // method so that subclasses cannot accidentally use it. 722 template <typename Bitfield> 723 void setSubclassData(typename Bitfield::Type Value) { 724 Instruction::setSubclassData<Bitfield>(Value); 725 } 726 727 /// The synchronization scope ID of this cmpxchg instruction. 
Not quite 728 /// enough room in SubClassData for everything, so synchronization scope ID 729 /// gets its own field. 730 SyncScope::ID SSID; 731 }; 732 733 template <> 734 struct OperandTraits<AtomicCmpXchgInst> : 735 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> { 736 }; 737 738 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value) 739 740 //===----------------------------------------------------------------------===// 741 // AtomicRMWInst Class 742 //===----------------------------------------------------------------------===// 743 744 /// an instruction that atomically reads a memory location, 745 /// combines it with another value, and then stores the result back. Returns 746 /// the old value. 747 /// 748 class AtomicRMWInst : public Instruction { 749 protected: 750 // Note: Instruction needs to be a friend here to call cloneImpl. 751 friend class Instruction; 752 753 AtomicRMWInst *cloneImpl() const; 754 755 public: 756 /// This enumeration lists the possible modifications atomicrmw can make. In 757 /// the descriptions, 'p' is the pointer to the instruction's memory location, 758 /// 'old' is the initial value of *p, and 'v' is the other value passed to the 759 /// instruction. These instructions always return 'old'. 760 enum BinOp : unsigned { 761 /// *p = v 762 Xchg, 763 /// *p = old + v 764 Add, 765 /// *p = old - v 766 Sub, 767 /// *p = old & v 768 And, 769 /// *p = ~(old & v) 770 Nand, 771 /// *p = old | v 772 Or, 773 /// *p = old ^ v 774 Xor, 775 /// *p = old >signed v ? old : v 776 Max, 777 /// *p = old <signed v ? old : v 778 Min, 779 /// *p = old >unsigned v ? old : v 780 UMax, 781 /// *p = old <unsigned v ? old : v 782 UMin, 783 784 /// *p = old + v 785 FAdd, 786 787 /// *p = old - v 788 FSub, 789 790 /// *p = maxnum(old, v) 791 /// \p maxnum matches the behavior of \p llvm.maxnum.*. 792 FMax, 793 794 /// *p = minnum(old, v) 795 /// \p minnum matches the behavior of \p llvm.minnum.*. 796 FMin, 797 798 /// Increment one up to a maximum value. 799 /// *p = (old u>= v) ? 0 : (old + 1) 800 UIncWrap, 801 802 /// Decrement one until a minimum value or zero. 803 /// *p = ((old == 0) || (old u> v)) ? 
v : (old - 1) 804 UDecWrap, 805 806 FIRST_BINOP = Xchg, 807 LAST_BINOP = UDecWrap, 808 BAD_BINOP 809 }; 810 811 private: 812 template <unsigned Offset> 813 using AtomicOrderingBitfieldElement = 814 typename Bitfield::Element<AtomicOrdering, Offset, 3, 815 AtomicOrdering::LAST>; 816 817 template <unsigned Offset> 818 using BinOpBitfieldElement = 819 typename Bitfield::Element<BinOp, Offset, 5, BinOp::LAST_BINOP>; 820 821 public: 822 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, 823 AtomicOrdering Ordering, SyncScope::ID SSID, 824 BasicBlock::iterator InsertBefore); 825 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, 826 AtomicOrdering Ordering, SyncScope::ID SSID, 827 Instruction *InsertBefore = nullptr); 828 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, 829 AtomicOrdering Ordering, SyncScope::ID SSID, 830 BasicBlock *InsertAtEnd); 831 832 // allocate space for exactly two operands 833 void *operator new(size_t S) { return User::operator new(S, 2); } 834 void operator delete(void *Ptr) { User::operator delete(Ptr); } 835 836 using VolatileField = BoolBitfieldElementT<0>; 837 using AtomicOrderingField = 838 AtomicOrderingBitfieldElementT<VolatileField::NextBit>; 839 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>; 840 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>; 841 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField, 842 OperationField, AlignmentField>(), 843 "Bitfields must be contiguous"); 844 845 BinOp getOperation() const { return getSubclassData<OperationField>(); } 846 847 static StringRef getOperationName(BinOp Op); 848 849 static bool isFPOperation(BinOp Op) { 850 switch (Op) { 851 case AtomicRMWInst::FAdd: 852 case AtomicRMWInst::FSub: 853 case AtomicRMWInst::FMax: 854 case AtomicRMWInst::FMin: 855 return true; 856 default: 857 return false; 858 } 859 } 860 861 void setOperation(BinOp Operation) { 862 setSubclassData<OperationField>(Operation); 863 } 864 865 /// Return the alignment of the memory that is being allocated by the 866 /// instruction. 867 Align getAlign() const { 868 return Align(1ULL << getSubclassData<AlignmentField>()); 869 } 870 871 void setAlignment(Align Align) { 872 setSubclassData<AlignmentField>(Log2(Align)); 873 } 874 875 /// Return true if this is a RMW on a volatile memory location. 876 /// 877 bool isVolatile() const { return getSubclassData<VolatileField>(); } 878 879 /// Specify whether this is a volatile RMW or not. 880 /// 881 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 882 883 /// Transparently provide more efficient getOperand methods. 884 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 885 886 /// Returns the ordering constraint of this rmw instruction. 887 AtomicOrdering getOrdering() const { 888 return getSubclassData<AtomicOrderingField>(); 889 } 890 891 /// Sets the ordering constraint of this rmw instruction. 892 void setOrdering(AtomicOrdering Ordering) { 893 assert(Ordering != AtomicOrdering::NotAtomic && 894 "atomicrmw instructions can only be atomic."); 895 assert(Ordering != AtomicOrdering::Unordered && 896 "atomicrmw instructions cannot be unordered."); 897 setSubclassData<AtomicOrderingField>(Ordering); 898 } 899 900 /// Returns the synchronization scope ID of this rmw instruction. 901 SyncScope::ID getSyncScopeID() const { 902 return SSID; 903 } 904 905 /// Sets the synchronization scope ID of this rmw instruction. 
906 void setSyncScopeID(SyncScope::ID SSID) { 907 this->SSID = SSID; 908 } 909 910 Value *getPointerOperand() { return getOperand(0); } 911 const Value *getPointerOperand() const { return getOperand(0); } 912 static unsigned getPointerOperandIndex() { return 0U; } 913 914 Value *getValOperand() { return getOperand(1); } 915 const Value *getValOperand() const { return getOperand(1); } 916 917 /// Returns the address space of the pointer operand. 918 unsigned getPointerAddressSpace() const { 919 return getPointerOperand()->getType()->getPointerAddressSpace(); 920 } 921 922 bool isFloatingPointOperation() const { 923 return isFPOperation(getOperation()); 924 } 925 926 // Methods for support type inquiry through isa, cast, and dyn_cast: 927 static bool classof(const Instruction *I) { 928 return I->getOpcode() == Instruction::AtomicRMW; 929 } 930 static bool classof(const Value *V) { 931 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 932 } 933 934 private: 935 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align, 936 AtomicOrdering Ordering, SyncScope::ID SSID); 937 938 // Shadow Instruction::setInstructionSubclassData with a private forwarding 939 // method so that subclasses cannot accidentally use it. 940 template <typename Bitfield> 941 void setSubclassData(typename Bitfield::Type Value) { 942 Instruction::setSubclassData<Bitfield>(Value); 943 } 944 945 /// The synchronization scope ID of this rmw instruction. Not quite enough 946 /// room in SubClassData for everything, so synchronization scope ID gets its 947 /// own field. 948 SyncScope::ID SSID; 949 }; 950 951 template <> 952 struct OperandTraits<AtomicRMWInst> 953 : public FixedNumOperandTraits<AtomicRMWInst,2> { 954 }; 955 956 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value) 957 958 //===----------------------------------------------------------------------===// 959 // GetElementPtrInst Class 960 //===----------------------------------------------------------------------===// 961 962 // checkGEPType - Simple wrapper function to give a better assertion failure 963 // message on bad indexes for a gep instruction. 964 // 965 inline Type *checkGEPType(Type *Ty) { 966 assert(Ty && "Invalid GetElementPtrInst indices for type!"); 967 return Ty; 968 } 969 970 /// an instruction for type-safe pointer arithmetic to 971 /// access elements of arrays and structs 972 /// 973 class GetElementPtrInst : public Instruction { 974 Type *SourceElementType; 975 Type *ResultElementType; 976 977 GetElementPtrInst(const GetElementPtrInst &GEPI); 978 979 /// Constructors - Create a getelementptr instruction with a base pointer an 980 /// list of indices. The first and second ctor can optionally insert before an 981 /// existing instruction, the third appends the new instruction to the 982 /// specified BasicBlock. 983 inline GetElementPtrInst(Type *PointeeType, Value *Ptr, 984 ArrayRef<Value *> IdxList, unsigned Values, 985 const Twine &NameStr, 986 BasicBlock::iterator InsertBefore); 987 inline GetElementPtrInst(Type *PointeeType, Value *Ptr, 988 ArrayRef<Value *> IdxList, unsigned Values, 989 const Twine &NameStr, Instruction *InsertBefore); 990 inline GetElementPtrInst(Type *PointeeType, Value *Ptr, 991 ArrayRef<Value *> IdxList, unsigned Values, 992 const Twine &NameStr, BasicBlock *InsertAtEnd); 993 994 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr); 995 996 protected: 997 // Note: Instruction needs to be a friend here to call cloneImpl. 
998 friend class Instruction; 999 1000 GetElementPtrInst *cloneImpl() const; 1001 1002 public: 1003 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, 1004 ArrayRef<Value *> IdxList, 1005 const Twine &NameStr, 1006 BasicBlock::iterator InsertBefore) { 1007 unsigned Values = 1 + unsigned(IdxList.size()); 1008 assert(PointeeType && "Must specify element type"); 1009 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, 1010 NameStr, InsertBefore); 1011 } 1012 1013 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, 1014 ArrayRef<Value *> IdxList, 1015 const Twine &NameStr = "", 1016 Instruction *InsertBefore = nullptr) { 1017 unsigned Values = 1 + unsigned(IdxList.size()); 1018 assert(PointeeType && "Must specify element type"); 1019 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, 1020 NameStr, InsertBefore); 1021 } 1022 1023 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, 1024 ArrayRef<Value *> IdxList, 1025 const Twine &NameStr, 1026 BasicBlock *InsertAtEnd) { 1027 unsigned Values = 1 + unsigned(IdxList.size()); 1028 assert(PointeeType && "Must specify element type"); 1029 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, 1030 NameStr, InsertAtEnd); 1031 } 1032 1033 /// Create an "inbounds" getelementptr. See the documentation for the 1034 /// "inbounds" flag in LangRef.html for details. 1035 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr, 1036 ArrayRef<Value *> IdxList, 1037 const Twine &NameStr, 1038 BasicBlock::iterator InsertBefore) { 1039 GetElementPtrInst *GEP = 1040 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore); 1041 GEP->setIsInBounds(true); 1042 return GEP; 1043 } 1044 1045 static GetElementPtrInst * 1046 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList, 1047 const Twine &NameStr = "", 1048 Instruction *InsertBefore = nullptr) { 1049 GetElementPtrInst *GEP = 1050 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore); 1051 GEP->setIsInBounds(true); 1052 return GEP; 1053 } 1054 1055 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr, 1056 ArrayRef<Value *> IdxList, 1057 const Twine &NameStr, 1058 BasicBlock *InsertAtEnd) { 1059 GetElementPtrInst *GEP = 1060 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd); 1061 GEP->setIsInBounds(true); 1062 return GEP; 1063 } 1064 1065 /// Transparently provide more efficient getOperand methods. 1066 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1067 1068 Type *getSourceElementType() const { return SourceElementType; } 1069 1070 void setSourceElementType(Type *Ty) { SourceElementType = Ty; } 1071 void setResultElementType(Type *Ty) { ResultElementType = Ty; } 1072 1073 Type *getResultElementType() const { 1074 return ResultElementType; 1075 } 1076 1077 /// Returns the address space of this instruction's pointer type. 1078 unsigned getAddressSpace() const { 1079 // Note that this is always the same as the pointer operand's address space 1080 // and that is cheaper to compute, so cheat here. 1081 return getPointerAddressSpace(); 1082 } 1083 1084 /// Returns the result type of a getelementptr with the given source 1085 /// element type and indexes. 1086 /// 1087 /// Null is returned if the indices are invalid for the specified 1088 /// source element type. 
1089 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList); 1090 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList); 1091 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList); 1092 1093 /// Return the type of the element at the given index of an indexable 1094 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})". 1095 /// 1096 /// Returns null if the type can't be indexed, or the given index is not 1097 /// legal for the given type. 1098 static Type *getTypeAtIndex(Type *Ty, Value *Idx); 1099 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx); 1100 1101 inline op_iterator idx_begin() { return op_begin()+1; } 1102 inline const_op_iterator idx_begin() const { return op_begin()+1; } 1103 inline op_iterator idx_end() { return op_end(); } 1104 inline const_op_iterator idx_end() const { return op_end(); } 1105 1106 inline iterator_range<op_iterator> indices() { 1107 return make_range(idx_begin(), idx_end()); 1108 } 1109 1110 inline iterator_range<const_op_iterator> indices() const { 1111 return make_range(idx_begin(), idx_end()); 1112 } 1113 1114 Value *getPointerOperand() { 1115 return getOperand(0); 1116 } 1117 const Value *getPointerOperand() const { 1118 return getOperand(0); 1119 } 1120 static unsigned getPointerOperandIndex() { 1121 return 0U; // get index for modifying correct operand. 1122 } 1123 1124 /// Method to return the pointer operand as a 1125 /// PointerType. 1126 Type *getPointerOperandType() const { 1127 return getPointerOperand()->getType(); 1128 } 1129 1130 /// Returns the address space of the pointer operand. 1131 unsigned getPointerAddressSpace() const { 1132 return getPointerOperandType()->getPointerAddressSpace(); 1133 } 1134 1135 /// Returns the pointer type returned by the GEP 1136 /// instruction, which may be a vector of pointers. 1137 static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) { 1138 // Vector GEP 1139 Type *Ty = Ptr->getType(); 1140 if (Ty->isVectorTy()) 1141 return Ty; 1142 1143 for (Value *Index : IdxList) 1144 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) { 1145 ElementCount EltCount = IndexVTy->getElementCount(); 1146 return VectorType::get(Ty, EltCount); 1147 } 1148 // Scalar GEP 1149 return Ty; 1150 } 1151 1152 unsigned getNumIndices() const { // Note: always non-negative 1153 return getNumOperands() - 1; 1154 } 1155 1156 bool hasIndices() const { 1157 return getNumOperands() > 1; 1158 } 1159 1160 /// Return true if all of the indices of this GEP are 1161 /// zeros. If so, the result pointer and the first operand have the same 1162 /// value, just potentially different types. 1163 bool hasAllZeroIndices() const; 1164 1165 /// Return true if all of the indices of this GEP are 1166 /// constant integers. If so, the result pointer and the first operand have 1167 /// a constant offset between them. 1168 bool hasAllConstantIndices() const; 1169 1170 /// Set or clear the inbounds flag on this GEP instruction. 1171 /// See LangRef.html for the meaning of inbounds on a getelementptr. 1172 void setIsInBounds(bool b = true); 1173 1174 /// Determine whether the GEP has the inbounds flag. 1175 bool isInBounds() const; 1176 1177 /// Accumulate the constant address offset of this GEP if possible. 1178 /// 1179 /// This routine accepts an APInt into which it will accumulate the constant 1180 /// offset of this GEP if the GEP is in fact constant. 
If the GEP is not 1181 /// all-constant, it returns false and the value of the offset APInt is 1182 /// undefined (it is *not* preserved!). The APInt passed into this routine 1183 /// must be at least as wide as the IntPtr type for the address space of 1184 /// the base GEP pointer. 1185 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const; 1186 bool collectOffset(const DataLayout &DL, unsigned BitWidth, 1187 MapVector<Value *, APInt> &VariableOffsets, 1188 APInt &ConstantOffset) const; 1189 // Methods for support type inquiry through isa, cast, and dyn_cast: 1190 static bool classof(const Instruction *I) { 1191 return (I->getOpcode() == Instruction::GetElementPtr); 1192 } 1193 static bool classof(const Value *V) { 1194 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1195 } 1196 }; 1197 1198 template <> 1199 struct OperandTraits<GetElementPtrInst> : 1200 public VariadicOperandTraits<GetElementPtrInst, 1> { 1201 }; 1202 1203 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, 1204 ArrayRef<Value *> IdxList, unsigned Values, 1205 const Twine &NameStr, 1206 BasicBlock::iterator InsertBefore) 1207 : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr, 1208 OperandTraits<GetElementPtrInst>::op_end(this) - Values, 1209 Values, InsertBefore), 1210 SourceElementType(PointeeType), 1211 ResultElementType(getIndexedType(PointeeType, IdxList)) { 1212 init(Ptr, IdxList, NameStr); 1213 } 1214 1215 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, 1216 ArrayRef<Value *> IdxList, unsigned Values, 1217 const Twine &NameStr, 1218 Instruction *InsertBefore) 1219 : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr, 1220 OperandTraits<GetElementPtrInst>::op_end(this) - Values, 1221 Values, InsertBefore), 1222 SourceElementType(PointeeType), 1223 ResultElementType(getIndexedType(PointeeType, IdxList)) { 1224 init(Ptr, IdxList, NameStr); 1225 } 1226 1227 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, 1228 ArrayRef<Value *> IdxList, unsigned Values, 1229 const Twine &NameStr, 1230 BasicBlock *InsertAtEnd) 1231 : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr, 1232 OperandTraits<GetElementPtrInst>::op_end(this) - Values, 1233 Values, InsertAtEnd), 1234 SourceElementType(PointeeType), 1235 ResultElementType(getIndexedType(PointeeType, IdxList)) { 1236 init(Ptr, IdxList, NameStr); 1237 } 1238 1239 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value) 1240 1241 //===----------------------------------------------------------------------===// 1242 // ICmpInst Class 1243 //===----------------------------------------------------------------------===// 1244 1245 /// This instruction compares its operands according to the predicate given 1246 /// to the constructor. It only operates on integers or pointers. The operands 1247 /// must be identical types. 1248 /// Represent an integer comparison operator. 1249 class ICmpInst: public CmpInst { 1250 void AssertOK() { 1251 assert(isIntPredicate() && 1252 "Invalid ICmp predicate value"); 1253 assert(getOperand(0)->getType() == getOperand(1)->getType() && 1254 "Both operands to ICmp instruction are not of the same type!"); 1255 // Check that the operands are the right type 1256 assert((getOperand(0)->getType()->isIntOrIntVectorTy() || 1257 getOperand(0)->getType()->isPtrOrPtrVectorTy()) && 1258 "Invalid operand types for ICmp instruction"); 1259 } 1260 1261 protected: 1262 // Note: Instruction needs to be a friend here to call cloneImpl. 
1263 friend class Instruction; 1264 1265 /// Clone an identical ICmpInst 1266 ICmpInst *cloneImpl() const; 1267 1268 public: 1269 /// Constructor with insert-before-instruction semantics. 1270 ICmpInst( 1271 BasicBlock::iterator InsertBefore, ///< Where to insert 1272 Predicate pred, ///< The predicate to use for the comparison 1273 Value *LHS, ///< The left-hand-side of the expression 1274 Value *RHS, ///< The right-hand-side of the expression 1275 const Twine &NameStr = "" ///< Name of the instruction 1276 ) : CmpInst(makeCmpResultType(LHS->getType()), 1277 Instruction::ICmp, pred, LHS, RHS, NameStr, 1278 InsertBefore) { 1279 #ifndef NDEBUG 1280 AssertOK(); 1281 #endif 1282 } 1283 1284 /// Constructor with insert-before-instruction semantics. 1285 ICmpInst( 1286 Instruction *InsertBefore, ///< Where to insert 1287 Predicate pred, ///< The predicate to use for the comparison 1288 Value *LHS, ///< The left-hand-side of the expression 1289 Value *RHS, ///< The right-hand-side of the expression 1290 const Twine &NameStr = "" ///< Name of the instruction 1291 ) : CmpInst(makeCmpResultType(LHS->getType()), 1292 Instruction::ICmp, pred, LHS, RHS, NameStr, 1293 InsertBefore) { 1294 #ifndef NDEBUG 1295 AssertOK(); 1296 #endif 1297 } 1298 1299 /// Constructor with insert-at-end semantics. 1300 ICmpInst( 1301 BasicBlock *InsertAtEnd, ///< Block to insert into. 1302 Predicate pred, ///< The predicate to use for the comparison 1303 Value *LHS, ///< The left-hand-side of the expression 1304 Value *RHS, ///< The right-hand-side of the expression 1305 const Twine &NameStr = "" ///< Name of the instruction 1306 ) : CmpInst(makeCmpResultType(LHS->getType()), 1307 Instruction::ICmp, pred, LHS, RHS, NameStr, 1308 InsertAtEnd) { 1309 #ifndef NDEBUG 1310 AssertOK(); 1311 #endif 1312 } 1313 1314 /// Constructor with no-insertion semantics 1315 ICmpInst( 1316 Predicate pred, ///< The predicate to use for the comparison 1317 Value *LHS, ///< The left-hand-side of the expression 1318 Value *RHS, ///< The right-hand-side of the expression 1319 const Twine &NameStr = "" ///< Name of the instruction 1320 ) : CmpInst(makeCmpResultType(LHS->getType()), 1321 Instruction::ICmp, pred, LHS, RHS, NameStr) { 1322 #ifndef NDEBUG 1323 AssertOK(); 1324 #endif 1325 } 1326 1327 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc. 1328 /// @returns the predicate that would be the result if the operand were 1329 /// regarded as signed. 1330 /// Return the signed version of the predicate 1331 Predicate getSignedPredicate() const { 1332 return getSignedPredicate(getPredicate()); 1333 } 1334 1335 /// This is a static version that you can use without an instruction. 1336 /// Return the signed version of the predicate. 1337 static Predicate getSignedPredicate(Predicate pred); 1338 1339 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc. 1340 /// @returns the predicate that would be the result if the operand were 1341 /// regarded as unsigned. 1342 /// Return the unsigned version of the predicate 1343 Predicate getUnsignedPredicate() const { 1344 return getUnsignedPredicate(getPredicate()); 1345 } 1346 1347 /// This is a static version that you can use without an instruction. 1348 /// Return the unsigned version of the predicate. 1349 static Predicate getUnsignedPredicate(Predicate pred); 1350 1351 /// Return true if this predicate is either EQ or NE. This also 1352 /// tests for commutativity. 
1353 static bool isEquality(Predicate P) { 1354 return P == ICMP_EQ || P == ICMP_NE; 1355 } 1356 1357 /// Return true if this predicate is either EQ or NE. This also 1358 /// tests for commutativity. 1359 bool isEquality() const { 1360 return isEquality(getPredicate()); 1361 } 1362 1363 /// @returns true if the predicate of this ICmpInst is commutative 1364 /// Determine if this relation is commutative. 1365 bool isCommutative() const { return isEquality(); } 1366 1367 /// Return true if the predicate is relational (not EQ or NE). 1368 /// 1369 bool isRelational() const { 1370 return !isEquality(); 1371 } 1372 1373 /// Return true if the predicate is relational (not EQ or NE). 1374 /// 1375 static bool isRelational(Predicate P) { 1376 return !isEquality(P); 1377 } 1378 1379 /// Return true if the predicate is SGT or UGT. 1380 /// 1381 static bool isGT(Predicate P) { 1382 return P == ICMP_SGT || P == ICMP_UGT; 1383 } 1384 1385 /// Return true if the predicate is SLT or ULT. 1386 /// 1387 static bool isLT(Predicate P) { 1388 return P == ICMP_SLT || P == ICMP_ULT; 1389 } 1390 1391 /// Return true if the predicate is SGE or UGE. 1392 /// 1393 static bool isGE(Predicate P) { 1394 return P == ICMP_SGE || P == ICMP_UGE; 1395 } 1396 1397 /// Return true if the predicate is SLE or ULE. 1398 /// 1399 static bool isLE(Predicate P) { 1400 return P == ICMP_SLE || P == ICMP_ULE; 1401 } 1402 1403 /// Returns the sequence of all ICmp predicates. 1404 /// 1405 static auto predicates() { return ICmpPredicates(); } 1406 1407 /// Exchange the two operands to this instruction in such a way that it does 1408 /// not modify the semantics of the instruction. The predicate value may be 1409 /// changed to retain the same result if the predicate is order dependent 1410 /// (e.g. ult). 1411 /// Swap operands and adjust predicate. 1412 void swapOperands() { 1413 setPredicate(getSwappedPredicate()); 1414 Op<0>().swap(Op<1>()); 1415 } 1416 1417 /// Return result of `LHS Pred RHS` comparison. 1418 static bool compare(const APInt &LHS, const APInt &RHS, 1419 ICmpInst::Predicate Pred); 1420 1421 // Methods for support type inquiry through isa, cast, and dyn_cast: 1422 static bool classof(const Instruction *I) { 1423 return I->getOpcode() == Instruction::ICmp; 1424 } 1425 static bool classof(const Value *V) { 1426 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1427 } 1428 }; 1429 1430 //===----------------------------------------------------------------------===// 1431 // FCmpInst Class 1432 //===----------------------------------------------------------------------===// 1433 1434 /// This instruction compares its operands according to the predicate given 1435 /// to the constructor. It only operates on floating point values or packed 1436 /// vectors of floating point values. The operands must be identical types. 1437 /// Represents a floating point comparison operator. 1438 class FCmpInst: public CmpInst { 1439 void AssertOK() { 1440 assert(isFPPredicate() && "Invalid FCmp predicate value"); 1441 assert(getOperand(0)->getType() == getOperand(1)->getType() && 1442 "Both operands to FCmp instruction are not of the same type!"); 1443 // Check that the operands are the right type 1444 assert(getOperand(0)->getType()->isFPOrFPVectorTy() && 1445 "Invalid operand types for FCmp instruction"); 1446 } 1447 1448 protected: 1449 // Note: Instruction needs to be a friend here to call cloneImpl. 
1450 friend class Instruction; 1451 1452 /// Clone an identical FCmpInst 1453 FCmpInst *cloneImpl() const; 1454 1455 public: 1456 /// Constructor with insert-before-instruction semantics. 1457 FCmpInst( 1458 BasicBlock::iterator InsertBefore, ///< Where to insert 1459 Predicate pred, ///< The predicate to use for the comparison 1460 Value *LHS, ///< The left-hand-side of the expression 1461 Value *RHS, ///< The right-hand-side of the expression 1462 const Twine &NameStr = "" ///< Name of the instruction 1463 ) : CmpInst(makeCmpResultType(LHS->getType()), 1464 Instruction::FCmp, pred, LHS, RHS, NameStr, 1465 InsertBefore) { 1466 AssertOK(); 1467 } 1468 1469 /// Constructor with insert-before-instruction semantics. 1470 FCmpInst( 1471 Instruction *InsertBefore, ///< Where to insert 1472 Predicate pred, ///< The predicate to use for the comparison 1473 Value *LHS, ///< The left-hand-side of the expression 1474 Value *RHS, ///< The right-hand-side of the expression 1475 const Twine &NameStr = "" ///< Name of the instruction 1476 ) : CmpInst(makeCmpResultType(LHS->getType()), 1477 Instruction::FCmp, pred, LHS, RHS, NameStr, 1478 InsertBefore) { 1479 AssertOK(); 1480 } 1481 1482 /// Constructor with insert-at-end semantics. 1483 FCmpInst( 1484 BasicBlock *InsertAtEnd, ///< Block to insert into. 1485 Predicate pred, ///< The predicate to use for the comparison 1486 Value *LHS, ///< The left-hand-side of the expression 1487 Value *RHS, ///< The right-hand-side of the expression 1488 const Twine &NameStr = "" ///< Name of the instruction 1489 ) : CmpInst(makeCmpResultType(LHS->getType()), 1490 Instruction::FCmp, pred, LHS, RHS, NameStr, 1491 InsertAtEnd) { 1492 AssertOK(); 1493 } 1494 1495 /// Constructor with no-insertion semantics 1496 FCmpInst( 1497 Predicate Pred, ///< The predicate to use for the comparison 1498 Value *LHS, ///< The left-hand-side of the expression 1499 Value *RHS, ///< The right-hand-side of the expression 1500 const Twine &NameStr = "", ///< Name of the instruction 1501 Instruction *FlagsSource = nullptr 1502 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS, 1503 RHS, NameStr, nullptr, FlagsSource) { 1504 AssertOK(); 1505 } 1506 1507 /// @returns true if the predicate of this instruction is EQ or NE. 1508 /// Determine if this is an equality predicate. 1509 static bool isEquality(Predicate Pred) { 1510 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ || 1511 Pred == FCMP_UNE; 1512 } 1513 1514 /// @returns true if the predicate of this instruction is EQ or NE. 1515 /// Determine if this is an equality predicate. 1516 bool isEquality() const { return isEquality(getPredicate()); } 1517 1518 /// @returns true if the predicate of this instruction is commutative. 1519 /// Determine if this is a commutative predicate. 1520 bool isCommutative() const { 1521 return isEquality() || 1522 getPredicate() == FCMP_FALSE || 1523 getPredicate() == FCMP_TRUE || 1524 getPredicate() == FCMP_ORD || 1525 getPredicate() == FCMP_UNO; 1526 } 1527 1528 /// @returns true if the predicate is relational (not EQ or NE). 1529 /// Determine if this a relational predicate. 1530 bool isRelational() const { return !isEquality(); } 1531 1532 /// Exchange the two operands to this instruction in such a way that it does 1533 /// not modify the semantics of the instruction. The predicate value may be 1534 /// changed to retain the same result if the predicate is order dependent 1535 /// (e.g. ult). 1536 /// Swap operands and adjust predicate. 
1537 void swapOperands() { 1538 setPredicate(getSwappedPredicate()); 1539 Op<0>().swap(Op<1>()); 1540 } 1541 1542 /// Returns the sequence of all FCmp predicates. 1543 /// 1544 static auto predicates() { return FCmpPredicates(); } 1545 1546 /// Return result of `LHS Pred RHS` comparison. 1547 static bool compare(const APFloat &LHS, const APFloat &RHS, 1548 FCmpInst::Predicate Pred); 1549 1550 /// Methods for support type inquiry through isa, cast, and dyn_cast: 1551 static bool classof(const Instruction *I) { 1552 return I->getOpcode() == Instruction::FCmp; 1553 } 1554 static bool classof(const Value *V) { 1555 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1556 } 1557 }; 1558 1559 //===----------------------------------------------------------------------===// 1560 /// This class represents a function call, abstracting a target 1561 /// machine's calling convention. This class uses low bit of the SubClassData 1562 /// field to indicate whether or not this is a tail call. The rest of the bits 1563 /// hold the calling convention of the call. 1564 /// 1565 class CallInst : public CallBase { 1566 CallInst(const CallInst &CI); 1567 1568 /// Construct a CallInst from a range of arguments 1569 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1570 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1571 BasicBlock::iterator InsertBefore); 1572 1573 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1574 const Twine &NameStr, BasicBlock::iterator InsertBefore) 1575 : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {} 1576 1577 /// Construct a CallInst given a range of arguments. 1578 /// Construct a CallInst from a range of arguments 1579 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1580 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1581 Instruction *InsertBefore); 1582 1583 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1584 const Twine &NameStr, Instruction *InsertBefore) 1585 : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {} 1586 1587 /// Construct a CallInst given a range of arguments. 1588 /// Construct a CallInst from a range of arguments 1589 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1590 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1591 BasicBlock *InsertAtEnd); 1592 1593 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr, 1594 BasicBlock::iterator InsertBefore); 1595 1596 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr, 1597 Instruction *InsertBefore); 1598 1599 CallInst(FunctionType *ty, Value *F, const Twine &NameStr, 1600 BasicBlock *InsertAtEnd); 1601 1602 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args, 1603 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); 1604 void init(FunctionType *FTy, Value *Func, const Twine &NameStr); 1605 1606 /// Compute the number of operands to allocate. 1607 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { 1608 // We need one operand for the called function, plus the input operand 1609 // counts provided. 1610 return 1 + NumArgs + NumBundleInputs; 1611 } 1612 1613 protected: 1614 // Note: Instruction needs to be a friend here to call cloneImpl. 
1615 friend class Instruction; 1616 1617 CallInst *cloneImpl() const; 1618 1619 public: 1620 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr, 1621 BasicBlock::iterator InsertBefore) { 1622 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore); 1623 } 1624 1625 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "", 1626 Instruction *InsertBefore = nullptr) { 1627 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore); 1628 } 1629 1630 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1631 const Twine &NameStr, 1632 BasicBlock::iterator InsertBefore) { 1633 return new (ComputeNumOperands(Args.size())) 1634 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore); 1635 } 1636 1637 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1638 const Twine &NameStr, 1639 Instruction *InsertBefore = nullptr) { 1640 return new (ComputeNumOperands(Args.size())) 1641 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore); 1642 } 1643 1644 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1645 ArrayRef<OperandBundleDef> Bundles, 1646 const Twine &NameStr, 1647 BasicBlock::iterator InsertBefore) { 1648 const int NumOperands = 1649 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 1650 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 1651 1652 return new (NumOperands, DescriptorBytes) 1653 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore); 1654 } 1655 1656 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1657 ArrayRef<OperandBundleDef> Bundles = std::nullopt, 1658 const Twine &NameStr = "", 1659 Instruction *InsertBefore = nullptr) { 1660 const int NumOperands = 1661 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 1662 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 1663 1664 return new (NumOperands, DescriptorBytes) 1665 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore); 1666 } 1667 1668 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr, 1669 BasicBlock *InsertAtEnd) { 1670 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd); 1671 } 1672 1673 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1674 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1675 return new (ComputeNumOperands(Args.size())) 1676 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertAtEnd); 1677 } 1678 1679 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1680 ArrayRef<OperandBundleDef> Bundles, 1681 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1682 const int NumOperands = 1683 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 1684 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 1685 1686 return new (NumOperands, DescriptorBytes) 1687 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd); 1688 } 1689 1690 static CallInst *Create(FunctionCallee Func, const Twine &NameStr, 1691 BasicBlock::iterator InsertBefore) { 1692 return Create(Func.getFunctionType(), Func.getCallee(), NameStr, 1693 InsertBefore); 1694 } 1695 1696 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "", 1697 Instruction *InsertBefore = nullptr) { 1698 return Create(Func.getFunctionType(), Func.getCallee(), NameStr, 1699 InsertBefore); 1700 } 1701 1702 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> 
Args, 1703 ArrayRef<OperandBundleDef> Bundles, 1704 const Twine &NameStr, 1705 BasicBlock::iterator InsertBefore) { 1706 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, 1707 NameStr, InsertBefore); 1708 } 1709 1710 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1711 ArrayRef<OperandBundleDef> Bundles = std::nullopt, 1712 const Twine &NameStr = "", 1713 Instruction *InsertBefore = nullptr) { 1714 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, 1715 NameStr, InsertBefore); 1716 } 1717 1718 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1719 const Twine &NameStr, 1720 BasicBlock::iterator InsertBefore) { 1721 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, 1722 InsertBefore); 1723 } 1724 1725 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1726 const Twine &NameStr, 1727 Instruction *InsertBefore = nullptr) { 1728 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, 1729 InsertBefore); 1730 } 1731 1732 static CallInst *Create(FunctionCallee Func, const Twine &NameStr, 1733 BasicBlock *InsertAtEnd) { 1734 return Create(Func.getFunctionType(), Func.getCallee(), NameStr, 1735 InsertAtEnd); 1736 } 1737 1738 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1739 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1740 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, 1741 InsertAtEnd); 1742 } 1743 1744 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, 1745 ArrayRef<OperandBundleDef> Bundles, 1746 const Twine &NameStr, BasicBlock *InsertAtEnd) { 1747 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, 1748 NameStr, InsertAtEnd); 1749 } 1750 1751 /// Create a clone of \p CI with a different set of operand bundles and 1752 /// insert it before \p InsertPt. 1753 /// 1754 /// The returned call instruction is identical \p CI in every way except that 1755 /// the operand bundles for the new instruction are set to the operand bundles 1756 /// in \p Bundles. 1757 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles, 1758 BasicBlock::iterator InsertPt); 1759 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles, 1760 Instruction *InsertPt = nullptr); 1761 1762 // Note that 'musttail' implies 'tail'. 1763 enum TailCallKind : unsigned { 1764 TCK_None = 0, 1765 TCK_Tail = 1, 1766 TCK_MustTail = 2, 1767 TCK_NoTail = 3, 1768 TCK_LAST = TCK_NoTail 1769 }; 1770 1771 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>; 1772 static_assert( 1773 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(), 1774 "Bitfields must be contiguous"); 1775 1776 TailCallKind getTailCallKind() const { 1777 return getSubclassData<TailCallKindField>(); 1778 } 1779 1780 bool isTailCall() const { 1781 TailCallKind Kind = getTailCallKind(); 1782 return Kind == TCK_Tail || Kind == TCK_MustTail; 1783 } 1784 1785 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; } 1786 1787 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; } 1788 1789 void setTailCallKind(TailCallKind TCK) { 1790 setSubclassData<TailCallKindField>(TCK); 1791 } 1792 1793 void setTailCall(bool IsTc = true) { 1794 setTailCallKind(IsTc ? 
TCK_Tail : TCK_None); 1795 } 1796 1797 /// Return true if the call can return twice 1798 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); } 1799 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); } 1800 1801 // Methods for support type inquiry through isa, cast, and dyn_cast: 1802 static bool classof(const Instruction *I) { 1803 return I->getOpcode() == Instruction::Call; 1804 } 1805 static bool classof(const Value *V) { 1806 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1807 } 1808 1809 /// Updates profile metadata by scaling it by \p S / \p T. 1810 void updateProfWeight(uint64_t S, uint64_t T); 1811 1812 private: 1813 // Shadow Instruction::setInstructionSubclassData with a private forwarding 1814 // method so that subclasses cannot accidentally use it. 1815 template <typename Bitfield> 1816 void setSubclassData(typename Bitfield::Type Value) { 1817 Instruction::setSubclassData<Bitfield>(Value); 1818 } 1819 }; 1820 1821 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1822 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1823 BasicBlock *InsertAtEnd) 1824 : CallBase(Ty->getReturnType(), Instruction::Call, 1825 OperandTraits<CallBase>::op_end(this) - 1826 (Args.size() + CountBundleInputs(Bundles) + 1), 1827 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), 1828 InsertAtEnd) { 1829 init(Ty, Func, Args, Bundles, NameStr); 1830 } 1831 1832 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1833 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1834 BasicBlock::iterator InsertBefore) 1835 : CallBase(Ty->getReturnType(), Instruction::Call, 1836 OperandTraits<CallBase>::op_end(this) - 1837 (Args.size() + CountBundleInputs(Bundles) + 1), 1838 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), 1839 InsertBefore) { 1840 init(Ty, Func, Args, Bundles, NameStr); 1841 } 1842 1843 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1844 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1845 Instruction *InsertBefore) 1846 : CallBase(Ty->getReturnType(), Instruction::Call, 1847 OperandTraits<CallBase>::op_end(this) - 1848 (Args.size() + CountBundleInputs(Bundles) + 1), 1849 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), 1850 InsertBefore) { 1851 init(Ty, Func, Args, Bundles, NameStr); 1852 } 1853 1854 //===----------------------------------------------------------------------===// 1855 // SelectInst Class 1856 //===----------------------------------------------------------------------===// 1857 1858 /// This class represents the LLVM 'select' instruction. 
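///
/// A minimal creation sketch (Cond, TrueV, FalseV, and InsertPt are
/// placeholder values and insertion point, not names defined in this
/// header):
///   SelectInst *Sel =
///       SelectInst::Create(Cond, TrueV, FalseV, "sel", InsertPt);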
1859 /// 1860 class SelectInst : public Instruction { 1861 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, 1862 BasicBlock::iterator InsertBefore) 1863 : Instruction(S1->getType(), Instruction::Select, &Op<0>(), 3, 1864 InsertBefore) { 1865 init(C, S1, S2); 1866 setName(NameStr); 1867 } 1868 1869 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, 1870 Instruction *InsertBefore) 1871 : Instruction(S1->getType(), Instruction::Select, 1872 &Op<0>(), 3, InsertBefore) { 1873 init(C, S1, S2); 1874 setName(NameStr); 1875 } 1876 1877 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, 1878 BasicBlock *InsertAtEnd) 1879 : Instruction(S1->getType(), Instruction::Select, 1880 &Op<0>(), 3, InsertAtEnd) { 1881 init(C, S1, S2); 1882 setName(NameStr); 1883 } 1884 1885 void init(Value *C, Value *S1, Value *S2) { 1886 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select"); 1887 Op<0>() = C; 1888 Op<1>() = S1; 1889 Op<2>() = S2; 1890 } 1891 1892 protected: 1893 // Note: Instruction needs to be a friend here to call cloneImpl. 1894 friend class Instruction; 1895 1896 SelectInst *cloneImpl() const; 1897 1898 public: 1899 static SelectInst *Create(Value *C, Value *S1, Value *S2, 1900 const Twine &NameStr, 1901 BasicBlock::iterator InsertBefore, 1902 Instruction *MDFrom = nullptr) { 1903 SelectInst *Sel = new (3) SelectInst(C, S1, S2, NameStr, InsertBefore); 1904 if (MDFrom) 1905 Sel->copyMetadata(*MDFrom); 1906 return Sel; 1907 } 1908 1909 static SelectInst *Create(Value *C, Value *S1, Value *S2, 1910 const Twine &NameStr = "", 1911 Instruction *InsertBefore = nullptr, 1912 Instruction *MDFrom = nullptr) { 1913 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore); 1914 if (MDFrom) 1915 Sel->copyMetadata(*MDFrom); 1916 return Sel; 1917 } 1918 1919 static SelectInst *Create(Value *C, Value *S1, Value *S2, 1920 const Twine &NameStr, 1921 BasicBlock *InsertAtEnd) { 1922 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd); 1923 } 1924 1925 const Value *getCondition() const { return Op<0>(); } 1926 const Value *getTrueValue() const { return Op<1>(); } 1927 const Value *getFalseValue() const { return Op<2>(); } 1928 Value *getCondition() { return Op<0>(); } 1929 Value *getTrueValue() { return Op<1>(); } 1930 Value *getFalseValue() { return Op<2>(); } 1931 1932 void setCondition(Value *V) { Op<0>() = V; } 1933 void setTrueValue(Value *V) { Op<1>() = V; } 1934 void setFalseValue(Value *V) { Op<2>() = V; } 1935 1936 /// Swap the true and false values of the select instruction. 1937 /// This doesn't swap prof metadata. 1938 void swapValues() { Op<1>().swap(Op<2>()); } 1939 1940 /// Return a string if the specified operands are invalid 1941 /// for a select operation, otherwise return null. 1942 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False); 1943 1944 /// Transparently provide more efficient getOperand methods. 
1945 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 1946 1947 OtherOps getOpcode() const { 1948 return static_cast<OtherOps>(Instruction::getOpcode()); 1949 } 1950 1951 // Methods for support type inquiry through isa, cast, and dyn_cast: 1952 static bool classof(const Instruction *I) { 1953 return I->getOpcode() == Instruction::Select; 1954 } 1955 static bool classof(const Value *V) { 1956 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 1957 } 1958 }; 1959 1960 template <> 1961 struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> { 1962 }; 1963 1964 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value) 1965 1966 //===----------------------------------------------------------------------===// 1967 // VAArgInst Class 1968 //===----------------------------------------------------------------------===// 1969 1970 /// This class represents the va_arg llvm instruction, which returns 1971 /// an argument of the specified type given a va_list and increments that list 1972 /// 1973 class VAArgInst : public UnaryInstruction { 1974 protected: 1975 // Note: Instruction needs to be a friend here to call cloneImpl. 1976 friend class Instruction; 1977 1978 VAArgInst *cloneImpl() const; 1979 1980 public: 1981 VAArgInst(Value *List, Type *Ty, const Twine &NameStr, 1982 BasicBlock::iterator InsertBefore) 1983 : UnaryInstruction(Ty, VAArg, List, InsertBefore) { 1984 setName(NameStr); 1985 } 1986 1987 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "", 1988 Instruction *InsertBefore = nullptr) 1989 : UnaryInstruction(Ty, VAArg, List, InsertBefore) { 1990 setName(NameStr); 1991 } 1992 1993 VAArgInst(Value *List, Type *Ty, const Twine &NameStr, 1994 BasicBlock *InsertAtEnd) 1995 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) { 1996 setName(NameStr); 1997 } 1998 1999 Value *getPointerOperand() { return getOperand(0); } 2000 const Value *getPointerOperand() const { return getOperand(0); } 2001 static unsigned getPointerOperandIndex() { return 0U; } 2002 2003 // Methods for support type inquiry through isa, cast, and dyn_cast: 2004 static bool classof(const Instruction *I) { 2005 return I->getOpcode() == VAArg; 2006 } 2007 static bool classof(const Value *V) { 2008 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2009 } 2010 }; 2011 2012 //===----------------------------------------------------------------------===// 2013 // ExtractElementInst Class 2014 //===----------------------------------------------------------------------===// 2015 2016 /// This instruction extracts a single (scalar) 2017 /// element from a VectorType value 2018 /// 2019 class ExtractElementInst : public Instruction { 2020 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr, 2021 BasicBlock::iterator InsertBefore); 2022 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "", 2023 Instruction *InsertBefore = nullptr); 2024 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr, 2025 BasicBlock *InsertAtEnd); 2026 2027 protected: 2028 // Note: Instruction needs to be a friend here to call cloneImpl. 
2029 friend class Instruction; 2030 2031 ExtractElementInst *cloneImpl() const; 2032 2033 public: 2034 static ExtractElementInst *Create(Value *Vec, Value *Idx, 2035 const Twine &NameStr, 2036 BasicBlock::iterator InsertBefore) { 2037 return new (2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore); 2038 } 2039 2040 static ExtractElementInst *Create(Value *Vec, Value *Idx, 2041 const Twine &NameStr = "", 2042 Instruction *InsertBefore = nullptr) { 2043 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore); 2044 } 2045 2046 static ExtractElementInst *Create(Value *Vec, Value *Idx, 2047 const Twine &NameStr, 2048 BasicBlock *InsertAtEnd) { 2049 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd); 2050 } 2051 2052 /// Return true if an extractelement instruction can be 2053 /// formed with the specified operands. 2054 static bool isValidOperands(const Value *Vec, const Value *Idx); 2055 2056 Value *getVectorOperand() { return Op<0>(); } 2057 Value *getIndexOperand() { return Op<1>(); } 2058 const Value *getVectorOperand() const { return Op<0>(); } 2059 const Value *getIndexOperand() const { return Op<1>(); } 2060 2061 VectorType *getVectorOperandType() const { 2062 return cast<VectorType>(getVectorOperand()->getType()); 2063 } 2064 2065 /// Transparently provide more efficient getOperand methods. 2066 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2067 2068 // Methods for support type inquiry through isa, cast, and dyn_cast: 2069 static bool classof(const Instruction *I) { 2070 return I->getOpcode() == Instruction::ExtractElement; 2071 } 2072 static bool classof(const Value *V) { 2073 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2074 } 2075 }; 2076 2077 template <> 2078 struct OperandTraits<ExtractElementInst> : 2079 public FixedNumOperandTraits<ExtractElementInst, 2> { 2080 }; 2081 2082 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value) 2083 2084 //===----------------------------------------------------------------------===// 2085 // InsertElementInst Class 2086 //===----------------------------------------------------------------------===// 2087 2088 /// This instruction inserts a single (scalar) 2089 /// element into a VectorType value 2090 /// 2091 class InsertElementInst : public Instruction { 2092 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, 2093 BasicBlock::iterator InsertBefore); 2094 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, 2095 const Twine &NameStr = "", 2096 Instruction *InsertBefore = nullptr); 2097 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, 2098 BasicBlock *InsertAtEnd); 2099 2100 protected: 2101 // Note: Instruction needs to be a friend here to call cloneImpl. 
2102 friend class Instruction; 2103 2104 InsertElementInst *cloneImpl() const; 2105 2106 public: 2107 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, 2108 const Twine &NameStr, 2109 BasicBlock::iterator InsertBefore) { 2110 return new (3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); 2111 } 2112 2113 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, 2114 const Twine &NameStr = "", 2115 Instruction *InsertBefore = nullptr) { 2116 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); 2117 } 2118 2119 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, 2120 const Twine &NameStr, 2121 BasicBlock *InsertAtEnd) { 2122 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd); 2123 } 2124 2125 /// Return true if an insertelement instruction can be 2126 /// formed with the specified operands. 2127 static bool isValidOperands(const Value *Vec, const Value *NewElt, 2128 const Value *Idx); 2129 2130 /// Overload to return most specific vector type. 2131 /// 2132 VectorType *getType() const { 2133 return cast<VectorType>(Instruction::getType()); 2134 } 2135 2136 /// Transparently provide more efficient getOperand methods. 2137 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2138 2139 // Methods for support type inquiry through isa, cast, and dyn_cast: 2140 static bool classof(const Instruction *I) { 2141 return I->getOpcode() == Instruction::InsertElement; 2142 } 2143 static bool classof(const Value *V) { 2144 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2145 } 2146 }; 2147 2148 template <> 2149 struct OperandTraits<InsertElementInst> : 2150 public FixedNumOperandTraits<InsertElementInst, 3> { 2151 }; 2152 2153 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value) 2154 2155 //===----------------------------------------------------------------------===// 2156 // ShuffleVectorInst Class 2157 //===----------------------------------------------------------------------===// 2158 2159 constexpr int PoisonMaskElem = -1; 2160 2161 /// This instruction constructs a fixed permutation of two 2162 /// input vectors. 2163 /// 2164 /// For each element of the result vector, the shuffle mask selects an element 2165 /// from one of the input vectors to copy to the result. Non-negative elements 2166 /// in the mask represent an index into the concatenated pair of input vectors. 2167 /// PoisonMaskElem (-1) specifies that the result element is poison. 2168 /// 2169 /// For scalable vectors, all the elements of the mask must be 0 or -1. This 2170 /// requirement may be relaxed in the future. 2171 class ShuffleVectorInst : public Instruction { 2172 SmallVector<int, 4> ShuffleMask; 2173 Constant *ShuffleMaskForBitcode; 2174 2175 protected: 2176 // Note: Instruction needs to be a friend here to call cloneImpl. 
2177 friend class Instruction; 2178 2179 ShuffleVectorInst *cloneImpl() const; 2180 2181 public: 2182 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr, 2183 BasicBlock::iterator InsertBefore); 2184 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "", 2185 Instruction *InsertBefore = nullptr); 2186 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr, 2187 BasicBlock *InsertAtEnd); 2188 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr, 2189 BasicBlock::iterator InsertBefore); 2190 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "", 2191 Instruction *InsertBefore = nullptr); 2192 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr, 2193 BasicBlock *InsertAtEnd); 2194 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, const Twine &NameStr, 2195 BasicBlock::iterator InsertBefor); 2196 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 2197 const Twine &NameStr = "", 2198 Instruction *InsertBefor = nullptr); 2199 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 2200 const Twine &NameStr, BasicBlock *InsertAtEnd); 2201 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 2202 const Twine &NameStr, BasicBlock::iterator InsertBefor); 2203 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 2204 const Twine &NameStr = "", 2205 Instruction *InsertBefor = nullptr); 2206 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 2207 const Twine &NameStr, BasicBlock *InsertAtEnd); 2208 2209 void *operator new(size_t S) { return User::operator new(S, 2); } 2210 void operator delete(void *Ptr) { return User::operator delete(Ptr); } 2211 2212 /// Swap the operands and adjust the mask to preserve the semantics 2213 /// of the instruction. 2214 void commute(); 2215 2216 /// Return true if a shufflevector instruction can be 2217 /// formed with the specified operands. 2218 static bool isValidOperands(const Value *V1, const Value *V2, 2219 const Value *Mask); 2220 static bool isValidOperands(const Value *V1, const Value *V2, 2221 ArrayRef<int> Mask); 2222 2223 /// Overload to return most specific vector type. 2224 /// 2225 VectorType *getType() const { 2226 return cast<VectorType>(Instruction::getType()); 2227 } 2228 2229 /// Transparently provide more efficient getOperand methods. 2230 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2231 2232 /// Return the shuffle mask value of this instruction for the given element 2233 /// index. Return PoisonMaskElem if the element is undef. 2234 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; } 2235 2236 /// Convert the input shuffle mask operand to a vector of integers. Undefined 2237 /// elements of the mask are returned as PoisonMaskElem. 2238 static void getShuffleMask(const Constant *Mask, 2239 SmallVectorImpl<int> &Result); 2240 2241 /// Return the mask for this instruction as a vector of integers. Undefined 2242 /// elements of the mask are returned as PoisonMaskElem. 2243 void getShuffleMask(SmallVectorImpl<int> &Result) const { 2244 Result.assign(ShuffleMask.begin(), ShuffleMask.end()); 2245 } 2246 2247 /// Return the mask for this instruction, for use in bitcode. 2248 /// 2249 /// TODO: This is temporary until we decide a new bitcode encoding for 2250 /// shufflevector. 
2251 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; } 2252 2253 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask, 2254 Type *ResultTy); 2255 2256 void setShuffleMask(ArrayRef<int> Mask); 2257 2258 ArrayRef<int> getShuffleMask() const { return ShuffleMask; } 2259 2260 /// Return true if this shuffle returns a vector with a different number of 2261 /// elements than its source vectors. 2262 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3> 2263 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5> 2264 bool changesLength() const { 2265 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) 2266 ->getElementCount() 2267 .getKnownMinValue(); 2268 unsigned NumMaskElts = ShuffleMask.size(); 2269 return NumSourceElts != NumMaskElts; 2270 } 2271 2272 /// Return true if this shuffle returns a vector with a greater number of 2273 /// elements than its source vectors. 2274 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3> 2275 bool increasesLength() const { 2276 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) 2277 ->getElementCount() 2278 .getKnownMinValue(); 2279 unsigned NumMaskElts = ShuffleMask.size(); 2280 return NumSourceElts < NumMaskElts; 2281 } 2282 2283 /// Return true if this shuffle mask chooses elements from exactly one source 2284 /// vector. 2285 /// Example: <7,5,undef,7> 2286 /// This assumes that vector operands (of length \p NumSrcElts) are the same 2287 /// length as the mask. 2288 static bool isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts); 2289 static bool isSingleSourceMask(const Constant *Mask, int NumSrcElts) { 2290 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2291 SmallVector<int, 16> MaskAsInts; 2292 getShuffleMask(Mask, MaskAsInts); 2293 return isSingleSourceMask(MaskAsInts, NumSrcElts); 2294 } 2295 2296 /// Return true if this shuffle chooses elements from exactly one source 2297 /// vector without changing the length of that vector. 2298 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3> 2299 /// TODO: Optionally allow length-changing shuffles. 2300 bool isSingleSource() const { 2301 return !changesLength() && 2302 isSingleSourceMask(ShuffleMask, ShuffleMask.size()); 2303 } 2304 2305 /// Return true if this shuffle mask chooses elements from exactly one source 2306 /// vector without lane crossings. A shuffle using this mask is not 2307 /// necessarily a no-op because it may change the number of elements from its 2308 /// input vectors or it may provide demanded bits knowledge via undef lanes. 2309 /// Example: <undef,undef,2,3> 2310 static bool isIdentityMask(ArrayRef<int> Mask, int NumSrcElts); 2311 static bool isIdentityMask(const Constant *Mask, int NumSrcElts) { 2312 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2313 2314 // Not possible to express a shuffle mask for a scalable vector for this 2315 // case. 2316 if (isa<ScalableVectorType>(Mask->getType())) 2317 return false; 2318 2319 SmallVector<int, 16> MaskAsInts; 2320 getShuffleMask(Mask, MaskAsInts); 2321 return isIdentityMask(MaskAsInts, NumSrcElts); 2322 } 2323 2324 /// Return true if this shuffle chooses elements from exactly one source 2325 /// vector without lane crossings and does not change the number of elements 2326 /// from its input vectors. 2327 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef> 2328 bool isIdentity() const { 2329 // Not possible to express a shuffle mask for a scalable vector for this 2330 // case. 
    if (isa<ScalableVectorType>(getType()))
      return false;

    return !changesLength() && isIdentityMask(ShuffleMask, ShuffleMask.size());
  }

  /// Return true if this shuffle lengthens exactly one source vector with
  /// undefs in the high elements.
  bool isIdentityWithPadding() const;

  /// Return true if this shuffle extracts the first N elements of exactly one
  /// source vector.
  bool isIdentityWithExtract() const;

  /// Return true if this shuffle concatenates its two source vectors. This
  /// returns false if either input is undefined. In that case, the shuffle is
  /// better classified as an identity with padding operation.
  bool isConcat() const;

  /// Return true if this shuffle mask chooses elements from its source vectors
  /// without lane crossings. A shuffle using this mask would be
  /// equivalent to a vector select with a constant condition operand.
  /// Example: <4,1,6,undef>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// This assumes that vector operands are the same length as the mask
  /// (a length-changing shuffle can never be equivalent to a vector select).
  static bool isSelectMask(ArrayRef<int> Mask, int NumSrcElts);
  static bool isSelectMask(const Constant *Mask, int NumSrcElts) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSelectMask(MaskAsInts, NumSrcElts);
  }

  /// Return true if this shuffle chooses elements from its source vectors
  /// without lane crossings and all operands have the same number of elements.
  /// In other words, this shuffle is equivalent to a vector select with a
  /// constant condition operand.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// TODO: Optionally allow length-changing shuffles.
  bool isSelect() const {
    return !changesLength() && isSelectMask(ShuffleMask, ShuffleMask.size());
  }

  /// Return true if this shuffle mask swaps the order of elements from exactly
  /// one source vector.
  /// Example: <7,6,undef,4>
  /// This assumes that vector operands (of length \p NumSrcElts) are the same
  /// length as the mask.
  static bool isReverseMask(ArrayRef<int> Mask, int NumSrcElts);
  static bool isReverseMask(const Constant *Mask, int NumSrcElts) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isReverseMask(MaskAsInts, NumSrcElts);
  }

  /// Return true if this shuffle swaps the order of elements from exactly
  /// one source vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
  /// TODO: Optionally allow length-changing shuffles.
  bool isReverse() const {
    return !changesLength() && isReverseMask(ShuffleMask, ShuffleMask.size());
  }

  /// Return true if this shuffle mask chooses all elements with the same value
  /// as the first element of exactly one source vector.
2401 /// Example: <4,undef,undef,4> 2402 /// This assumes that vector operands (of length \p NumSrcElts) are the same 2403 /// length as the mask. 2404 static bool isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts); 2405 static bool isZeroEltSplatMask(const Constant *Mask, int NumSrcElts) { 2406 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2407 SmallVector<int, 16> MaskAsInts; 2408 getShuffleMask(Mask, MaskAsInts); 2409 return isZeroEltSplatMask(MaskAsInts, NumSrcElts); 2410 } 2411 2412 /// Return true if all elements of this shuffle are the same value as the 2413 /// first element of exactly one source vector without changing the length 2414 /// of that vector. 2415 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0> 2416 /// TODO: Optionally allow length-changing shuffles. 2417 /// TODO: Optionally allow splats from other elements. 2418 bool isZeroEltSplat() const { 2419 return !changesLength() && 2420 isZeroEltSplatMask(ShuffleMask, ShuffleMask.size()); 2421 } 2422 2423 /// Return true if this shuffle mask is a transpose mask. 2424 /// Transpose vector masks transpose a 2xn matrix. They read corresponding 2425 /// even- or odd-numbered vector elements from two n-dimensional source 2426 /// vectors and write each result into consecutive elements of an 2427 /// n-dimensional destination vector. Two shuffles are necessary to complete 2428 /// the transpose, one for the even elements and another for the odd elements. 2429 /// This description closely follows how the TRN1 and TRN2 AArch64 2430 /// instructions operate. 2431 /// 2432 /// For example, a simple 2x2 matrix can be transposed with: 2433 /// 2434 /// ; Original matrix 2435 /// m0 = < a, b > 2436 /// m1 = < c, d > 2437 /// 2438 /// ; Transposed matrix 2439 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 > 2440 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 > 2441 /// 2442 /// For matrices having greater than n columns, the resulting nx2 transposed 2443 /// matrix is stored in two result vectors such that one vector contains 2444 /// interleaved elements from all the even-numbered rows and the other vector 2445 /// contains interleaved elements from all the odd-numbered rows. For example, 2446 /// a 2x4 matrix can be transposed with: 2447 /// 2448 /// ; Original matrix 2449 /// m0 = < a, b, c, d > 2450 /// m1 = < e, f, g, h > 2451 /// 2452 /// ; Transposed matrix 2453 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 > 2454 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 > 2455 static bool isTransposeMask(ArrayRef<int> Mask, int NumSrcElts); 2456 static bool isTransposeMask(const Constant *Mask, int NumSrcElts) { 2457 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2458 SmallVector<int, 16> MaskAsInts; 2459 getShuffleMask(Mask, MaskAsInts); 2460 return isTransposeMask(MaskAsInts, NumSrcElts); 2461 } 2462 2463 /// Return true if this shuffle transposes the elements of its inputs without 2464 /// changing the length of the vectors. This operation may also be known as a 2465 /// merge or interleave. See the description for isTransposeMask() for the 2466 /// exact specification. 
  /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
  bool isTranspose() const {
    return !changesLength() && isTransposeMask(ShuffleMask, ShuffleMask.size());
  }

  /// Return true if this shuffle mask is a splice mask, concatenating the two
  /// inputs together and then extracting an original width vector starting
  /// from the splice index.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
  /// This assumes that vector operands (of length \p NumSrcElts) are the same
  /// length as the mask.
  static bool isSpliceMask(ArrayRef<int> Mask, int NumSrcElts, int &Index);
  static bool isSpliceMask(const Constant *Mask, int NumSrcElts, int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSpliceMask(MaskAsInts, NumSrcElts, Index);
  }

  /// Return true if this shuffle splices two inputs without changing the
  /// length of the vectors. This operation concatenates the two inputs
  /// together and then extracts an original width vector starting from the
  /// splice index.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
  bool isSplice(int &Index) const {
    return !changesLength() &&
           isSpliceMask(ShuffleMask, ShuffleMask.size(), Index);
  }

  /// Return true if this shuffle mask is an extract subvector mask.
  /// A valid extract subvector mask returns a smaller vector from a single
  /// source operand. The base extraction index is returned as well.
  static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
                                     int &Index);
  static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
                                     int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
  }

  /// Return true if this shuffle mask is an extract subvector mask.
  bool isExtractSubvectorMask(int &Index) const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    int NumSrcElts =
        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
    return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
  }

  /// Return true if this shuffle mask is an insert subvector mask.
  /// A valid insert subvector mask inserts the lowest elements of a second
  /// source operand into an in-place first source operand.
  /// Both the subvector width and the insertion index are returned.
  static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
                                    int &NumSubElts, int &Index);
  static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
                                    int &NumSubElts, int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
2535 if (isa<ScalableVectorType>(Mask->getType())) 2536 return false; 2537 SmallVector<int, 16> MaskAsInts; 2538 getShuffleMask(Mask, MaskAsInts); 2539 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index); 2540 } 2541 2542 /// Return true if this shuffle mask is an insert subvector mask. 2543 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const { 2544 // Not possible to express a shuffle mask for a scalable vector for this 2545 // case. 2546 if (isa<ScalableVectorType>(getType())) 2547 return false; 2548 2549 int NumSrcElts = 2550 cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); 2551 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index); 2552 } 2553 2554 /// Return true if this shuffle mask replicates each of the \p VF elements 2555 /// in a vector \p ReplicationFactor times. 2556 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is: 2557 /// <0,0,0,1,1,1,2,2,2,3,3,3> 2558 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor, 2559 int &VF); 2560 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor, 2561 int &VF) { 2562 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); 2563 // Not possible to express a shuffle mask for a scalable vector for this 2564 // case. 2565 if (isa<ScalableVectorType>(Mask->getType())) 2566 return false; 2567 SmallVector<int, 16> MaskAsInts; 2568 getShuffleMask(Mask, MaskAsInts); 2569 return isReplicationMask(MaskAsInts, ReplicationFactor, VF); 2570 } 2571 2572 /// Return true if this shuffle mask is a replication mask. 2573 bool isReplicationMask(int &ReplicationFactor, int &VF) const; 2574 2575 /// Return true if this shuffle mask represents "clustered" mask of size VF, 2576 /// i.e. each index between [0..VF) is used exactly once in each submask of 2577 /// size VF. 2578 /// For example, the mask for \p VF=4 is: 2579 /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4 2580 /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time. 2581 /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because 2582 /// element 3 is used twice in the second submask 2583 /// (3,3,1,0) and index 2 is not used at all. 2584 static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF); 2585 2586 /// Return true if this shuffle mask is a one-use-single-source("clustered") 2587 /// mask. 2588 bool isOneUseSingleSourceMask(int VF) const; 2589 2590 /// Change values in a shuffle permute mask assuming the two vector operands 2591 /// of length InVecNumElts have swapped position. 2592 static void commuteShuffleMask(MutableArrayRef<int> Mask, 2593 unsigned InVecNumElts) { 2594 for (int &Idx : Mask) { 2595 if (Idx == -1) 2596 continue; 2597 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts; 2598 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 && 2599 "shufflevector mask index out of range"); 2600 } 2601 } 2602 2603 /// Return if this shuffle interleaves its two input vectors together. 2604 bool isInterleave(unsigned Factor); 2605 2606 /// Return true if the mask interleaves one or more input vectors together. 2607 /// 2608 /// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...> 2609 /// E.g. For a Factor of 2 (LaneLen=4): 2610 /// <0, 4, 1, 5, 2, 6, 3, 7> 2611 /// E.g. For a Factor of 3 (LaneLen=4): 2612 /// <4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 12> 2613 /// E.g. 
For a Factor of 4 (LaneLen=2): 2614 /// <0, 2, 6, 4, 1, 3, 7, 5> 2615 /// 2616 /// NumInputElts is the total number of elements in the input vectors. 2617 /// 2618 /// StartIndexes are the first indexes of each vector being interleaved, 2619 /// substituting any indexes that were undef 2620 /// E.g. <4, -1, 2, 5, 1, 3> (Factor=3): StartIndexes=<4, 0, 2> 2621 /// 2622 /// Note that this does not check if the input vectors are consecutive: 2623 /// It will return true for masks such as 2624 /// <0, 4, 6, 1, 5, 7> (Factor=3, LaneLen=2) 2625 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor, 2626 unsigned NumInputElts, 2627 SmallVectorImpl<unsigned> &StartIndexes); 2628 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor, 2629 unsigned NumInputElts) { 2630 SmallVector<unsigned, 8> StartIndexes; 2631 return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes); 2632 } 2633 2634 /// Checks if the shuffle is a bit rotation of the first operand across 2635 /// multiple subelements, e.g: 2636 /// 2637 /// shuffle <8 x i8> %a, <8 x i8> poison, <8 x i32> <1, 0, 3, 2, 5, 4, 7, 6> 2638 /// 2639 /// could be expressed as 2640 /// 2641 /// rotl <4 x i16> %a, 8 2642 /// 2643 /// If it can be expressed as a rotation, returns the number of subelements to 2644 /// group by in NumSubElts and the number of bits to rotate left in RotateAmt. 2645 static bool isBitRotateMask(ArrayRef<int> Mask, unsigned EltSizeInBits, 2646 unsigned MinSubElts, unsigned MaxSubElts, 2647 unsigned &NumSubElts, unsigned &RotateAmt); 2648 2649 // Methods for support type inquiry through isa, cast, and dyn_cast: 2650 static bool classof(const Instruction *I) { 2651 return I->getOpcode() == Instruction::ShuffleVector; 2652 } 2653 static bool classof(const Value *V) { 2654 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2655 } 2656 }; 2657 2658 template <> 2659 struct OperandTraits<ShuffleVectorInst> 2660 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {}; 2661 2662 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value) 2663 2664 //===----------------------------------------------------------------------===// 2665 // ExtractValueInst Class 2666 //===----------------------------------------------------------------------===// 2667 2668 /// This instruction extracts a struct member or array 2669 /// element value from an aggregate value. 2670 /// 2671 class ExtractValueInst : public UnaryInstruction { 2672 SmallVector<unsigned, 4> Indices; 2673 2674 ExtractValueInst(const ExtractValueInst &EVI); 2675 2676 /// Constructors - Create a extractvalue instruction with a base aggregate 2677 /// value and a list of indices. The first and second ctor can optionally 2678 /// insert before an existing instruction, the third appends the new 2679 /// instruction to the specified BasicBlock. 2680 inline ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs, 2681 const Twine &NameStr, 2682 BasicBlock::iterator InsertBefore); 2683 inline ExtractValueInst(Value *Agg, 2684 ArrayRef<unsigned> Idxs, 2685 const Twine &NameStr, 2686 Instruction *InsertBefore); 2687 inline ExtractValueInst(Value *Agg, 2688 ArrayRef<unsigned> Idxs, 2689 const Twine &NameStr, BasicBlock *InsertAtEnd); 2690 2691 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr); 2692 2693 protected: 2694 // Note: Instruction needs to be a friend here to call cloneImpl. 
  friend class Instruction;

  ExtractValueInst *cloneImpl() const;

public:
  static ExtractValueInst *Create(Value *Agg, ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr,
                                  BasicBlock::iterator InsertBefore) {
    return new ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
  }

  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr = "",
                                  Instruction *InsertBefore = nullptr) {
    return new ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
  }

  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr,
                                  BasicBlock *InsertAtEnd) {
    return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
  }

  /// Returns the type of the element that would be extracted
  /// with an extractvalue instruction with the specified parameters.
  ///
  /// Null is returned if the indices are invalid for the specified type.
  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);

  using idx_iterator = const unsigned*;

  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end() const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U; // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractValue;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

ExtractValueInst::ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   BasicBlock::iterator InsertBefore)
    : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                       ExtractValue, Agg, InsertBefore) {
  init(Idxs, NameStr);
}

ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   Instruction *InsertBefore)
    : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                       ExtractValue, Agg, InsertBefore) {
  init(Idxs, NameStr);
}

ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd)
    : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                       ExtractValue, Agg, InsertAtEnd) {
  init(Idxs, NameStr);
}

//===----------------------------------------------------------------------===//
// InsertValueInst Class
//===----------------------------------------------------------------------===//

/// This instruction inserts a struct field or array element
/// value into an aggregate value.
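///
/// A minimal creation sketch (Agg, Val, and InsertPt are placeholder
/// values and insertion point): inserting Val into member 1 of the
/// aggregate Agg:
///   InsertValueInst *IVI =
///       InsertValueInst::Create(Agg, Val, {1}, "ivi", InsertPt);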
2799 /// 2800 class InsertValueInst : public Instruction { 2801 SmallVector<unsigned, 4> Indices; 2802 2803 InsertValueInst(const InsertValueInst &IVI); 2804 2805 /// Constructors - Create a insertvalue instruction with a base aggregate 2806 /// value, a value to insert, and a list of indices. The first and second ctor 2807 /// can optionally insert before an existing instruction, the third appends 2808 /// the new instruction to the specified BasicBlock. 2809 inline InsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, 2810 const Twine &NameStr, 2811 BasicBlock::iterator InsertBefore); 2812 inline InsertValueInst(Value *Agg, Value *Val, 2813 ArrayRef<unsigned> Idxs, 2814 const Twine &NameStr, 2815 Instruction *InsertBefore); 2816 inline InsertValueInst(Value *Agg, Value *Val, 2817 ArrayRef<unsigned> Idxs, 2818 const Twine &NameStr, BasicBlock *InsertAtEnd); 2819 2820 /// Constructors - These three constructors are convenience methods because 2821 /// one and two index insertvalue instructions are so common. 2822 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr, 2823 BasicBlock::iterator InsertBefore); 2824 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, 2825 const Twine &NameStr = "", 2826 Instruction *InsertBefore = nullptr); 2827 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr, 2828 BasicBlock *InsertAtEnd); 2829 2830 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, 2831 const Twine &NameStr); 2832 2833 protected: 2834 // Note: Instruction needs to be a friend here to call cloneImpl. 2835 friend class Instruction; 2836 2837 InsertValueInst *cloneImpl() const; 2838 2839 public: 2840 // allocate space for exactly two operands 2841 void *operator new(size_t S) { return User::operator new(S, 2); } 2842 void operator delete(void *Ptr) { User::operator delete(Ptr); } 2843 2844 static InsertValueInst *Create(Value *Agg, Value *Val, 2845 ArrayRef<unsigned> Idxs, const Twine &NameStr, 2846 BasicBlock::iterator InsertBefore) { 2847 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore); 2848 } 2849 2850 static InsertValueInst *Create(Value *Agg, Value *Val, 2851 ArrayRef<unsigned> Idxs, 2852 const Twine &NameStr = "", 2853 Instruction *InsertBefore = nullptr) { 2854 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore); 2855 } 2856 2857 static InsertValueInst *Create(Value *Agg, Value *Val, 2858 ArrayRef<unsigned> Idxs, 2859 const Twine &NameStr, 2860 BasicBlock *InsertAtEnd) { 2861 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd); 2862 } 2863 2864 /// Transparently provide more efficient getOperand methods. 
2865 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2866 2867 using idx_iterator = const unsigned*; 2868 2869 inline idx_iterator idx_begin() const { return Indices.begin(); } 2870 inline idx_iterator idx_end() const { return Indices.end(); } 2871 inline iterator_range<idx_iterator> indices() const { 2872 return make_range(idx_begin(), idx_end()); 2873 } 2874 2875 Value *getAggregateOperand() { 2876 return getOperand(0); 2877 } 2878 const Value *getAggregateOperand() const { 2879 return getOperand(0); 2880 } 2881 static unsigned getAggregateOperandIndex() { 2882 return 0U; // get index for modifying correct operand 2883 } 2884 2885 Value *getInsertedValueOperand() { 2886 return getOperand(1); 2887 } 2888 const Value *getInsertedValueOperand() const { 2889 return getOperand(1); 2890 } 2891 static unsigned getInsertedValueOperandIndex() { 2892 return 1U; // get index for modifying correct operand 2893 } 2894 2895 ArrayRef<unsigned> getIndices() const { 2896 return Indices; 2897 } 2898 2899 unsigned getNumIndices() const { 2900 return (unsigned)Indices.size(); 2901 } 2902 2903 bool hasIndices() const { 2904 return true; 2905 } 2906 2907 // Methods for support type inquiry through isa, cast, and dyn_cast: 2908 static bool classof(const Instruction *I) { 2909 return I->getOpcode() == Instruction::InsertValue; 2910 } 2911 static bool classof(const Value *V) { 2912 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 2913 } 2914 }; 2915 2916 template <> 2917 struct OperandTraits<InsertValueInst> : 2918 public FixedNumOperandTraits<InsertValueInst, 2> { 2919 }; 2920 2921 InsertValueInst::InsertValueInst(Value *Agg, 2922 Value *Val, 2923 ArrayRef<unsigned> Idxs, 2924 const Twine &NameStr, 2925 BasicBlock::iterator InsertBefore) 2926 : Instruction(Agg->getType(), InsertValue, OperandTraits<InsertValueInst>::op_begin(this), 2927 2, InsertBefore) { 2928 init(Agg, Val, Idxs, NameStr); 2929 } 2930 2931 InsertValueInst::InsertValueInst(Value *Agg, 2932 Value *Val, 2933 ArrayRef<unsigned> Idxs, 2934 const Twine &NameStr, 2935 Instruction *InsertBefore) 2936 : Instruction(Agg->getType(), InsertValue, 2937 OperandTraits<InsertValueInst>::op_begin(this), 2938 2, InsertBefore) { 2939 init(Agg, Val, Idxs, NameStr); 2940 } 2941 2942 InsertValueInst::InsertValueInst(Value *Agg, 2943 Value *Val, 2944 ArrayRef<unsigned> Idxs, 2945 const Twine &NameStr, 2946 BasicBlock *InsertAtEnd) 2947 : Instruction(Agg->getType(), InsertValue, 2948 OperandTraits<InsertValueInst>::op_begin(this), 2949 2, InsertAtEnd) { 2950 init(Agg, Val, Idxs, NameStr); 2951 } 2952 2953 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value) 2954 2955 //===----------------------------------------------------------------------===// 2956 // PHINode Class 2957 //===----------------------------------------------------------------------===// 2958 2959 // PHINode - The PHINode class is used to represent the magical mystical PHI 2960 // node, that can not exist in nature, but can be synthesized in a computer 2961 // scientist's overactive imagination. 2962 // 2963 class PHINode : public Instruction { 2964 /// The number of operands actually allocated. NumOperands is 2965 /// the number actually in use. 
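  //
  // For example, with a placeholder PHINode *PN, the incoming values and
  // their blocks can be walked in parallel:
  //   for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
  //     Value *V = PN->getIncomingValue(I);
  //     BasicBlock *Pred = PN->getIncomingBlock(I);
  //     // ... use V and Pred ...
  //   }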
2966 unsigned ReservedSpace; 2967 2968 PHINode(const PHINode &PN); 2969 2970 explicit PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, 2971 BasicBlock::iterator InsertBefore) 2972 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore), 2973 ReservedSpace(NumReservedValues) { 2974 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!"); 2975 setName(NameStr); 2976 allocHungoffUses(ReservedSpace); 2977 } 2978 2979 explicit PHINode(Type *Ty, unsigned NumReservedValues, 2980 const Twine &NameStr = "", 2981 Instruction *InsertBefore = nullptr) 2982 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore), 2983 ReservedSpace(NumReservedValues) { 2984 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!"); 2985 setName(NameStr); 2986 allocHungoffUses(ReservedSpace); 2987 } 2988 2989 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, 2990 BasicBlock *InsertAtEnd) 2991 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd), 2992 ReservedSpace(NumReservedValues) { 2993 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!"); 2994 setName(NameStr); 2995 allocHungoffUses(ReservedSpace); 2996 } 2997 2998 protected: 2999 // Note: Instruction needs to be a friend here to call cloneImpl. 3000 friend class Instruction; 3001 3002 PHINode *cloneImpl() const; 3003 3004 // allocHungoffUses - this is more complicated than the generic 3005 // User::allocHungoffUses, because we have to allocate Uses for the incoming 3006 // values and pointers to the incoming blocks, all in one allocation. 3007 void allocHungoffUses(unsigned N) { 3008 User::allocHungoffUses(N, /* IsPhi */ true); 3009 } 3010 3011 public: 3012 /// Constructors - NumReservedValues is a hint for the number of incoming 3013 /// edges that this phi node will have (use 0 if you really have no idea). 3014 static PHINode *Create(Type *Ty, unsigned NumReservedValues, 3015 const Twine &NameStr, 3016 BasicBlock::iterator InsertBefore) { 3017 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore); 3018 } 3019 3020 static PHINode *Create(Type *Ty, unsigned NumReservedValues, 3021 const Twine &NameStr = "", 3022 Instruction *InsertBefore = nullptr) { 3023 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore); 3024 } 3025 3026 static PHINode *Create(Type *Ty, unsigned NumReservedValues, 3027 const Twine &NameStr, BasicBlock *InsertAtEnd) { 3028 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd); 3029 } 3030 3031 /// Provide fast operand accessors 3032 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3033 3034 // Block iterator interface. This provides access to the list of incoming 3035 // basic blocks, which parallels the list of incoming values. 3036 // Please note that we are not providing non-const iterators for blocks to 3037 // force all updates go through an interface function. 
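//
// A typical construction sequence looks like this (illustrative sketch; Ty,
// V0, V1, Pred0, Pred1 and InsertPt are hypothetical types, values, blocks
// and insertion point):
//
//   PHINode *PN = PHINode::Create(Ty, /*NumReservedValues=*/2, "merge",
//                                 InsertPt);
//   PN->addIncoming(V0, Pred0);
//   PN->addIncoming(V1, Pred1);
//   for (BasicBlock *Pred : PN->blocks())
//     ...; // read-only; use setIncomingBlock() to modify a block entry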
3038 3039 using block_iterator = BasicBlock **; 3040 using const_block_iterator = BasicBlock * const *; 3041 3042 const_block_iterator block_begin() const { 3043 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace); 3044 } 3045 3046 const_block_iterator block_end() const { 3047 return block_begin() + getNumOperands(); 3048 } 3049 3050 iterator_range<const_block_iterator> blocks() const { 3051 return make_range(block_begin(), block_end()); 3052 } 3053 3054 op_range incoming_values() { return operands(); } 3055 3056 const_op_range incoming_values() const { return operands(); } 3057 3058 /// Return the number of incoming edges 3059 /// 3060 unsigned getNumIncomingValues() const { return getNumOperands(); } 3061 3062 /// Return incoming value number x 3063 /// 3064 Value *getIncomingValue(unsigned i) const { 3065 return getOperand(i); 3066 } 3067 void setIncomingValue(unsigned i, Value *V) { 3068 assert(V && "PHI node got a null value!"); 3069 assert(getType() == V->getType() && 3070 "All operands to PHI node must be the same type as the PHI node!"); 3071 setOperand(i, V); 3072 } 3073 3074 static unsigned getOperandNumForIncomingValue(unsigned i) { 3075 return i; 3076 } 3077 3078 static unsigned getIncomingValueNumForOperand(unsigned i) { 3079 return i; 3080 } 3081 3082 /// Return incoming basic block number @p i. 3083 /// 3084 BasicBlock *getIncomingBlock(unsigned i) const { 3085 return block_begin()[i]; 3086 } 3087 3088 /// Return incoming basic block corresponding 3089 /// to an operand of the PHI. 3090 /// 3091 BasicBlock *getIncomingBlock(const Use &U) const { 3092 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?"); 3093 return getIncomingBlock(unsigned(&U - op_begin())); 3094 } 3095 3096 /// Return incoming basic block corresponding 3097 /// to value use iterator. 3098 /// 3099 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const { 3100 return getIncomingBlock(I.getUse()); 3101 } 3102 3103 void setIncomingBlock(unsigned i, BasicBlock *BB) { 3104 const_cast<block_iterator>(block_begin())[i] = BB; 3105 } 3106 3107 /// Copies the basic blocks from \p BBRange to the incoming basic block list 3108 /// of this PHINode, starting at \p ToIdx. 3109 void copyIncomingBlocks(iterator_range<const_block_iterator> BBRange, 3110 uint32_t ToIdx = 0) { 3111 copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx); 3112 } 3113 3114 /// Replace every incoming basic block \p Old to basic block \p New. 3115 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) { 3116 assert(New && Old && "PHI node got a null basic block!"); 3117 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) 3118 if (getIncomingBlock(Op) == Old) 3119 setIncomingBlock(Op, New); 3120 } 3121 3122 /// Add an incoming value to the end of the PHI list 3123 /// 3124 void addIncoming(Value *V, BasicBlock *BB) { 3125 if (getNumOperands() == ReservedSpace) 3126 growOperands(); // Get more space! 3127 // Initialize some new operands. 3128 setNumHungOffUseOperands(getNumOperands() + 1); 3129 setIncomingValue(getNumOperands() - 1, V); 3130 setIncomingBlock(getNumOperands() - 1, BB); 3131 } 3132 3133 /// Remove an incoming value. This is useful if a 3134 /// predecessor basic block is deleted. The value removed is returned. 3135 /// 3136 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty 3137 /// is true), the PHI node is destroyed and any uses of it are replaced with 3138 /// dummy values. 
The only time there should be zero incoming values to a PHI 3139 /// node is when the block is dead, so this strategy is sound. 3140 /// 3141 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true); 3142 3143 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) { 3144 int Idx = getBasicBlockIndex(BB); 3145 assert(Idx >= 0 && "Invalid basic block argument to remove!"); 3146 return removeIncomingValue(Idx, DeletePHIIfEmpty); 3147 } 3148 3149 /// Remove all incoming values for which the predicate returns true. 3150 /// The predicate accepts the incoming value index. 3151 void removeIncomingValueIf(function_ref<bool(unsigned)> Predicate, 3152 bool DeletePHIIfEmpty = true); 3153 3154 /// Return the first index of the specified basic 3155 /// block in the value list for this PHI. Returns -1 if no instance. 3156 /// 3157 int getBasicBlockIndex(const BasicBlock *BB) const { 3158 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) 3159 if (block_begin()[i] == BB) 3160 return i; 3161 return -1; 3162 } 3163 3164 Value *getIncomingValueForBlock(const BasicBlock *BB) const { 3165 int Idx = getBasicBlockIndex(BB); 3166 assert(Idx >= 0 && "Invalid basic block argument!"); 3167 return getIncomingValue(Idx); 3168 } 3169 3170 /// Set every incoming value(s) for block \p BB to \p V. 3171 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) { 3172 assert(BB && "PHI node got a null basic block!"); 3173 bool Found = false; 3174 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) 3175 if (getIncomingBlock(Op) == BB) { 3176 Found = true; 3177 setIncomingValue(Op, V); 3178 } 3179 (void)Found; 3180 assert(Found && "Invalid basic block argument to set!"); 3181 } 3182 3183 /// If the specified PHI node always merges together the 3184 /// same value, return the value, otherwise return null. 3185 Value *hasConstantValue() const; 3186 3187 /// Whether the specified PHI node always merges 3188 /// together the same value, assuming undefs are equal to a unique 3189 /// non-undef value. 3190 bool hasConstantOrUndefValue() const; 3191 3192 /// If the PHI node is complete which means all of its parent's predecessors 3193 /// have incoming value in this PHI, return true, otherwise return false. 3194 bool isComplete() const { 3195 return llvm::all_of(predecessors(getParent()), 3196 [this](const BasicBlock *Pred) { 3197 return getBasicBlockIndex(Pred) >= 0; 3198 }); 3199 } 3200 3201 /// Methods for support type inquiry through isa, cast, and dyn_cast: 3202 static bool classof(const Instruction *I) { 3203 return I->getOpcode() == Instruction::PHI; 3204 } 3205 static bool classof(const Value *V) { 3206 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3207 } 3208 3209 private: 3210 void growOperands(); 3211 }; 3212 3213 template <> 3214 struct OperandTraits<PHINode> : public HungoffOperandTraits<2> { 3215 }; 3216 3217 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value) 3218 3219 //===----------------------------------------------------------------------===// 3220 // LandingPadInst Class 3221 //===----------------------------------------------------------------------===// 3222 3223 //===--------------------------------------------------------------------------- 3224 /// The landingpad instruction holds all of the information 3225 /// necessary to generate correct exception handling. The landingpad instruction 3226 /// cannot be moved from the top of a landing pad block, which itself is 3227 /// accessible only from the 'unwind' edge of an invoke. 
This uses the 3228 /// SubclassData field in Value to store whether or not the landingpad is a 3229 /// cleanup. 3230 /// 3231 class LandingPadInst : public Instruction { 3232 using CleanupField = BoolBitfieldElementT<0>; 3233 3234 /// The number of operands actually allocated. NumOperands is 3235 /// the number actually in use. 3236 unsigned ReservedSpace; 3237 3238 LandingPadInst(const LandingPadInst &LP); 3239 3240 public: 3241 enum ClauseType { Catch, Filter }; 3242 3243 private: 3244 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, 3245 const Twine &NameStr, 3246 BasicBlock::iterator InsertBefore); 3247 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, 3248 const Twine &NameStr, Instruction *InsertBefore); 3249 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, 3250 const Twine &NameStr, BasicBlock *InsertAtEnd); 3251 3252 // Allocate space for exactly zero operands. 3253 void *operator new(size_t S) { return User::operator new(S); } 3254 3255 void growOperands(unsigned Size); 3256 void init(unsigned NumReservedValues, const Twine &NameStr); 3257 3258 protected: 3259 // Note: Instruction needs to be a friend here to call cloneImpl. 3260 friend class Instruction; 3261 3262 LandingPadInst *cloneImpl() const; 3263 3264 public: 3265 void operator delete(void *Ptr) { User::operator delete(Ptr); } 3266 3267 /// Constructors - NumReservedClauses is a hint for the number of incoming 3268 /// clauses that this landingpad will have (use 0 if you really have no idea). 3269 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, 3270 const Twine &NameStr, 3271 BasicBlock::iterator InsertBefore); 3272 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, 3273 const Twine &NameStr = "", 3274 Instruction *InsertBefore = nullptr); 3275 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, 3276 const Twine &NameStr, BasicBlock *InsertAtEnd); 3277 3278 /// Provide fast operand accessors 3279 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3280 3281 /// Return 'true' if this landingpad instruction is a 3282 /// cleanup. I.e., it should be run when unwinding even if its landing pad 3283 /// doesn't catch the exception. 3284 bool isCleanup() const { return getSubclassData<CleanupField>(); } 3285 3286 /// Indicate that this landingpad instruction is a cleanup. 3287 void setCleanup(bool V) { setSubclassData<CleanupField>(V); } 3288 3289 /// Add a catch or filter clause to the landing pad. 3290 void addClause(Constant *ClauseVal); 3291 3292 /// Get the value of the clause at index Idx. Use isCatch/isFilter to 3293 /// determine what type of clause this is. 3294 Constant *getClause(unsigned Idx) const { 3295 return cast<Constant>(getOperandList()[Idx]); 3296 } 3297 3298 /// Return 'true' if the clause and index Idx is a catch clause. 3299 bool isCatch(unsigned Idx) const { 3300 return !isa<ArrayType>(getOperandList()[Idx]->getType()); 3301 } 3302 3303 /// Return 'true' if the clause and index Idx is a filter clause. 3304 bool isFilter(unsigned Idx) const { 3305 return isa<ArrayType>(getOperandList()[Idx]->getType()); 3306 } 3307 3308 /// Get the number of clauses for this landing pad. 3309 unsigned getNumClauses() const { return getNumOperands(); } 3310 3311 /// Grow the size of the operand list to accommodate the new 3312 /// number of clauses. 
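///
/// For example (an illustrative sketch; \c ExnTy, \c TypeInfoA, \c TypeInfoB
/// and \c InsertPt are hypothetical and depend on the personality function):
/// \code
///   LandingPadInst *LP = LandingPadInst::Create(
///       ExnTy, /*NumReservedClauses=*/0, "lpad", InsertPt);
///   LP->reserveClauses(2);     // optionally pre-grow the clause list
///   LP->addClause(TypeInfoA);  // catch clause (non-array type-info constant)
///   LP->addClause(TypeInfoB);
///   LP->setCleanup(true);      // also run cleanup code when unwinding
/// \endcode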
3313 void reserveClauses(unsigned Size) { growOperands(Size); } 3314 3315 // Methods for support type inquiry through isa, cast, and dyn_cast: 3316 static bool classof(const Instruction *I) { 3317 return I->getOpcode() == Instruction::LandingPad; 3318 } 3319 static bool classof(const Value *V) { 3320 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3321 } 3322 }; 3323 3324 template <> 3325 struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> { 3326 }; 3327 3328 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value) 3329 3330 //===----------------------------------------------------------------------===// 3331 // ReturnInst Class 3332 //===----------------------------------------------------------------------===// 3333 3334 //===--------------------------------------------------------------------------- 3335 /// Return a value (possibly void), from a function. Execution 3336 /// does not continue in this function any longer. 3337 /// 3338 class ReturnInst : public Instruction { 3339 ReturnInst(const ReturnInst &RI); 3340 3341 private: 3342 // ReturnInst constructors: 3343 // ReturnInst() - 'ret void' instruction 3344 // ReturnInst( null) - 'ret void' instruction 3345 // ReturnInst(Value* X) - 'ret X' instruction 3346 // ReturnInst(null, Iterator It) - 'ret void' instruction, insert before I 3347 // ReturnInst(Value* X, Iterator It) - 'ret X' instruction, insert before I 3348 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I 3349 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I 3350 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B 3351 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B 3352 // 3353 // NOTE: If the Value* passed is of type void then the constructor behaves as 3354 // if it was passed NULL. 3355 explicit ReturnInst(LLVMContext &C, Value *retVal, 3356 BasicBlock::iterator InsertBefore); 3357 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr, 3358 Instruction *InsertBefore = nullptr); 3359 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd); 3360 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd); 3361 3362 protected: 3363 // Note: Instruction needs to be a friend here to call cloneImpl. 3364 friend class Instruction; 3365 3366 ReturnInst *cloneImpl() const; 3367 3368 public: 3369 static ReturnInst *Create(LLVMContext &C, Value *retVal, 3370 BasicBlock::iterator InsertBefore) { 3371 return new (!!retVal) ReturnInst(C, retVal, InsertBefore); 3372 } 3373 3374 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr, 3375 Instruction *InsertBefore = nullptr) { 3376 return new(!!retVal) ReturnInst(C, retVal, InsertBefore); 3377 } 3378 3379 static ReturnInst* Create(LLVMContext &C, Value *retVal, 3380 BasicBlock *InsertAtEnd) { 3381 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd); 3382 } 3383 3384 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) { 3385 return new(0) ReturnInst(C, InsertAtEnd); 3386 } 3387 3388 /// Provide fast operand accessors 3389 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3390 3391 /// Convenience accessor. Returns null if there is no return value. 3392 Value *getReturnValue() const { 3393 return getNumOperands() != 0 ? 
getOperand(0) : nullptr; 3394 } 3395 3396 unsigned getNumSuccessors() const { return 0; } 3397 3398 // Methods for support type inquiry through isa, cast, and dyn_cast: 3399 static bool classof(const Instruction *I) { 3400 return (I->getOpcode() == Instruction::Ret); 3401 } 3402 static bool classof(const Value *V) { 3403 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3404 } 3405 3406 private: 3407 BasicBlock *getSuccessor(unsigned idx) const { 3408 llvm_unreachable("ReturnInst has no successors!"); 3409 } 3410 3411 void setSuccessor(unsigned idx, BasicBlock *B) { 3412 llvm_unreachable("ReturnInst has no successors!"); 3413 } 3414 }; 3415 3416 template <> 3417 struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> { 3418 }; 3419 3420 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value) 3421 3422 //===----------------------------------------------------------------------===// 3423 // BranchInst Class 3424 //===----------------------------------------------------------------------===// 3425 3426 //===--------------------------------------------------------------------------- 3427 /// Conditional or Unconditional Branch instruction. 3428 /// 3429 class BranchInst : public Instruction { 3430 /// Ops list - Branches are strange. The operands are ordered: 3431 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because 3432 /// they don't have to check for cond/uncond branchness. These are mostly 3433 /// accessed relative from op_end(). 3434 BranchInst(const BranchInst &BI); 3435 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition): 3436 // BranchInst(BB *B) - 'br B' 3437 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F' 3438 // BranchInst(BB* B, Iter It) - 'br B' insert before I 3439 // BranchInst(BB* T, BB *F, Value *C, Iter It) - 'br C, T, F', insert before I 3440 // BranchInst(BB* B, Inst *I) - 'br B' insert before I 3441 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I 3442 // BranchInst(BB* B, BB *I) - 'br B' insert at end 3443 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end 3444 explicit BranchInst(BasicBlock *IfTrue, BasicBlock::iterator InsertBefore); 3445 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, 3446 BasicBlock::iterator InsertBefore); 3447 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr); 3448 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, 3449 Instruction *InsertBefore = nullptr); 3450 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd); 3451 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, 3452 BasicBlock *InsertAtEnd); 3453 3454 void AssertOK(); 3455 3456 protected: 3457 // Note: Instruction needs to be a friend here to call cloneImpl. 3458 friend class Instruction; 3459 3460 BranchInst *cloneImpl() const; 3461 3462 public: 3463 /// Iterator type that casts an operand to a basic block. 3464 /// 3465 /// This only makes sense because the successors are stored as adjacent 3466 /// operands for branch instructions. 
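///
/// For example (an illustrative sketch; \c Cond, \c ThenBB, \c ElseBB and
/// \c BB are hypothetical values/blocks):
/// \code
///   // Conditional branch appended to the end of BB.
///   BranchInst *BI = BranchInst::Create(ThenBB, ElseBB, Cond, BB);
///   for (BasicBlock *Succ : BI->successors())
///     ...; // visits both successor blocks
/// \endcode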
3467 struct succ_op_iterator 3468 : iterator_adaptor_base<succ_op_iterator, value_op_iterator, 3469 std::random_access_iterator_tag, BasicBlock *, 3470 ptrdiff_t, BasicBlock *, BasicBlock *> { 3471 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} 3472 3473 BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 3474 BasicBlock *operator->() const { return operator*(); } 3475 }; 3476 3477 /// The const version of `succ_op_iterator`. 3478 struct const_succ_op_iterator 3479 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, 3480 std::random_access_iterator_tag, 3481 const BasicBlock *, ptrdiff_t, const BasicBlock *, 3482 const BasicBlock *> { 3483 explicit const_succ_op_iterator(const_value_op_iterator I) 3484 : iterator_adaptor_base(I) {} 3485 3486 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 3487 const BasicBlock *operator->() const { return operator*(); } 3488 }; 3489 3490 static BranchInst *Create(BasicBlock *IfTrue, 3491 BasicBlock::iterator InsertBefore) { 3492 return new(1) BranchInst(IfTrue, InsertBefore); 3493 } 3494 3495 static BranchInst *Create(BasicBlock *IfTrue, 3496 Instruction *InsertBefore = nullptr) { 3497 return new(1) BranchInst(IfTrue, InsertBefore); 3498 } 3499 3500 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, 3501 Value *Cond, BasicBlock::iterator InsertBefore) { 3502 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore); 3503 } 3504 3505 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, 3506 Value *Cond, Instruction *InsertBefore = nullptr) { 3507 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore); 3508 } 3509 3510 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) { 3511 return new(1) BranchInst(IfTrue, InsertAtEnd); 3512 } 3513 3514 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, 3515 Value *Cond, BasicBlock *InsertAtEnd) { 3516 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd); 3517 } 3518 3519 /// Transparently provide more efficient getOperand methods. 3520 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3521 3522 bool isUnconditional() const { return getNumOperands() == 1; } 3523 bool isConditional() const { return getNumOperands() == 3; } 3524 3525 Value *getCondition() const { 3526 assert(isConditional() && "Cannot get condition of an uncond branch!"); 3527 return Op<-3>(); 3528 } 3529 3530 void setCondition(Value *V) { 3531 assert(isConditional() && "Cannot set condition of unconditional branch!"); 3532 Op<-3>() = V; 3533 } 3534 3535 unsigned getNumSuccessors() const { return 1+isConditional(); } 3536 3537 BasicBlock *getSuccessor(unsigned i) const { 3538 assert(i < getNumSuccessors() && "Successor # out of range for Branch!"); 3539 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get()); 3540 } 3541 3542 void setSuccessor(unsigned idx, BasicBlock *NewSucc) { 3543 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!"); 3544 *(&Op<-1>() - idx) = NewSucc; 3545 } 3546 3547 /// Swap the successors of this branch instruction. 3548 /// 3549 /// Swaps the successors of the branch instruction. This also swaps any 3550 /// branch weight metadata associated with the instruction so that it 3551 /// continues to map correctly to each operand. 3552 void swapSuccessors(); 3553 3554 iterator_range<succ_op_iterator> successors() { 3555 return make_range( 3556 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 
1 : 0)), 3557 succ_op_iterator(value_op_end())); 3558 } 3559 3560 iterator_range<const_succ_op_iterator> successors() const { 3561 return make_range(const_succ_op_iterator( 3562 std::next(value_op_begin(), isConditional() ? 1 : 0)), 3563 const_succ_op_iterator(value_op_end())); 3564 } 3565 3566 // Methods for support type inquiry through isa, cast, and dyn_cast: 3567 static bool classof(const Instruction *I) { 3568 return (I->getOpcode() == Instruction::Br); 3569 } 3570 static bool classof(const Value *V) { 3571 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3572 } 3573 }; 3574 3575 template <> 3576 struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> { 3577 }; 3578 3579 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value) 3580 3581 //===----------------------------------------------------------------------===// 3582 // SwitchInst Class 3583 //===----------------------------------------------------------------------===// 3584 3585 //===--------------------------------------------------------------------------- 3586 /// Multiway switch 3587 /// 3588 class SwitchInst : public Instruction { 3589 unsigned ReservedSpace; 3590 3591 // Operand[0] = Value to switch on 3592 // Operand[1] = Default basic block destination 3593 // Operand[2n ] = Value to match 3594 // Operand[2n+1] = BasicBlock to go to on match 3595 SwitchInst(const SwitchInst &SI); 3596 3597 /// Create a new switch instruction, specifying a value to switch on and a 3598 /// default destination. The number of additional cases can be specified here 3599 /// to make memory allocation more efficient. This constructor can also 3600 /// auto-insert before another instruction. 3601 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3602 BasicBlock::iterator InsertBefore); 3603 3604 /// Create a new switch instruction, specifying a value to switch on and a 3605 /// default destination. The number of additional cases can be specified here 3606 /// to make memory allocation more efficient. This constructor can also 3607 /// auto-insert before another instruction. 3608 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3609 Instruction *InsertBefore); 3610 3611 /// Create a new switch instruction, specifying a value to switch on and a 3612 /// default destination. The number of additional cases can be specified here 3613 /// to make memory allocation more efficient. This constructor also 3614 /// auto-inserts at the end of the specified BasicBlock. 3615 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3616 BasicBlock *InsertAtEnd); 3617 3618 // allocate space for exactly zero operands 3619 void *operator new(size_t S) { return User::operator new(S); } 3620 3621 void init(Value *Value, BasicBlock *Default, unsigned NumReserved); 3622 void growOperands(); 3623 3624 protected: 3625 // Note: Instruction needs to be a friend here to call cloneImpl. 3626 friend class Instruction; 3627 3628 SwitchInst *cloneImpl() const; 3629 3630 public: 3631 void operator delete(void *Ptr) { User::operator delete(Ptr); } 3632 3633 // -2 3634 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1); 3635 3636 template <typename CaseHandleT> class CaseIteratorImpl; 3637 3638 /// A handle to a particular switch case. It exposes a convenient interface 3639 /// to both the case value and the successor block. 3640 /// 3641 /// We define this as a template and instantiate it to form both a const and 3642 /// non-const handle. 
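///
/// Case handles are normally reached through the case iterators, e.g.
/// (an illustrative sketch; \c SI is a hypothetical SwitchInst pointer):
/// \code
///   for (const auto &Case : SI->cases()) {
///     ConstantInt *CaseVal = Case.getCaseValue();
///     BasicBlock *CaseDest = Case.getCaseSuccessor();
///     ...
///   }
/// \endcode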
3643 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT> 3644 class CaseHandleImpl { 3645 // Directly befriend both const and non-const iterators. 3646 friend class SwitchInst::CaseIteratorImpl< 3647 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>; 3648 3649 protected: 3650 // Expose the switch type we're parameterized with to the iterator. 3651 using SwitchInstType = SwitchInstT; 3652 3653 SwitchInstT *SI; 3654 ptrdiff_t Index; 3655 3656 CaseHandleImpl() = default; 3657 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {} 3658 3659 public: 3660 /// Resolves case value for current case. 3661 ConstantIntT *getCaseValue() const { 3662 assert((unsigned)Index < SI->getNumCases() && 3663 "Index out the number of cases."); 3664 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2)); 3665 } 3666 3667 /// Resolves successor for current case. 3668 BasicBlockT *getCaseSuccessor() const { 3669 assert(((unsigned)Index < SI->getNumCases() || 3670 (unsigned)Index == DefaultPseudoIndex) && 3671 "Index out the number of cases."); 3672 return SI->getSuccessor(getSuccessorIndex()); 3673 } 3674 3675 /// Returns number of current case. 3676 unsigned getCaseIndex() const { return Index; } 3677 3678 /// Returns successor index for current case successor. 3679 unsigned getSuccessorIndex() const { 3680 assert(((unsigned)Index == DefaultPseudoIndex || 3681 (unsigned)Index < SI->getNumCases()) && 3682 "Index out the number of cases."); 3683 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0; 3684 } 3685 3686 bool operator==(const CaseHandleImpl &RHS) const { 3687 assert(SI == RHS.SI && "Incompatible operators."); 3688 return Index == RHS.Index; 3689 } 3690 }; 3691 3692 using ConstCaseHandle = 3693 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>; 3694 3695 class CaseHandle 3696 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> { 3697 friend class SwitchInst::CaseIteratorImpl<CaseHandle>; 3698 3699 public: 3700 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {} 3701 3702 /// Sets the new value for current case. 3703 void setValue(ConstantInt *V) const { 3704 assert((unsigned)Index < SI->getNumCases() && 3705 "Index out the number of cases."); 3706 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V)); 3707 } 3708 3709 /// Sets the new successor for current case. 3710 void setSuccessor(BasicBlock *S) const { 3711 SI->setSuccessor(getSuccessorIndex(), S); 3712 } 3713 }; 3714 3715 template <typename CaseHandleT> 3716 class CaseIteratorImpl 3717 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>, 3718 std::random_access_iterator_tag, 3719 const CaseHandleT> { 3720 using SwitchInstT = typename CaseHandleT::SwitchInstType; 3721 3722 CaseHandleT Case; 3723 3724 public: 3725 /// Default constructed iterator is in an invalid state until assigned to 3726 /// a case for a particular switch. 3727 CaseIteratorImpl() = default; 3728 3729 /// Initializes case iterator for given SwitchInst and for given 3730 /// case number. 3731 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {} 3732 3733 /// Initializes case iterator for given SwitchInst and for given 3734 /// successor index. 3735 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI, 3736 unsigned SuccessorIndex) { 3737 assert(SuccessorIndex < SI->getNumSuccessors() && 3738 "Successor index # out of range!"); 3739 return SuccessorIndex != 0 ? 
CaseIteratorImpl(SI, SuccessorIndex - 1) 3740 : CaseIteratorImpl(SI, DefaultPseudoIndex); 3741 } 3742 3743 /// Support converting to the const variant. This will be a no-op for const 3744 /// variant. 3745 operator CaseIteratorImpl<ConstCaseHandle>() const { 3746 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index); 3747 } 3748 3749 CaseIteratorImpl &operator+=(ptrdiff_t N) { 3750 // Check index correctness after addition. 3751 // Note: Index == getNumCases() means end(). 3752 assert(Case.Index + N >= 0 && 3753 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && 3754 "Case.Index out the number of cases."); 3755 Case.Index += N; 3756 return *this; 3757 } 3758 CaseIteratorImpl &operator-=(ptrdiff_t N) { 3759 // Check index correctness after subtraction. 3760 // Note: Case.Index == getNumCases() means end(). 3761 assert(Case.Index - N >= 0 && 3762 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && 3763 "Case.Index out the number of cases."); 3764 Case.Index -= N; 3765 return *this; 3766 } 3767 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const { 3768 assert(Case.SI == RHS.Case.SI && "Incompatible operators."); 3769 return Case.Index - RHS.Case.Index; 3770 } 3771 bool operator==(const CaseIteratorImpl &RHS) const { 3772 return Case == RHS.Case; 3773 } 3774 bool operator<(const CaseIteratorImpl &RHS) const { 3775 assert(Case.SI == RHS.Case.SI && "Incompatible operators."); 3776 return Case.Index < RHS.Case.Index; 3777 } 3778 const CaseHandleT &operator*() const { return Case; } 3779 }; 3780 3781 using CaseIt = CaseIteratorImpl<CaseHandle>; 3782 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>; 3783 3784 static SwitchInst *Create(Value *Value, BasicBlock *Default, 3785 unsigned NumCases, 3786 BasicBlock::iterator InsertBefore) { 3787 return new SwitchInst(Value, Default, NumCases, InsertBefore); 3788 } 3789 3790 static SwitchInst *Create(Value *Value, BasicBlock *Default, 3791 unsigned NumCases, 3792 Instruction *InsertBefore = nullptr) { 3793 return new SwitchInst(Value, Default, NumCases, InsertBefore); 3794 } 3795 3796 static SwitchInst *Create(Value *Value, BasicBlock *Default, 3797 unsigned NumCases, BasicBlock *InsertAtEnd) { 3798 return new SwitchInst(Value, Default, NumCases, InsertAtEnd); 3799 } 3800 3801 /// Provide fast operand accessors 3802 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3803 3804 // Accessor Methods for Switch stmt 3805 Value *getCondition() const { return getOperand(0); } 3806 void setCondition(Value *V) { setOperand(0, V); } 3807 3808 BasicBlock *getDefaultDest() const { 3809 return cast<BasicBlock>(getOperand(1)); 3810 } 3811 3812 /// Returns true if the default branch must result in immediate undefined 3813 /// behavior, false otherwise. 3814 bool defaultDestUndefined() const { 3815 return isa<UnreachableInst>(getDefaultDest()->getFirstNonPHIOrDbg()); 3816 } 3817 3818 void setDefaultDest(BasicBlock *DefaultCase) { 3819 setOperand(1, reinterpret_cast<Value*>(DefaultCase)); 3820 } 3821 3822 /// Return the number of 'cases' in this switch instruction, excluding the 3823 /// default case. 3824 unsigned getNumCases() const { 3825 return getNumOperands()/2 - 1; 3826 } 3827 3828 /// Returns a read/write iterator that points to the first case in the 3829 /// SwitchInst. 3830 CaseIt case_begin() { 3831 return CaseIt(this, 0); 3832 } 3833 3834 /// Returns a read-only iterator that points to the first case in the 3835 /// SwitchInst. 
3836 ConstCaseIt case_begin() const { 3837 return ConstCaseIt(this, 0); 3838 } 3839 3840 /// Returns a read/write iterator that points one past the last in the 3841 /// SwitchInst. 3842 CaseIt case_end() { 3843 return CaseIt(this, getNumCases()); 3844 } 3845 3846 /// Returns a read-only iterator that points one past the last in the 3847 /// SwitchInst. 3848 ConstCaseIt case_end() const { 3849 return ConstCaseIt(this, getNumCases()); 3850 } 3851 3852 /// Iteration adapter for range-for loops. 3853 iterator_range<CaseIt> cases() { 3854 return make_range(case_begin(), case_end()); 3855 } 3856 3857 /// Constant iteration adapter for range-for loops. 3858 iterator_range<ConstCaseIt> cases() const { 3859 return make_range(case_begin(), case_end()); 3860 } 3861 3862 /// Returns an iterator that points to the default case. 3863 /// Note: this iterator allows to resolve successor only. Attempt 3864 /// to resolve case value causes an assertion. 3865 /// Also note, that increment and decrement also causes an assertion and 3866 /// makes iterator invalid. 3867 CaseIt case_default() { 3868 return CaseIt(this, DefaultPseudoIndex); 3869 } 3870 ConstCaseIt case_default() const { 3871 return ConstCaseIt(this, DefaultPseudoIndex); 3872 } 3873 3874 /// Search all of the case values for the specified constant. If it is 3875 /// explicitly handled, return the case iterator of it, otherwise return 3876 /// default case iterator to indicate that it is handled by the default 3877 /// handler. 3878 CaseIt findCaseValue(const ConstantInt *C) { 3879 return CaseIt( 3880 this, 3881 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex()); 3882 } 3883 ConstCaseIt findCaseValue(const ConstantInt *C) const { 3884 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) { 3885 return Case.getCaseValue() == C; 3886 }); 3887 if (I != case_end()) 3888 return I; 3889 3890 return case_default(); 3891 } 3892 3893 /// Finds the unique case value for a given successor. Returns null if the 3894 /// successor is not found, not unique, or is the default case. 3895 ConstantInt *findCaseDest(BasicBlock *BB) { 3896 if (BB == getDefaultDest()) 3897 return nullptr; 3898 3899 ConstantInt *CI = nullptr; 3900 for (auto Case : cases()) { 3901 if (Case.getCaseSuccessor() != BB) 3902 continue; 3903 3904 if (CI) 3905 return nullptr; // Multiple cases lead to BB. 3906 3907 CI = Case.getCaseValue(); 3908 } 3909 3910 return CI; 3911 } 3912 3913 /// Add an entry to the switch instruction. 3914 /// Note: 3915 /// This action invalidates case_end(). Old case_end() iterator will 3916 /// point to the added case. 3917 void addCase(ConstantInt *OnVal, BasicBlock *Dest); 3918 3919 /// This method removes the specified case and its successor from the switch 3920 /// instruction. Note that this operation may reorder the remaining cases at 3921 /// index idx and above. 3922 /// Note: 3923 /// This action invalidates iterators for all cases following the one removed, 3924 /// including the case_end() iterator. It returns an iterator for the next 3925 /// case. 
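///
/// Because of this invalidation, removal loops should continue from the
/// returned iterator and refresh the end iterator, e.g. (an illustrative
/// sketch; \c shouldRemove is a hypothetical predicate):
/// \code
///   for (auto I = SI->case_begin(), E = SI->case_end(); I != E;) {
///     if (shouldRemove(*I)) {
///       I = SI->removeCase(I);
///       E = SI->case_end(); // the old end iterator is no longer valid
///     } else {
///       ++I;
///     }
///   }
/// \endcode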
3926 CaseIt removeCase(CaseIt I); 3927 3928 unsigned getNumSuccessors() const { return getNumOperands()/2; } 3929 BasicBlock *getSuccessor(unsigned idx) const { 3930 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!"); 3931 return cast<BasicBlock>(getOperand(idx*2+1)); 3932 } 3933 void setSuccessor(unsigned idx, BasicBlock *NewSucc) { 3934 assert(idx < getNumSuccessors() && "Successor # out of range for switch!"); 3935 setOperand(idx * 2 + 1, NewSucc); 3936 } 3937 3938 // Methods for support type inquiry through isa, cast, and dyn_cast: 3939 static bool classof(const Instruction *I) { 3940 return I->getOpcode() == Instruction::Switch; 3941 } 3942 static bool classof(const Value *V) { 3943 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 3944 } 3945 }; 3946 3947 /// A wrapper class to simplify modification of SwitchInst cases along with 3948 /// their prof branch_weights metadata. 3949 class SwitchInstProfUpdateWrapper { 3950 SwitchInst &SI; 3951 std::optional<SmallVector<uint32_t, 8>> Weights; 3952 bool Changed = false; 3953 3954 protected: 3955 MDNode *buildProfBranchWeightsMD(); 3956 3957 void init(); 3958 3959 public: 3960 using CaseWeightOpt = std::optional<uint32_t>; 3961 SwitchInst *operator->() { return &SI; } 3962 SwitchInst &operator*() { return SI; } 3963 operator SwitchInst *() { return &SI; } 3964 3965 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); } 3966 3967 ~SwitchInstProfUpdateWrapper() { 3968 if (Changed) 3969 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD()); 3970 } 3971 3972 /// Delegate the call to the underlying SwitchInst::removeCase() and remove 3973 /// correspondent branch weight. 3974 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I); 3975 3976 /// Delegate the call to the underlying SwitchInst::addCase() and set the 3977 /// specified branch weight for the added case. 3978 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W); 3979 3980 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark 3981 /// this object to not touch the underlying SwitchInst in destructor. 3982 Instruction::InstListType::iterator eraseFromParent(); 3983 3984 void setSuccessorWeight(unsigned idx, CaseWeightOpt W); 3985 CaseWeightOpt getSuccessorWeight(unsigned idx); 3986 3987 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx); 3988 }; 3989 3990 template <> 3991 struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> { 3992 }; 3993 3994 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value) 3995 3996 //===----------------------------------------------------------------------===// 3997 // IndirectBrInst Class 3998 //===----------------------------------------------------------------------===// 3999 4000 //===--------------------------------------------------------------------------- 4001 /// Indirect Branch Instruction. 4002 /// 4003 class IndirectBrInst : public Instruction { 4004 unsigned ReservedSpace; 4005 4006 // Operand[0] = Address to jump to 4007 // Operand[n+1] = n-th destination 4008 IndirectBrInst(const IndirectBrInst &IBI); 4009 4010 /// Create a new indirectbr instruction, specifying an 4011 /// Address to jump to. The number of expected destinations can be specified 4012 /// here to make memory allocation more efficient. This constructor can also 4013 /// autoinsert before another instruction. 
4014 IndirectBrInst(Value *Address, unsigned NumDests, 4015 BasicBlock::iterator InsertBefore); 4016 4017 /// Create a new indirectbr instruction, specifying an 4018 /// Address to jump to. The number of expected destinations can be specified 4019 /// here to make memory allocation more efficient. This constructor can also 4020 /// autoinsert before another instruction. 4021 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore); 4022 4023 /// Create a new indirectbr instruction, specifying an 4024 /// Address to jump to. The number of expected destinations can be specified 4025 /// here to make memory allocation more efficient. This constructor also 4026 /// autoinserts at the end of the specified BasicBlock. 4027 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd); 4028 4029 // allocate space for exactly zero operands 4030 void *operator new(size_t S) { return User::operator new(S); } 4031 4032 void init(Value *Address, unsigned NumDests); 4033 void growOperands(); 4034 4035 protected: 4036 // Note: Instruction needs to be a friend here to call cloneImpl. 4037 friend class Instruction; 4038 4039 IndirectBrInst *cloneImpl() const; 4040 4041 public: 4042 void operator delete(void *Ptr) { User::operator delete(Ptr); } 4043 4044 /// Iterator type that casts an operand to a basic block. 4045 /// 4046 /// This only makes sense because the successors are stored as adjacent 4047 /// operands for indirectbr instructions. 4048 struct succ_op_iterator 4049 : iterator_adaptor_base<succ_op_iterator, value_op_iterator, 4050 std::random_access_iterator_tag, BasicBlock *, 4051 ptrdiff_t, BasicBlock *, BasicBlock *> { 4052 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} 4053 4054 BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 4055 BasicBlock *operator->() const { return operator*(); } 4056 }; 4057 4058 /// The const version of `succ_op_iterator`. 4059 struct const_succ_op_iterator 4060 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, 4061 std::random_access_iterator_tag, 4062 const BasicBlock *, ptrdiff_t, const BasicBlock *, 4063 const BasicBlock *> { 4064 explicit const_succ_op_iterator(const_value_op_iterator I) 4065 : iterator_adaptor_base(I) {} 4066 4067 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } 4068 const BasicBlock *operator->() const { return operator*(); } 4069 }; 4070 4071 static IndirectBrInst *Create(Value *Address, unsigned NumDests, 4072 BasicBlock::iterator InsertBefore) { 4073 return new IndirectBrInst(Address, NumDests, InsertBefore); 4074 } 4075 4076 static IndirectBrInst *Create(Value *Address, unsigned NumDests, 4077 Instruction *InsertBefore = nullptr) { 4078 return new IndirectBrInst(Address, NumDests, InsertBefore); 4079 } 4080 4081 static IndirectBrInst *Create(Value *Address, unsigned NumDests, 4082 BasicBlock *InsertAtEnd) { 4083 return new IndirectBrInst(Address, NumDests, InsertAtEnd); 4084 } 4085 4086 /// Provide fast operand accessors. 4087 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4088 4089 // Accessor Methods for IndirectBrInst instruction. 4090 Value *getAddress() { return getOperand(0); } 4091 const Value *getAddress() const { return getOperand(0); } 4092 void setAddress(Value *V) { setOperand(0, V); } 4093 4094 /// return the number of possible destinations in this 4095 /// indirectbr instruction. 4096 unsigned getNumDestinations() const { return getNumOperands()-1; } 4097 4098 /// Return the specified destination. 
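///
/// For example (an illustrative sketch; \c Addr, \c BB1, \c BB2 and
/// \c InsertPt are hypothetical):
/// \code
///   IndirectBrInst *IBI =
///       IndirectBrInst::Create(Addr, /*NumDests=*/2, InsertPt);
///   IBI->addDestination(BB1);
///   IBI->addDestination(BB2);
///   for (unsigned I = 0, E = IBI->getNumDestinations(); I != E; ++I)
///     ...; // IBI->getDestination(I) yields BB1, then BB2
/// \endcode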
4099 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); } 4100 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); } 4101 4102 /// Add a destination. 4103 /// 4104 void addDestination(BasicBlock *Dest); 4105 4106 /// This method removes the specified successor from the 4107 /// indirectbr instruction. 4108 void removeDestination(unsigned i); 4109 4110 unsigned getNumSuccessors() const { return getNumOperands()-1; } 4111 BasicBlock *getSuccessor(unsigned i) const { 4112 return cast<BasicBlock>(getOperand(i+1)); 4113 } 4114 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 4115 setOperand(i + 1, NewSucc); 4116 } 4117 4118 iterator_range<succ_op_iterator> successors() { 4119 return make_range(succ_op_iterator(std::next(value_op_begin())), 4120 succ_op_iterator(value_op_end())); 4121 } 4122 4123 iterator_range<const_succ_op_iterator> successors() const { 4124 return make_range(const_succ_op_iterator(std::next(value_op_begin())), 4125 const_succ_op_iterator(value_op_end())); 4126 } 4127 4128 // Methods for support type inquiry through isa, cast, and dyn_cast: 4129 static bool classof(const Instruction *I) { 4130 return I->getOpcode() == Instruction::IndirectBr; 4131 } 4132 static bool classof(const Value *V) { 4133 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4134 } 4135 }; 4136 4137 template <> 4138 struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> { 4139 }; 4140 4141 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value) 4142 4143 //===----------------------------------------------------------------------===// 4144 // InvokeInst Class 4145 //===----------------------------------------------------------------------===// 4146 4147 /// Invoke instruction. The SubclassData field is used to hold the 4148 /// calling convention of the call. 4149 /// 4150 class InvokeInst : public CallBase { 4151 /// The number of operands for this call beyond the called function, 4152 /// arguments, and operand bundles. 4153 static constexpr int NumExtraOperands = 2; 4154 4155 /// The index from the end of the operand array to the normal destination. 4156 static constexpr int NormalDestOpEndIdx = -3; 4157 4158 /// The index from the end of the operand array to the unwind destination. 4159 static constexpr int UnwindDestOpEndIdx = -2; 4160 4161 InvokeInst(const InvokeInst &BI); 4162 4163 /// Construct an InvokeInst given a range of arguments. 4164 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4165 BasicBlock *IfException, ArrayRef<Value *> Args, 4166 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4167 const Twine &NameStr, BasicBlock::iterator InsertBefore); 4168 4169 /// Construct an InvokeInst given a range of arguments. 
4170 /// 4171 /// Construct an InvokeInst from a range of arguments 4172 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4173 BasicBlock *IfException, ArrayRef<Value *> Args, 4174 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4175 const Twine &NameStr, Instruction *InsertBefore); 4176 4177 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4178 BasicBlock *IfException, ArrayRef<Value *> Args, 4179 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4180 const Twine &NameStr, BasicBlock *InsertAtEnd); 4181 4182 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4183 BasicBlock *IfException, ArrayRef<Value *> Args, 4184 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); 4185 4186 /// Compute the number of operands to allocate. 4187 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { 4188 // We need one operand for the called function, plus our extra operands and 4189 // the input operand counts provided. 4190 return 1 + NumExtraOperands + NumArgs + NumBundleInputs; 4191 } 4192 4193 protected: 4194 // Note: Instruction needs to be a friend here to call cloneImpl. 4195 friend class Instruction; 4196 4197 InvokeInst *cloneImpl() const; 4198 4199 public: 4200 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4201 BasicBlock *IfException, ArrayRef<Value *> Args, 4202 const Twine &NameStr, 4203 BasicBlock::iterator InsertBefore) { 4204 int NumOperands = ComputeNumOperands(Args.size()); 4205 return new (NumOperands) 4206 InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt, 4207 NumOperands, NameStr, InsertBefore); 4208 } 4209 4210 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4211 BasicBlock *IfException, ArrayRef<Value *> Args, 4212 const Twine &NameStr, 4213 Instruction *InsertBefore = nullptr) { 4214 int NumOperands = ComputeNumOperands(Args.size()); 4215 return new (NumOperands) 4216 InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt, 4217 NumOperands, NameStr, InsertBefore); 4218 } 4219 4220 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4221 BasicBlock *IfException, ArrayRef<Value *> Args, 4222 ArrayRef<OperandBundleDef> Bundles, 4223 const Twine &NameStr, 4224 BasicBlock::iterator InsertBefore) { 4225 int NumOperands = 4226 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 4227 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 4228 4229 return new (NumOperands, DescriptorBytes) 4230 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, 4231 NameStr, InsertBefore); 4232 } 4233 4234 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4235 BasicBlock *IfException, ArrayRef<Value *> Args, 4236 ArrayRef<OperandBundleDef> Bundles = std::nullopt, 4237 const Twine &NameStr = "", 4238 Instruction *InsertBefore = nullptr) { 4239 int NumOperands = 4240 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 4241 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 4242 4243 return new (NumOperands, DescriptorBytes) 4244 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, 4245 NameStr, InsertBefore); 4246 } 4247 4248 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4249 BasicBlock *IfException, ArrayRef<Value *> Args, 4250 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4251 int NumOperands = ComputeNumOperands(Args.size()); 4252 return new (NumOperands) 4253 
InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt, 4254 NumOperands, NameStr, InsertAtEnd); 4255 } 4256 4257 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4258 BasicBlock *IfException, ArrayRef<Value *> Args, 4259 ArrayRef<OperandBundleDef> Bundles, 4260 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4261 int NumOperands = 4262 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); 4263 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 4264 4265 return new (NumOperands, DescriptorBytes) 4266 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, 4267 NameStr, InsertAtEnd); 4268 } 4269 4270 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 4271 BasicBlock *IfException, ArrayRef<Value *> Args, 4272 const Twine &NameStr, 4273 BasicBlock::iterator InsertBefore) { 4274 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 4275 IfException, Args, std::nullopt, NameStr, InsertBefore); 4276 } 4277 4278 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 4279 BasicBlock *IfException, ArrayRef<Value *> Args, 4280 const Twine &NameStr, 4281 Instruction *InsertBefore = nullptr) { 4282 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 4283 IfException, Args, std::nullopt, NameStr, InsertBefore); 4284 } 4285 4286 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 4287 BasicBlock *IfException, ArrayRef<Value *> Args, 4288 ArrayRef<OperandBundleDef> Bundles, 4289 const Twine &NameStr, 4290 BasicBlock::iterator InsertBefore) { 4291 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 4292 IfException, Args, Bundles, NameStr, InsertBefore); 4293 } 4294 4295 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 4296 BasicBlock *IfException, ArrayRef<Value *> Args, 4297 ArrayRef<OperandBundleDef> Bundles = std::nullopt, 4298 const Twine &NameStr = "", 4299 Instruction *InsertBefore = nullptr) { 4300 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 4301 IfException, Args, Bundles, NameStr, InsertBefore); 4302 } 4303 4304 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 4305 BasicBlock *IfException, ArrayRef<Value *> Args, 4306 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4307 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 4308 IfException, Args, NameStr, InsertAtEnd); 4309 } 4310 4311 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, 4312 BasicBlock *IfException, ArrayRef<Value *> Args, 4313 ArrayRef<OperandBundleDef> Bundles, 4314 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4315 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, 4316 IfException, Args, Bundles, NameStr, InsertAtEnd); 4317 } 4318 4319 /// Create a clone of \p II with a different set of operand bundles and 4320 /// insert it before \p InsertPt. 4321 /// 4322 /// The returned invoke instruction is identical to \p II in every way except 4323 /// that the operand bundles for the new instruction are set to the operand 4324 /// bundles in \p Bundles. 4325 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles, 4326 BasicBlock::iterator InsertPt); 4327 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles, 4328 Instruction *InsertPt = nullptr); 4329 4330 // get*Dest - Return the destination basic blocks... 
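// For example (an illustrative sketch; II is a hypothetical InvokeInst
// pointer and NewUnwindBB a hypothetical landing-pad block):
//
//   BasicBlock *Normal = II->getNormalDest(); // taken when the call returns
//   BasicBlock *Unwind = II->getUnwindDest(); // taken when the call unwinds
//   II->setUnwindDest(NewUnwindBB);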
4331 BasicBlock *getNormalDest() const { 4332 return cast<BasicBlock>(Op<NormalDestOpEndIdx>()); 4333 } 4334 BasicBlock *getUnwindDest() const { 4335 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>()); 4336 } 4337 void setNormalDest(BasicBlock *B) { 4338 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B); 4339 } 4340 void setUnwindDest(BasicBlock *B) { 4341 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B); 4342 } 4343 4344 /// Get the landingpad instruction from the landing pad 4345 /// block (the unwind destination). 4346 LandingPadInst *getLandingPadInst() const; 4347 4348 BasicBlock *getSuccessor(unsigned i) const { 4349 assert(i < 2 && "Successor # out of range for invoke!"); 4350 return i == 0 ? getNormalDest() : getUnwindDest(); 4351 } 4352 4353 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 4354 assert(i < 2 && "Successor # out of range for invoke!"); 4355 if (i == 0) 4356 setNormalDest(NewSucc); 4357 else 4358 setUnwindDest(NewSucc); 4359 } 4360 4361 unsigned getNumSuccessors() const { return 2; } 4362 4363 // Methods for support type inquiry through isa, cast, and dyn_cast: 4364 static bool classof(const Instruction *I) { 4365 return (I->getOpcode() == Instruction::Invoke); 4366 } 4367 static bool classof(const Value *V) { 4368 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4369 } 4370 4371 private: 4372 // Shadow Instruction::setInstructionSubclassData with a private forwarding 4373 // method so that subclasses cannot accidentally use it. 4374 template <typename Bitfield> 4375 void setSubclassData(typename Bitfield::Type Value) { 4376 Instruction::setSubclassData<Bitfield>(Value); 4377 } 4378 }; 4379 4380 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4381 BasicBlock *IfException, ArrayRef<Value *> Args, 4382 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4383 const Twine &NameStr, BasicBlock::iterator InsertBefore) 4384 : CallBase(Ty->getReturnType(), Instruction::Invoke, 4385 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4386 InsertBefore) { 4387 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); 4388 } 4389 4390 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4391 BasicBlock *IfException, ArrayRef<Value *> Args, 4392 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4393 const Twine &NameStr, Instruction *InsertBefore) 4394 : CallBase(Ty->getReturnType(), Instruction::Invoke, 4395 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4396 InsertBefore) { 4397 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); 4398 } 4399 4400 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, 4401 BasicBlock *IfException, ArrayRef<Value *> Args, 4402 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4403 const Twine &NameStr, BasicBlock *InsertAtEnd) 4404 : CallBase(Ty->getReturnType(), Instruction::Invoke, 4405 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4406 InsertAtEnd) { 4407 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); 4408 } 4409 4410 //===----------------------------------------------------------------------===// 4411 // CallBrInst Class 4412 //===----------------------------------------------------------------------===// 4413 4414 /// CallBr instruction, tracking function calls that may not return control but 4415 /// instead transfer it to a third location. The SubclassData field is used to 4416 /// hold the calling convention of the call. 
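/// A callbr is most commonly produced for "asm goto" inline assembly. It is
/// created from a callee, a default (fallthrough) destination and the set of
/// possible indirect destinations, e.g. (an illustrative sketch; \c FTy,
/// \c Callee, \c Args, \c Fallthrough, \c IndirectBB and \c BB are
/// hypothetical):
/// \code
///   CallBrInst *CBI = CallBrInst::Create(FTy, Callee, Fallthrough,
///                                        {IndirectBB}, Args, "", BB);
/// \endcode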
4417 /// 4418 class CallBrInst : public CallBase { 4419 4420 unsigned NumIndirectDests; 4421 4422 CallBrInst(const CallBrInst &BI); 4423 4424 /// Construct a CallBrInst given a range of arguments. 4425 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4426 ArrayRef<BasicBlock *> IndirectDests, 4427 ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles, 4428 int NumOperands, const Twine &NameStr, 4429 BasicBlock::iterator InsertBefore); 4430 4431 /// Construct a CallBrInst given a range of arguments. 4432 /// 4433 /// Construct a CallBrInst from a range of arguments 4434 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4435 ArrayRef<BasicBlock *> IndirectDests, 4436 ArrayRef<Value *> Args, 4437 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4438 const Twine &NameStr, Instruction *InsertBefore); 4439 4440 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4441 ArrayRef<BasicBlock *> IndirectDests, 4442 ArrayRef<Value *> Args, 4443 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4444 const Twine &NameStr, BasicBlock *InsertAtEnd); 4445 4446 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest, 4447 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, 4448 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); 4449 4450 /// Compute the number of operands to allocate. 4451 static int ComputeNumOperands(int NumArgs, int NumIndirectDests, 4452 int NumBundleInputs = 0) { 4453 // We need one operand for the called function, plus our extra operands and 4454 // the input operand counts provided. 4455 return 2 + NumIndirectDests + NumArgs + NumBundleInputs; 4456 } 4457 4458 protected: 4459 // Note: Instruction needs to be a friend here to call cloneImpl. 
4460 friend class Instruction; 4461 4462 CallBrInst *cloneImpl() const; 4463 4464 public: 4465 static CallBrInst *Create(FunctionType *Ty, Value *Func, 4466 BasicBlock *DefaultDest, 4467 ArrayRef<BasicBlock *> IndirectDests, 4468 ArrayRef<Value *> Args, const Twine &NameStr, 4469 BasicBlock::iterator InsertBefore) { 4470 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); 4471 return new (NumOperands) 4472 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt, 4473 NumOperands, NameStr, InsertBefore); 4474 } 4475 4476 static CallBrInst *Create(FunctionType *Ty, Value *Func, 4477 BasicBlock *DefaultDest, 4478 ArrayRef<BasicBlock *> IndirectDests, 4479 ArrayRef<Value *> Args, const Twine &NameStr, 4480 Instruction *InsertBefore = nullptr) { 4481 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); 4482 return new (NumOperands) 4483 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt, 4484 NumOperands, NameStr, InsertBefore); 4485 } 4486 4487 static CallBrInst * 4488 Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4489 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, 4490 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 4491 BasicBlock::iterator InsertBefore) { 4492 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), 4493 CountBundleInputs(Bundles)); 4494 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 4495 4496 return new (NumOperands, DescriptorBytes) 4497 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, 4498 NumOperands, NameStr, InsertBefore); 4499 } 4500 4501 static CallBrInst * 4502 Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4503 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, 4504 ArrayRef<OperandBundleDef> Bundles = std::nullopt, 4505 const Twine &NameStr = "", Instruction *InsertBefore = nullptr) { 4506 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), 4507 CountBundleInputs(Bundles)); 4508 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 4509 4510 return new (NumOperands, DescriptorBytes) 4511 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, 4512 NumOperands, NameStr, InsertBefore); 4513 } 4514 4515 static CallBrInst *Create(FunctionType *Ty, Value *Func, 4516 BasicBlock *DefaultDest, 4517 ArrayRef<BasicBlock *> IndirectDests, 4518 ArrayRef<Value *> Args, const Twine &NameStr, 4519 BasicBlock *InsertAtEnd) { 4520 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); 4521 return new (NumOperands) 4522 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt, 4523 NumOperands, NameStr, InsertAtEnd); 4524 } 4525 4526 static CallBrInst *Create(FunctionType *Ty, Value *Func, 4527 BasicBlock *DefaultDest, 4528 ArrayRef<BasicBlock *> IndirectDests, 4529 ArrayRef<Value *> Args, 4530 ArrayRef<OperandBundleDef> Bundles, 4531 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4532 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), 4533 CountBundleInputs(Bundles)); 4534 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); 4535 4536 return new (NumOperands, DescriptorBytes) 4537 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, 4538 NumOperands, NameStr, InsertAtEnd); 4539 } 4540 4541 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 4542 ArrayRef<BasicBlock *> IndirectDests, 4543 ArrayRef<Value *> Args, const Twine &NameStr, 4544 BasicBlock::iterator 
InsertBefore) { 4545 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4546 IndirectDests, Args, NameStr, InsertBefore); 4547 } 4548 4549 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 4550 ArrayRef<BasicBlock *> IndirectDests, 4551 ArrayRef<Value *> Args, const Twine &NameStr, 4552 Instruction *InsertBefore = nullptr) { 4553 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4554 IndirectDests, Args, NameStr, InsertBefore); 4555 } 4556 4557 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 4558 ArrayRef<BasicBlock *> IndirectDests, 4559 ArrayRef<Value *> Args, 4560 ArrayRef<OperandBundleDef> Bundles, 4561 const Twine &NameStr, 4562 BasicBlock::iterator InsertBefore) { 4563 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4564 IndirectDests, Args, Bundles, NameStr, InsertBefore); 4565 } 4566 4567 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 4568 ArrayRef<BasicBlock *> IndirectDests, 4569 ArrayRef<Value *> Args, 4570 ArrayRef<OperandBundleDef> Bundles = std::nullopt, 4571 const Twine &NameStr = "", 4572 Instruction *InsertBefore = nullptr) { 4573 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4574 IndirectDests, Args, Bundles, NameStr, InsertBefore); 4575 } 4576 4577 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, 4578 ArrayRef<BasicBlock *> IndirectDests, 4579 ArrayRef<Value *> Args, const Twine &NameStr, 4580 BasicBlock *InsertAtEnd) { 4581 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4582 IndirectDests, Args, NameStr, InsertAtEnd); 4583 } 4584 4585 static CallBrInst *Create(FunctionCallee Func, 4586 BasicBlock *DefaultDest, 4587 ArrayRef<BasicBlock *> IndirectDests, 4588 ArrayRef<Value *> Args, 4589 ArrayRef<OperandBundleDef> Bundles, 4590 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4591 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, 4592 IndirectDests, Args, Bundles, NameStr, InsertAtEnd); 4593 } 4594 4595 /// Create a clone of \p CBI with a different set of operand bundles and 4596 /// insert it before \p InsertPt. 4597 /// 4598 /// The returned callbr instruction is identical to \p CBI in every way 4599 /// except that the operand bundles for the new instruction are set to the 4600 /// operand bundles in \p Bundles. 4601 static CallBrInst *Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> Bundles, 4602 BasicBlock::iterator InsertPt); 4603 static CallBrInst *Create(CallBrInst *CBI, 4604 ArrayRef<OperandBundleDef> Bundles, 4605 Instruction *InsertPt = nullptr); 4606 4607 /// Return the number of callbr indirect dest labels. 4608 /// 4609 unsigned getNumIndirectDests() const { return NumIndirectDests; } 4610 4611 /// getIndirectDestLabel - Return the i-th indirect dest label. 4612 /// 4613 Value *getIndirectDestLabel(unsigned i) const { 4614 assert(i < getNumIndirectDests() && "Out of bounds!"); 4615 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1); 4616 } 4617 4618 Value *getIndirectDestLabelUse(unsigned i) const { 4619 assert(i < getNumIndirectDests() && "Out of bounds!"); 4620 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1); 4621 } 4622 4623 // Return the destination basic blocks... 
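  // A minimal usage sketch (CBI is assumed to be a pointer to an existing
  // CallBrInst):
  //   BasicBlock *Fallthrough = CBI->getDefaultDest();
  //   for (BasicBlock *Target : CBI->getIndirectDests())
  //     (void)Target; // each possible indirect transfer target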
4624 BasicBlock *getDefaultDest() const { 4625 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); 4626 } 4627 BasicBlock *getIndirectDest(unsigned i) const { 4628 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); 4629 } 4630 SmallVector<BasicBlock *, 16> getIndirectDests() const { 4631 SmallVector<BasicBlock *, 16> IndirectDests; 4632 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) 4633 IndirectDests.push_back(getIndirectDest(i)); 4634 return IndirectDests; 4635 } 4636 void setDefaultDest(BasicBlock *B) { 4637 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); 4638 } 4639 void setIndirectDest(unsigned i, BasicBlock *B) { 4640 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); 4641 } 4642 4643 BasicBlock *getSuccessor(unsigned i) const { 4644 assert(i < getNumSuccessors() + 1 && 4645 "Successor # out of range for callbr!"); 4646 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1); 4647 } 4648 4649 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 4650 assert(i < getNumIndirectDests() + 1 && 4651 "Successor # out of range for callbr!"); 4652 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc); 4653 } 4654 4655 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } 4656 4657 // Methods for support type inquiry through isa, cast, and dyn_cast: 4658 static bool classof(const Instruction *I) { 4659 return (I->getOpcode() == Instruction::CallBr); 4660 } 4661 static bool classof(const Value *V) { 4662 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4663 } 4664 4665 private: 4666 // Shadow Instruction::setInstructionSubclassData with a private forwarding 4667 // method so that subclasses cannot accidentally use it. 
4668 template <typename Bitfield> 4669 void setSubclassData(typename Bitfield::Type Value) { 4670 Instruction::setSubclassData<Bitfield>(Value); 4671 } 4672 }; 4673 4674 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4675 ArrayRef<BasicBlock *> IndirectDests, 4676 ArrayRef<Value *> Args, 4677 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4678 const Twine &NameStr, BasicBlock::iterator InsertBefore) 4679 : CallBase(Ty->getReturnType(), Instruction::CallBr, 4680 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4681 InsertBefore) { 4682 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 4683 } 4684 4685 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4686 ArrayRef<BasicBlock *> IndirectDests, 4687 ArrayRef<Value *> Args, 4688 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4689 const Twine &NameStr, Instruction *InsertBefore) 4690 : CallBase(Ty->getReturnType(), Instruction::CallBr, 4691 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4692 InsertBefore) { 4693 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 4694 } 4695 4696 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, 4697 ArrayRef<BasicBlock *> IndirectDests, 4698 ArrayRef<Value *> Args, 4699 ArrayRef<OperandBundleDef> Bundles, int NumOperands, 4700 const Twine &NameStr, BasicBlock *InsertAtEnd) 4701 : CallBase(Ty->getReturnType(), Instruction::CallBr, 4702 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, 4703 InsertAtEnd) { 4704 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); 4705 } 4706 4707 //===----------------------------------------------------------------------===// 4708 // ResumeInst Class 4709 //===----------------------------------------------------------------------===// 4710 4711 //===--------------------------------------------------------------------------- 4712 /// Resume the propagation of an exception. 4713 /// 4714 class ResumeInst : public Instruction { 4715 ResumeInst(const ResumeInst &RI); 4716 4717 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr); 4718 explicit ResumeInst(Value *Exn, BasicBlock::iterator InsertBefore); 4719 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd); 4720 4721 protected: 4722 // Note: Instruction needs to be a friend here to call cloneImpl. 4723 friend class Instruction; 4724 4725 ResumeInst *cloneImpl() const; 4726 4727 public: 4728 static ResumeInst *Create(Value *Exn, BasicBlock::iterator InsertBefore) { 4729 return new (1) ResumeInst(Exn, InsertBefore); 4730 } 4731 4732 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) { 4733 return new(1) ResumeInst(Exn, InsertBefore); 4734 } 4735 4736 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) { 4737 return new(1) ResumeInst(Exn, InsertAtEnd); 4738 } 4739 4740 /// Provide fast operand accessors 4741 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4742 4743 /// Convenience accessor. 
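  ///
  /// A minimal sketch (Exn is assumed to be the exception value produced by a
  /// landingpad, and BB an existing BasicBlock):
  /// \code
  ///   ResumeInst *RI = ResumeInst::Create(Exn, BB); // insert at end of BB
  ///   Value *Propagated = RI->getValue();           // yields Exn again
  /// \endcode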
4744 Value *getValue() const { return Op<0>(); } 4745 4746 unsigned getNumSuccessors() const { return 0; } 4747 4748 // Methods for support type inquiry through isa, cast, and dyn_cast: 4749 static bool classof(const Instruction *I) { 4750 return I->getOpcode() == Instruction::Resume; 4751 } 4752 static bool classof(const Value *V) { 4753 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4754 } 4755 4756 private: 4757 BasicBlock *getSuccessor(unsigned idx) const { 4758 llvm_unreachable("ResumeInst has no successors!"); 4759 } 4760 4761 void setSuccessor(unsigned idx, BasicBlock *NewSucc) { 4762 llvm_unreachable("ResumeInst has no successors!"); 4763 } 4764 }; 4765 4766 template <> 4767 struct OperandTraits<ResumeInst> : 4768 public FixedNumOperandTraits<ResumeInst, 1> { 4769 }; 4770 4771 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value) 4772 4773 //===----------------------------------------------------------------------===// 4774 // CatchSwitchInst Class 4775 //===----------------------------------------------------------------------===// 4776 class CatchSwitchInst : public Instruction { 4777 using UnwindDestField = BoolBitfieldElementT<0>; 4778 4779 /// The number of operands actually allocated. NumOperands is 4780 /// the number actually in use. 4781 unsigned ReservedSpace; 4782 4783 // Operand[0] = Outer scope 4784 // Operand[1] = Unwind block destination 4785 // Operand[n] = BasicBlock to go to on match 4786 CatchSwitchInst(const CatchSwitchInst &CSI); 4787 4788 /// Create a new switch instruction, specifying a 4789 /// default destination. The number of additional handlers can be specified 4790 /// here to make memory allocation more efficient. 4791 /// This constructor can also autoinsert before another instruction. 4792 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 4793 unsigned NumHandlers, const Twine &NameStr, 4794 BasicBlock::iterator InsertBefore); 4795 4796 /// Create a new switch instruction, specifying a 4797 /// default destination. The number of additional handlers can be specified 4798 /// here to make memory allocation more efficient. 4799 /// This constructor can also autoinsert before another instruction. 4800 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 4801 unsigned NumHandlers, const Twine &NameStr, 4802 Instruction *InsertBefore); 4803 4804 /// Create a new switch instruction, specifying a 4805 /// default destination. The number of additional handlers can be specified 4806 /// here to make memory allocation more efficient. 4807 /// This constructor also autoinserts at the end of the specified BasicBlock. 4808 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 4809 unsigned NumHandlers, const Twine &NameStr, 4810 BasicBlock *InsertAtEnd); 4811 4812 // allocate space for exactly zero operands 4813 void *operator new(size_t S) { return User::operator new(S); } 4814 4815 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved); 4816 void growOperands(unsigned Size); 4817 4818 protected: 4819 // Note: Instruction needs to be a friend here to call cloneImpl. 
4820 friend class Instruction; 4821 4822 CatchSwitchInst *cloneImpl() const; 4823 4824 public: 4825 void operator delete(void *Ptr) { return User::operator delete(Ptr); } 4826 4827 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, 4828 unsigned NumHandlers, const Twine &NameStr, 4829 BasicBlock::iterator InsertBefore) { 4830 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, 4831 InsertBefore); 4832 } 4833 4834 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, 4835 unsigned NumHandlers, 4836 const Twine &NameStr = "", 4837 Instruction *InsertBefore = nullptr) { 4838 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, 4839 InsertBefore); 4840 } 4841 4842 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, 4843 unsigned NumHandlers, const Twine &NameStr, 4844 BasicBlock *InsertAtEnd) { 4845 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, 4846 InsertAtEnd); 4847 } 4848 4849 /// Provide fast operand accessors 4850 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4851 4852 // Accessor Methods for CatchSwitch stmt 4853 Value *getParentPad() const { return getOperand(0); } 4854 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); } 4855 4856 // Accessor Methods for CatchSwitch stmt 4857 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } 4858 bool unwindsToCaller() const { return !hasUnwindDest(); } 4859 BasicBlock *getUnwindDest() const { 4860 if (hasUnwindDest()) 4861 return cast<BasicBlock>(getOperand(1)); 4862 return nullptr; 4863 } 4864 void setUnwindDest(BasicBlock *UnwindDest) { 4865 assert(UnwindDest); 4866 assert(hasUnwindDest()); 4867 setOperand(1, UnwindDest); 4868 } 4869 4870 /// return the number of 'handlers' in this catchswitch 4871 /// instruction, except the default handler 4872 unsigned getNumHandlers() const { 4873 if (hasUnwindDest()) 4874 return getNumOperands() - 2; 4875 return getNumOperands() - 1; 4876 } 4877 4878 private: 4879 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); } 4880 static const BasicBlock *handler_helper(const Value *V) { 4881 return cast<BasicBlock>(V); 4882 } 4883 4884 public: 4885 using DerefFnTy = BasicBlock *(*)(Value *); 4886 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>; 4887 using handler_range = iterator_range<handler_iterator>; 4888 using ConstDerefFnTy = const BasicBlock *(*)(const Value *); 4889 using const_handler_iterator = 4890 mapped_iterator<const_op_iterator, ConstDerefFnTy>; 4891 using const_handler_range = iterator_range<const_handler_iterator>; 4892 4893 /// Returns an iterator that points to the first handler in CatchSwitchInst. 4894 handler_iterator handler_begin() { 4895 op_iterator It = op_begin() + 1; 4896 if (hasUnwindDest()) 4897 ++It; 4898 return handler_iterator(It, DerefFnTy(handler_helper)); 4899 } 4900 4901 /// Returns an iterator that points to the first handler in the 4902 /// CatchSwitchInst. 4903 const_handler_iterator handler_begin() const { 4904 const_op_iterator It = op_begin() + 1; 4905 if (hasUnwindDest()) 4906 ++It; 4907 return const_handler_iterator(It, ConstDerefFnTy(handler_helper)); 4908 } 4909 4910 /// Returns a read-only iterator that points one past the last 4911 /// handler in the CatchSwitchInst. 
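  ///
  /// Together with handler_begin() this enables range-based iteration, e.g. (a
  /// sketch; CS is assumed to be a CatchSwitchInst *):
  /// \code
  ///   for (BasicBlock *Handler : CS->handlers())
  ///     (void)Handler; // visit each handler block
  /// \endcode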
4912 handler_iterator handler_end() { 4913 return handler_iterator(op_end(), DerefFnTy(handler_helper)); 4914 } 4915 4916 /// Returns an iterator that points one past the last handler in the 4917 /// CatchSwitchInst. 4918 const_handler_iterator handler_end() const { 4919 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper)); 4920 } 4921 4922 /// iteration adapter for range-for loops. 4923 handler_range handlers() { 4924 return make_range(handler_begin(), handler_end()); 4925 } 4926 4927 /// iteration adapter for range-for loops. 4928 const_handler_range handlers() const { 4929 return make_range(handler_begin(), handler_end()); 4930 } 4931 4932 /// Add an entry to the switch instruction... 4933 /// Note: 4934 /// This action invalidates handler_end(). Old handler_end() iterator will 4935 /// point to the added handler. 4936 void addHandler(BasicBlock *Dest); 4937 4938 void removeHandler(handler_iterator HI); 4939 4940 unsigned getNumSuccessors() const { return getNumOperands() - 1; } 4941 BasicBlock *getSuccessor(unsigned Idx) const { 4942 assert(Idx < getNumSuccessors() && 4943 "Successor # out of range for catchswitch!"); 4944 return cast<BasicBlock>(getOperand(Idx + 1)); 4945 } 4946 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) { 4947 assert(Idx < getNumSuccessors() && 4948 "Successor # out of range for catchswitch!"); 4949 setOperand(Idx + 1, NewSucc); 4950 } 4951 4952 // Methods for support type inquiry through isa, cast, and dyn_cast: 4953 static bool classof(const Instruction *I) { 4954 return I->getOpcode() == Instruction::CatchSwitch; 4955 } 4956 static bool classof(const Value *V) { 4957 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4958 } 4959 }; 4960 4961 template <> 4962 struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {}; 4963 4964 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value) 4965 4966 //===----------------------------------------------------------------------===// 4967 // CleanupPadInst Class 4968 //===----------------------------------------------------------------------===// 4969 class CleanupPadInst : public FuncletPadInst { 4970 private: 4971 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, 4972 unsigned Values, const Twine &NameStr, 4973 BasicBlock::iterator InsertBefore) 4974 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, 4975 NameStr, InsertBefore) {} 4976 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, 4977 unsigned Values, const Twine &NameStr, 4978 Instruction *InsertBefore) 4979 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, 4980 NameStr, InsertBefore) {} 4981 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, 4982 unsigned Values, const Twine &NameStr, 4983 BasicBlock *InsertAtEnd) 4984 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, 4985 NameStr, InsertAtEnd) {} 4986 4987 public: 4988 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args, 4989 const Twine &NameStr, 4990 BasicBlock::iterator InsertBefore) { 4991 unsigned Values = 1 + Args.size(); 4992 return new (Values) 4993 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore); 4994 } 4995 4996 static CleanupPadInst *Create(Value *ParentPad, 4997 ArrayRef<Value *> Args = std::nullopt, 4998 const Twine &NameStr = "", 4999 Instruction *InsertBefore = nullptr) { 5000 unsigned Values = 1 + Args.size(); 5001 return new (Values) 5002 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore); 5003 } 5004 
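  // For example (a sketch; ParentPad is assumed to be the enclosing pad token,
  // or ConstantTokenNone for a top-level cleanup, and InsertBefore an existing
  // instruction):
  //   CleanupPadInst *CP =
  //       CleanupPadInst::Create(ParentPad, /*Args=*/std::nullopt, "cleanup",
  //                              InsertBefore);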
5005 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args, 5006 const Twine &NameStr, BasicBlock *InsertAtEnd) { 5007 unsigned Values = 1 + Args.size(); 5008 return new (Values) 5009 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd); 5010 } 5011 5012 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5013 static bool classof(const Instruction *I) { 5014 return I->getOpcode() == Instruction::CleanupPad; 5015 } 5016 static bool classof(const Value *V) { 5017 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5018 } 5019 }; 5020 5021 //===----------------------------------------------------------------------===// 5022 // CatchPadInst Class 5023 //===----------------------------------------------------------------------===// 5024 class CatchPadInst : public FuncletPadInst { 5025 private: 5026 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, 5027 unsigned Values, const Twine &NameStr, 5028 BasicBlock::iterator InsertBefore) 5029 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, 5030 NameStr, InsertBefore) {} 5031 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, 5032 unsigned Values, const Twine &NameStr, 5033 Instruction *InsertBefore) 5034 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, 5035 NameStr, InsertBefore) {} 5036 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, 5037 unsigned Values, const Twine &NameStr, 5038 BasicBlock *InsertAtEnd) 5039 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, 5040 NameStr, InsertAtEnd) {} 5041 5042 public: 5043 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, 5044 const Twine &NameStr, 5045 BasicBlock::iterator InsertBefore) { 5046 unsigned Values = 1 + Args.size(); 5047 return new (Values) 5048 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore); 5049 } 5050 5051 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, 5052 const Twine &NameStr = "", 5053 Instruction *InsertBefore = nullptr) { 5054 unsigned Values = 1 + Args.size(); 5055 return new (Values) 5056 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore); 5057 } 5058 5059 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, 5060 const Twine &NameStr, BasicBlock *InsertAtEnd) { 5061 unsigned Values = 1 + Args.size(); 5062 return new (Values) 5063 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd); 5064 } 5065 5066 /// Convenience accessors 5067 CatchSwitchInst *getCatchSwitch() const { 5068 return cast<CatchSwitchInst>(Op<-1>()); 5069 } 5070 void setCatchSwitch(Value *CatchSwitch) { 5071 assert(CatchSwitch); 5072 Op<-1>() = CatchSwitch; 5073 } 5074 5075 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5076 static bool classof(const Instruction *I) { 5077 return I->getOpcode() == Instruction::CatchPad; 5078 } 5079 static bool classof(const Value *V) { 5080 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5081 } 5082 }; 5083 5084 //===----------------------------------------------------------------------===// 5085 // CatchReturnInst Class 5086 //===----------------------------------------------------------------------===// 5087 5088 class CatchReturnInst : public Instruction { 5089 CatchReturnInst(const CatchReturnInst &RI); 5090 CatchReturnInst(Value *CatchPad, BasicBlock *BB, 5091 BasicBlock::iterator InsertBefore); 5092 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore); 5093 
CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd); 5094 5095 void init(Value *CatchPad, BasicBlock *BB); 5096 5097 protected: 5098 // Note: Instruction needs to be a friend here to call cloneImpl. 5099 friend class Instruction; 5100 5101 CatchReturnInst *cloneImpl() const; 5102 5103 public: 5104 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, 5105 BasicBlock::iterator InsertBefore) { 5106 assert(CatchPad); 5107 assert(BB); 5108 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore); 5109 } 5110 5111 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, 5112 Instruction *InsertBefore = nullptr) { 5113 assert(CatchPad); 5114 assert(BB); 5115 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore); 5116 } 5117 5118 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, 5119 BasicBlock *InsertAtEnd) { 5120 assert(CatchPad); 5121 assert(BB); 5122 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd); 5123 } 5124 5125 /// Provide fast operand accessors 5126 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 5127 5128 /// Convenience accessors. 5129 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); } 5130 void setCatchPad(CatchPadInst *CatchPad) { 5131 assert(CatchPad); 5132 Op<0>() = CatchPad; 5133 } 5134 5135 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); } 5136 void setSuccessor(BasicBlock *NewSucc) { 5137 assert(NewSucc); 5138 Op<1>() = NewSucc; 5139 } 5140 unsigned getNumSuccessors() const { return 1; } 5141 5142 /// Get the parentPad of this catchret's catchpad's catchswitch. 5143 /// The successor block is implicitly a member of this funclet. 5144 Value *getCatchSwitchParentPad() const { 5145 return getCatchPad()->getCatchSwitch()->getParentPad(); 5146 } 5147 5148 // Methods for support type inquiry through isa, cast, and dyn_cast: 5149 static bool classof(const Instruction *I) { 5150 return (I->getOpcode() == Instruction::CatchRet); 5151 } 5152 static bool classof(const Value *V) { 5153 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5154 } 5155 5156 private: 5157 BasicBlock *getSuccessor(unsigned Idx) const { 5158 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!"); 5159 return getSuccessor(); 5160 } 5161 5162 void setSuccessor(unsigned Idx, BasicBlock *B) { 5163 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!"); 5164 setSuccessor(B); 5165 } 5166 }; 5167 5168 template <> 5169 struct OperandTraits<CatchReturnInst> 5170 : public FixedNumOperandTraits<CatchReturnInst, 2> {}; 5171 5172 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value) 5173 5174 //===----------------------------------------------------------------------===// 5175 // CleanupReturnInst Class 5176 //===----------------------------------------------------------------------===// 5177 5178 class CleanupReturnInst : public Instruction { 5179 using UnwindDestField = BoolBitfieldElementT<0>; 5180 5181 private: 5182 CleanupReturnInst(const CleanupReturnInst &RI); 5183 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, 5184 BasicBlock::iterator InsertBefore); 5185 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, 5186 Instruction *InsertBefore = nullptr); 5187 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, 5188 BasicBlock *InsertAtEnd); 5189 5190 void init(Value *CleanupPad, BasicBlock *UnwindBB); 5191 5192 protected: 5193 // Note: Instruction needs to be a friend here to call 
cloneImpl. 5194 friend class Instruction; 5195 5196 CleanupReturnInst *cloneImpl() const; 5197 5198 public: 5199 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB, 5200 BasicBlock::iterator InsertBefore) { 5201 assert(CleanupPad); 5202 unsigned Values = 1; 5203 if (UnwindBB) 5204 ++Values; 5205 return new (Values) 5206 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore); 5207 } 5208 5209 static CleanupReturnInst *Create(Value *CleanupPad, 5210 BasicBlock *UnwindBB = nullptr, 5211 Instruction *InsertBefore = nullptr) { 5212 assert(CleanupPad); 5213 unsigned Values = 1; 5214 if (UnwindBB) 5215 ++Values; 5216 return new (Values) 5217 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore); 5218 } 5219 5220 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB, 5221 BasicBlock *InsertAtEnd) { 5222 assert(CleanupPad); 5223 unsigned Values = 1; 5224 if (UnwindBB) 5225 ++Values; 5226 return new (Values) 5227 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd); 5228 } 5229 5230 /// Provide fast operand accessors 5231 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 5232 5233 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } 5234 bool unwindsToCaller() const { return !hasUnwindDest(); } 5235 5236 /// Convenience accessor. 5237 CleanupPadInst *getCleanupPad() const { 5238 return cast<CleanupPadInst>(Op<0>()); 5239 } 5240 void setCleanupPad(CleanupPadInst *CleanupPad) { 5241 assert(CleanupPad); 5242 Op<0>() = CleanupPad; 5243 } 5244 5245 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; } 5246 5247 BasicBlock *getUnwindDest() const { 5248 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr; 5249 } 5250 void setUnwindDest(BasicBlock *NewDest) { 5251 assert(NewDest); 5252 assert(hasUnwindDest()); 5253 Op<1>() = NewDest; 5254 } 5255 5256 // Methods for support type inquiry through isa, cast, and dyn_cast: 5257 static bool classof(const Instruction *I) { 5258 return (I->getOpcode() == Instruction::CleanupRet); 5259 } 5260 static bool classof(const Value *V) { 5261 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5262 } 5263 5264 private: 5265 BasicBlock *getSuccessor(unsigned Idx) const { 5266 assert(Idx == 0); 5267 return getUnwindDest(); 5268 } 5269 5270 void setSuccessor(unsigned Idx, BasicBlock *B) { 5271 assert(Idx == 0); 5272 setUnwindDest(B); 5273 } 5274 5275 // Shadow Instruction::setInstructionSubclassData with a private forwarding 5276 // method so that subclasses cannot accidentally use it. 5277 template <typename Bitfield> 5278 void setSubclassData(typename Bitfield::Type Value) { 5279 Instruction::setSubclassData<Bitfield>(Value); 5280 } 5281 }; 5282 5283 template <> 5284 struct OperandTraits<CleanupReturnInst> 5285 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {}; 5286 5287 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value) 5288 5289 //===----------------------------------------------------------------------===// 5290 // UnreachableInst Class 5291 //===----------------------------------------------------------------------===// 5292 5293 //===--------------------------------------------------------------------------- 5294 /// This function has undefined behavior. In particular, the 5295 /// presence of this instruction indicates some higher level knowledge that the 5296 /// end of the block cannot be reached. 
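///
/// For example, a block whose end is known to be unreachable can be terminated
/// with (a sketch; BB is an assumed BasicBlock *):
/// \code
///   new UnreachableInst(BB->getContext(), BB);
/// \endcode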
5297 /// 5298 class UnreachableInst : public Instruction { 5299 protected: 5300 // Note: Instruction needs to be a friend here to call cloneImpl. 5301 friend class Instruction; 5302 5303 UnreachableInst *cloneImpl() const; 5304 5305 public: 5306 explicit UnreachableInst(LLVMContext &C, BasicBlock::iterator InsertBefore); 5307 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr); 5308 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd); 5309 5310 // allocate space for exactly zero operands 5311 void *operator new(size_t S) { return User::operator new(S, 0); } 5312 void operator delete(void *Ptr) { User::operator delete(Ptr); } 5313 5314 unsigned getNumSuccessors() const { return 0; } 5315 5316 // Methods for support type inquiry through isa, cast, and dyn_cast: 5317 static bool classof(const Instruction *I) { 5318 return I->getOpcode() == Instruction::Unreachable; 5319 } 5320 static bool classof(const Value *V) { 5321 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5322 } 5323 5324 private: 5325 BasicBlock *getSuccessor(unsigned idx) const { 5326 llvm_unreachable("UnreachableInst has no successors!"); 5327 } 5328 5329 void setSuccessor(unsigned idx, BasicBlock *B) { 5330 llvm_unreachable("UnreachableInst has no successors!"); 5331 } 5332 }; 5333 5334 //===----------------------------------------------------------------------===// 5335 // TruncInst Class 5336 //===----------------------------------------------------------------------===// 5337 5338 /// This class represents a truncation of integer types. 5339 class TruncInst : public CastInst { 5340 protected: 5341 // Note: Instruction needs to be a friend here to call cloneImpl. 5342 friend class Instruction; 5343 5344 /// Clone an identical TruncInst 5345 TruncInst *cloneImpl() const; 5346 5347 public: 5348 /// Constructor with insert-before-instruction semantics 5349 TruncInst( 5350 Value *S, ///< The value to be truncated 5351 Type *Ty, ///< The (smaller) type to truncate to 5352 const Twine &NameStr, ///< A name for the new instruction 5353 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5354 ); 5355 5356 /// Constructor with insert-before-instruction semantics 5357 TruncInst( 5358 Value *S, ///< The value to be truncated 5359 Type *Ty, ///< The (smaller) type to truncate to 5360 const Twine &NameStr = "", ///< A name for the new instruction 5361 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5362 ); 5363 5364 /// Constructor with insert-at-end-of-block semantics 5365 TruncInst( 5366 Value *S, ///< The value to be truncated 5367 Type *Ty, ///< The (smaller) type to truncate to 5368 const Twine &NameStr, ///< A name for the new instruction 5369 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5370 ); 5371 5372 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5373 static bool classof(const Instruction *I) { 5374 return I->getOpcode() == Trunc; 5375 } 5376 static bool classof(const Value *V) { 5377 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5378 } 5379 }; 5380 5381 //===----------------------------------------------------------------------===// 5382 // ZExtInst Class 5383 //===----------------------------------------------------------------------===// 5384 5385 /// This class represents zero extension of integer types. 5386 class ZExtInst : public CastInst { 5387 protected: 5388 // Note: Instruction needs to be a friend here to call cloneImpl. 
5389 friend class Instruction; 5390 5391 /// Clone an identical ZExtInst 5392 ZExtInst *cloneImpl() const; 5393 5394 public: 5395 /// Constructor with insert-before-instruction semantics 5396 ZExtInst( 5397 Value *S, ///< The value to be zero extended 5398 Type *Ty, ///< The type to zero extend to 5399 const Twine &NameStr, ///< A name for the new instruction 5400 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5401 ); 5402 5403 /// Constructor with insert-before-instruction semantics 5404 ZExtInst( 5405 Value *S, ///< The value to be zero extended 5406 Type *Ty, ///< The type to zero extend to 5407 const Twine &NameStr = "", ///< A name for the new instruction 5408 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5409 ); 5410 5411 /// Constructor with insert-at-end semantics. 5412 ZExtInst( 5413 Value *S, ///< The value to be zero extended 5414 Type *Ty, ///< The type to zero extend to 5415 const Twine &NameStr, ///< A name for the new instruction 5416 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5417 ); 5418 5419 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5420 static bool classof(const Instruction *I) { 5421 return I->getOpcode() == ZExt; 5422 } 5423 static bool classof(const Value *V) { 5424 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5425 } 5426 }; 5427 5428 //===----------------------------------------------------------------------===// 5429 // SExtInst Class 5430 //===----------------------------------------------------------------------===// 5431 5432 /// This class represents a sign extension of integer types. 5433 class SExtInst : public CastInst { 5434 protected: 5435 // Note: Instruction needs to be a friend here to call cloneImpl. 5436 friend class Instruction; 5437 5438 /// Clone an identical SExtInst 5439 SExtInst *cloneImpl() const; 5440 5441 public: 5442 /// Constructor with insert-before-instruction semantics 5443 SExtInst( 5444 Value *S, ///< The value to be sign extended 5445 Type *Ty, ///< The type to sign extend to 5446 const Twine &NameStr, ///< A name for the new instruction 5447 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5448 ); 5449 5450 /// Constructor with insert-before-instruction semantics 5451 SExtInst( 5452 Value *S, ///< The value to be sign extended 5453 Type *Ty, ///< The type to sign extend to 5454 const Twine &NameStr = "", ///< A name for the new instruction 5455 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5456 ); 5457 5458 /// Constructor with insert-at-end-of-block semantics 5459 SExtInst( 5460 Value *S, ///< The value to be sign extended 5461 Type *Ty, ///< The type to sign extend to 5462 const Twine &NameStr, ///< A name for the new instruction 5463 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5464 ); 5465 5466 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5467 static bool classof(const Instruction *I) { 5468 return I->getOpcode() == SExt; 5469 } 5470 static bool classof(const Value *V) { 5471 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5472 } 5473 }; 5474 5475 //===----------------------------------------------------------------------===// 5476 // FPTruncInst Class 5477 //===----------------------------------------------------------------------===// 5478 5479 /// This class represents a truncation of floating point types. 
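///
/// For example (a sketch; Val is assumed to be a value of type double, Ctx its
/// LLVMContext, and InsertBefore an existing instruction):
/// \code
///   auto *Narrowed =
///       new FPTruncInst(Val, Type::getFloatTy(Ctx), "narrow", InsertBefore);
/// \endcode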
5480 class FPTruncInst : public CastInst { 5481 protected: 5482 // Note: Instruction needs to be a friend here to call cloneImpl. 5483 friend class Instruction; 5484 5485 /// Clone an identical FPTruncInst 5486 FPTruncInst *cloneImpl() const; 5487 5488 public: 5489 /// Constructor with insert-before-instruction semantics 5490 FPTruncInst( 5491 Value *S, ///< The value to be truncated 5492 Type *Ty, ///< The type to truncate to 5493 const Twine &NameStr, ///< A name for the new instruction 5494 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5495 ); 5496 5497 /// Constructor with insert-before-instruction semantics 5498 FPTruncInst( 5499 Value *S, ///< The value to be truncated 5500 Type *Ty, ///< The type to truncate to 5501 const Twine &NameStr = "", ///< A name for the new instruction 5502 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5503 ); 5504 5505 /// Constructor with insert-at-end-of-block semantics 5506 FPTruncInst( 5507 Value *S, ///< The value to be truncated 5508 Type *Ty, ///< The type to truncate to 5509 const Twine &NameStr, ///< A name for the new instruction 5510 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5511 ); 5512 5513 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5514 static bool classof(const Instruction *I) { 5515 return I->getOpcode() == FPTrunc; 5516 } 5517 static bool classof(const Value *V) { 5518 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5519 } 5520 }; 5521 5522 //===----------------------------------------------------------------------===// 5523 // FPExtInst Class 5524 //===----------------------------------------------------------------------===// 5525 5526 /// This class represents an extension of floating point types. 5527 class FPExtInst : public CastInst { 5528 protected: 5529 // Note: Instruction needs to be a friend here to call cloneImpl.
5530 friend class Instruction; 5531 5532 /// Clone an identical FPExtInst 5533 FPExtInst *cloneImpl() const; 5534 5535 public: 5536 /// Constructor with insert-before-instruction semantics 5537 FPExtInst( 5538 Value *S, ///< The value to be extended 5539 Type *Ty, ///< The type to extend to 5540 const Twine &NameStr, ///< A name for the new instruction 5541 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5542 ); 5543 5544 /// Constructor with insert-before-instruction semantics 5545 FPExtInst( 5546 Value *S, ///< The value to be extended 5547 Type *Ty, ///< The type to extend to 5548 const Twine &NameStr = "", ///< A name for the new instruction 5549 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5550 ); 5551 5552 /// Constructor with insert-at-end-of-block semantics 5553 FPExtInst( 5554 Value *S, ///< The value to be extended 5555 Type *Ty, ///< The type to extend to 5556 const Twine &NameStr, ///< A name for the new instruction 5557 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5558 ); 5559 5560 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5561 static bool classof(const Instruction *I) { 5562 return I->getOpcode() == FPExt; 5563 } 5564 static bool classof(const Value *V) { 5565 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5566 } 5567 }; 5568 5569 //===----------------------------------------------------------------------===// 5570 // UIToFPInst Class 5571 //===----------------------------------------------------------------------===// 5572 5573 /// This class represents a cast unsigned integer to floating point. 5574 class UIToFPInst : public CastInst { 5575 protected: 5576 // Note: Instruction needs to be a friend here to call cloneImpl. 5577 friend class Instruction; 5578 5579 /// Clone an identical UIToFPInst 5580 UIToFPInst *cloneImpl() const; 5581 5582 public: 5583 /// Constructor with insert-before-instruction semantics 5584 UIToFPInst( 5585 Value *S, ///< The value to be converted 5586 Type *Ty, ///< The type to convert to 5587 const Twine &NameStr, ///< A name for the new instruction 5588 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5589 ); 5590 5591 /// Constructor with insert-before-instruction semantics 5592 UIToFPInst( 5593 Value *S, ///< The value to be converted 5594 Type *Ty, ///< The type to convert to 5595 const Twine &NameStr = "", ///< A name for the new instruction 5596 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5597 ); 5598 5599 /// Constructor with insert-at-end-of-block semantics 5600 UIToFPInst( 5601 Value *S, ///< The value to be converted 5602 Type *Ty, ///< The type to convert to 5603 const Twine &NameStr, ///< A name for the new instruction 5604 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5605 ); 5606 5607 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5608 static bool classof(const Instruction *I) { 5609 return I->getOpcode() == UIToFP; 5610 } 5611 static bool classof(const Value *V) { 5612 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5613 } 5614 }; 5615 5616 //===----------------------------------------------------------------------===// 5617 // SIToFPInst Class 5618 //===----------------------------------------------------------------------===// 5619 5620 /// This class represents a cast from signed integer to floating point. 
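///
/// For example (a sketch; IntVal is assumed to be an i32 value, Ctx its
/// LLVMContext, and InsertBefore an existing instruction):
/// \code
///   auto *AsDouble =
///       new SIToFPInst(IntVal, Type::getDoubleTy(Ctx), "conv", InsertBefore);
/// \endcode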
5621 class SIToFPInst : public CastInst { 5622 protected: 5623 // Note: Instruction needs to be a friend here to call cloneImpl. 5624 friend class Instruction; 5625 5626 /// Clone an identical SIToFPInst 5627 SIToFPInst *cloneImpl() const; 5628 5629 public: 5630 /// Constructor with insert-before-instruction semantics 5631 SIToFPInst( 5632 Value *S, ///< The value to be converted 5633 Type *Ty, ///< The type to convert to 5634 const Twine &NameStr, ///< A name for the new instruction 5635 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5636 ); 5637 5638 /// Constructor with insert-before-instruction semantics 5639 SIToFPInst( 5640 Value *S, ///< The value to be converted 5641 Type *Ty, ///< The type to convert to 5642 const Twine &NameStr = "", ///< A name for the new instruction 5643 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5644 ); 5645 5646 /// Constructor with insert-at-end-of-block semantics 5647 SIToFPInst( 5648 Value *S, ///< The value to be converted 5649 Type *Ty, ///< The type to convert to 5650 const Twine &NameStr, ///< A name for the new instruction 5651 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5652 ); 5653 5654 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5655 static bool classof(const Instruction *I) { 5656 return I->getOpcode() == SIToFP; 5657 } 5658 static bool classof(const Value *V) { 5659 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5660 } 5661 }; 5662 5663 //===----------------------------------------------------------------------===// 5664 // FPToUIInst Class 5665 //===----------------------------------------------------------------------===// 5666 5667 /// This class represents a cast from floating point to unsigned integer 5668 class FPToUIInst : public CastInst { 5669 protected: 5670 // Note: Instruction needs to be a friend here to call cloneImpl. 
5671 friend class Instruction; 5672 5673 /// Clone an identical FPToUIInst 5674 FPToUIInst *cloneImpl() const; 5675 5676 public: 5677 /// Constructor with insert-before-instruction semantics 5678 FPToUIInst( 5679 Value *S, ///< The value to be converted 5680 Type *Ty, ///< The type to convert to 5681 const Twine &NameStr, ///< A name for the new instruction 5682 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5683 ); 5684 5685 /// Constructor with insert-before-instruction semantics 5686 FPToUIInst( 5687 Value *S, ///< The value to be converted 5688 Type *Ty, ///< The type to convert to 5689 const Twine &NameStr = "", ///< A name for the new instruction 5690 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5691 ); 5692 5693 /// Constructor with insert-at-end-of-block semantics 5694 FPToUIInst( 5695 Value *S, ///< The value to be converted 5696 Type *Ty, ///< The type to convert to 5697 const Twine &NameStr, ///< A name for the new instruction 5698 BasicBlock *InsertAtEnd ///< Where to insert the new instruction 5699 ); 5700 5701 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5702 static bool classof(const Instruction *I) { 5703 return I->getOpcode() == FPToUI; 5704 } 5705 static bool classof(const Value *V) { 5706 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5707 } 5708 }; 5709 5710 //===----------------------------------------------------------------------===// 5711 // FPToSIInst Class 5712 //===----------------------------------------------------------------------===// 5713 5714 /// This class represents a cast from floating point to signed integer. 5715 class FPToSIInst : public CastInst { 5716 protected: 5717 // Note: Instruction needs to be a friend here to call cloneImpl. 5718 friend class Instruction; 5719 5720 /// Clone an identical FPToSIInst 5721 FPToSIInst *cloneImpl() const; 5722 5723 public: 5724 /// Constructor with insert-before-instruction semantics 5725 FPToSIInst( 5726 Value *S, ///< The value to be converted 5727 Type *Ty, ///< The type to convert to 5728 const Twine &NameStr, ///< A name for the new instruction 5729 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5730 ); 5731 5732 /// Constructor with insert-before-instruction semantics 5733 FPToSIInst( 5734 Value *S, ///< The value to be converted 5735 Type *Ty, ///< The type to convert to 5736 const Twine &NameStr = "", ///< A name for the new instruction 5737 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5738 ); 5739 5740 /// Constructor with insert-at-end-of-block semantics 5741 FPToSIInst( 5742 Value *S, ///< The value to be converted 5743 Type *Ty, ///< The type to convert to 5744 const Twine &NameStr, ///< A name for the new instruction 5745 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5746 ); 5747 5748 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5749 static bool classof(const Instruction *I) { 5750 return I->getOpcode() == FPToSI; 5751 } 5752 static bool classof(const Value *V) { 5753 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5754 } 5755 }; 5756 5757 //===----------------------------------------------------------------------===// 5758 // IntToPtrInst Class 5759 //===----------------------------------------------------------------------===// 5760 5761 /// This class represents a cast from an integer to a pointer. 
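///
/// For example (a sketch; Addr is assumed to be an integer value of
/// pointer-sized width, Ctx its LLVMContext, and InsertBefore an existing
/// instruction):
/// \code
///   auto *Ptr = new IntToPtrInst(Addr, PointerType::get(Ctx, /*AddrSpace=*/0),
///                                "ptr", InsertBefore);
/// \endcode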
5762 class IntToPtrInst : public CastInst { 5763 public: 5764 // Note: Instruction needs to be a friend here to call cloneImpl. 5765 friend class Instruction; 5766 5767 /// Constructor with insert-before-instruction semantics 5768 IntToPtrInst( 5769 Value *S, ///< The value to be converted 5770 Type *Ty, ///< The type to convert to 5771 const Twine &NameStr, ///< A name for the new instruction 5772 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5773 ); 5774 5775 /// Constructor with insert-before-instruction semantics 5776 IntToPtrInst( 5777 Value *S, ///< The value to be converted 5778 Type *Ty, ///< The type to convert to 5779 const Twine &NameStr = "", ///< A name for the new instruction 5780 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5781 ); 5782 5783 /// Constructor with insert-at-end-of-block semantics 5784 IntToPtrInst( 5785 Value *S, ///< The value to be converted 5786 Type *Ty, ///< The type to convert to 5787 const Twine &NameStr, ///< A name for the new instruction 5788 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5789 ); 5790 5791 /// Clone an identical IntToPtrInst. 5792 IntToPtrInst *cloneImpl() const; 5793 5794 /// Returns the address space of this instruction's pointer type. 5795 unsigned getAddressSpace() const { 5796 return getType()->getPointerAddressSpace(); 5797 } 5798 5799 // Methods for support type inquiry through isa, cast, and dyn_cast: 5800 static bool classof(const Instruction *I) { 5801 return I->getOpcode() == IntToPtr; 5802 } 5803 static bool classof(const Value *V) { 5804 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5805 } 5806 }; 5807 5808 //===----------------------------------------------------------------------===// 5809 // PtrToIntInst Class 5810 //===----------------------------------------------------------------------===// 5811 5812 /// This class represents a cast from a pointer to an integer. 5813 class PtrToIntInst : public CastInst { 5814 protected: 5815 // Note: Instruction needs to be a friend here to call cloneImpl. 5816 friend class Instruction; 5817 5818 /// Clone an identical PtrToIntInst. 5819 PtrToIntInst *cloneImpl() const; 5820 5821 public: 5822 /// Constructor with insert-before-instruction semantics 5823 PtrToIntInst( 5824 Value *S, ///< The value to be converted 5825 Type *Ty, ///< The type to convert to 5826 const Twine &NameStr, ///< A name for the new instruction 5827 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5828 ); 5829 5830 /// Constructor with insert-before-instruction semantics 5831 PtrToIntInst( 5832 Value *S, ///< The value to be converted 5833 Type *Ty, ///< The type to convert to 5834 const Twine &NameStr = "", ///< A name for the new instruction 5835 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5836 ); 5837 5838 /// Constructor with insert-at-end-of-block semantics 5839 PtrToIntInst( 5840 Value *S, ///< The value to be converted 5841 Type *Ty, ///< The type to convert to 5842 const Twine &NameStr, ///< A name for the new instruction 5843 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5844 ); 5845 5846 /// Gets the pointer operand. 5847 Value *getPointerOperand() { return getOperand(0); } 5848 /// Gets the pointer operand. 5849 const Value *getPointerOperand() const { return getOperand(0); } 5850 /// Gets the operand index of the pointer operand. 
5851 static unsigned getPointerOperandIndex() { return 0U; } 5852 5853 /// Returns the address space of the pointer operand. 5854 unsigned getPointerAddressSpace() const { 5855 return getPointerOperand()->getType()->getPointerAddressSpace(); 5856 } 5857 5858 // Methods for support type inquiry through isa, cast, and dyn_cast: 5859 static bool classof(const Instruction *I) { 5860 return I->getOpcode() == PtrToInt; 5861 } 5862 static bool classof(const Value *V) { 5863 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5864 } 5865 }; 5866 5867 //===----------------------------------------------------------------------===// 5868 // BitCastInst Class 5869 //===----------------------------------------------------------------------===// 5870 5871 /// This class represents a no-op cast from one type to another. 5872 class BitCastInst : public CastInst { 5873 protected: 5874 // Note: Instruction needs to be a friend here to call cloneImpl. 5875 friend class Instruction; 5876 5877 /// Clone an identical BitCastInst. 5878 BitCastInst *cloneImpl() const; 5879 5880 public: 5881 /// Constructor with insert-before-instruction semantics 5882 BitCastInst( 5883 Value *S, ///< The value to be casted 5884 Type *Ty, ///< The type to casted to 5885 const Twine &NameStr, ///< A name for the new instruction 5886 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction 5887 ); 5888 5889 /// Constructor with insert-before-instruction semantics 5890 BitCastInst( 5891 Value *S, ///< The value to be casted 5892 Type *Ty, ///< The type to casted to 5893 const Twine &NameStr = "", ///< A name for the new instruction 5894 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5895 ); 5896 5897 /// Constructor with insert-at-end-of-block semantics 5898 BitCastInst( 5899 Value *S, ///< The value to be casted 5900 Type *Ty, ///< The type to casted to 5901 const Twine &NameStr, ///< A name for the new instruction 5902 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5903 ); 5904 5905 // Methods for support type inquiry through isa, cast, and dyn_cast: 5906 static bool classof(const Instruction *I) { 5907 return I->getOpcode() == BitCast; 5908 } 5909 static bool classof(const Value *V) { 5910 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5911 } 5912 }; 5913 5914 //===----------------------------------------------------------------------===// 5915 // AddrSpaceCastInst Class 5916 //===----------------------------------------------------------------------===// 5917 5918 /// This class represents a conversion between pointers from one address space 5919 /// to another. 5920 class AddrSpaceCastInst : public CastInst { 5921 protected: 5922 // Note: Instruction needs to be a friend here to call cloneImpl. 5923 friend class Instruction; 5924 5925 /// Clone an identical AddrSpaceCastInst. 

//===----------------------------------------------------------------------===//
// AddrSpaceCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a conversion between pointers from one address space
/// to another.
class AddrSpaceCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical AddrSpaceCastInst.
  AddrSpaceCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  AddrSpaceCastInst(
      Value *S,                         ///< The value to be cast
      Type *Ty,                         ///< The type to cast to
      const Twine &NameStr,             ///< A name for the new instruction
      BasicBlock::iterator InsertBefore ///< Where to insert the new instruction
  );

  /// Constructor with insert-before-instruction semantics
  AddrSpaceCastInst(
      Value *S,                           ///< The value to be cast
      Type *Ty,                           ///< The type to cast to
      const Twine &NameStr = "",          ///< A name for the new instruction
      Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  AddrSpaceCastInst(
      Value *S,               ///< The value to be cast
      Type *Ty,               ///< The type to cast to
      const Twine &NameStr,   ///< A name for the new instruction
      BasicBlock *InsertAtEnd ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == AddrSpaceCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Gets the pointer operand.
  Value *getPointerOperand() {
    return getOperand(0);
  }

  /// Gets the pointer operand.
  const Value *getPointerOperand() const {
    return getOperand(0);
  }

  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() {
    return 0U;
  }

  /// Returns the address space of the pointer operand.
  unsigned getSrcAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the address space of the result.
  unsigned getDestAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }
};

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

/// A helper function that returns the pointer operand of a load or store
/// instruction. Returns nullptr if it is not a load or store.
inline const Value *getLoadStorePointerOperand(const Value *V) {
  if (auto *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerOperand();
  if (auto *Store = dyn_cast<StoreInst>(V))
    return Store->getPointerOperand();
  return nullptr;
}
inline Value *getLoadStorePointerOperand(Value *V) {
  return const_cast<Value *>(
      getLoadStorePointerOperand(static_cast<const Value *>(V)));
}

/// A helper function that returns the pointer operand of a load, store,
/// or GEP instruction. Returns nullptr if it is not a load, store, or GEP.
inline const Value *getPointerOperand(const Value *V) {
  if (auto *Ptr = getLoadStorePointerOperand(V))
    return Ptr;
  if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
    return Gep->getPointerOperand();
  return nullptr;
}
inline Value *getPointerOperand(Value *V) {
  return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
}
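
// Usage sketch (editorial example, not part of the upstream header): these
// helpers let analyses treat loads, stores, and GEPs uniformly when only the
// pointer operand matters. `I` is assumed to be an Instruction* in scope.
//
//   if (const Value *Ptr = getPointerOperand(I)) {
//     // `I` is a load, store, or GEP, and `Ptr` is the address it uses.
//   } else {
//     // `I` carries no pointer operand recognized by this helper.
//   }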

/// A helper function that returns the alignment of a load or store
/// instruction.
inline Align getLoadStoreAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlign();
  return cast<StoreInst>(I)->getAlign();
}

/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
inline unsigned getLoadStoreAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}

/// A helper function that returns the type of a load or store instruction.
inline Type *getLoadStoreType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns an atomic operation's sync scope; returns
/// std::nullopt if it is not an atomic operation.
inline std::optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) {
  if (!I->isAtomic())
    return std::nullopt;
  if (auto *AI = dyn_cast<LoadInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<StoreInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<FenceInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<AtomicRMWInst>(I))
    return AI->getSyncScopeID();
  llvm_unreachable("unhandled atomic operation");
}

//===----------------------------------------------------------------------===//
// FreezeInst Class
//===----------------------------------------------------------------------===//

/// This class represents a freeze instruction, which returns an arbitrary,
/// but fixed, concrete value if its operand is either a poison value or an
/// undef value.
class FreezeInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FreezeInst.
  FreezeInst *cloneImpl() const;

public:
  explicit FreezeInst(Value *S, const Twine &NameStr,
                      BasicBlock::iterator InsertBefore);
  explicit FreezeInst(Value *S,
                      const Twine &NameStr = "",
                      Instruction *InsertBefore = nullptr);
  FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Freeze;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

} // end namespace llvm

#endif // LLVM_IR_INSTRUCTIONS_H