1 //== MemoryTaggingSupport.cpp - helpers for memory tagging implementations ===//
2 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
3 // See https://llvm.org/LICENSE.txt for license information.
4 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // This file declares common infrastructure for HWAddressSanitizer and
9 // Aarch64StackTagging.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "llvm/Transforms/Utils/MemoryTaggingSupport.h"
14
15 #include "llvm/Analysis/CFG.h"
16 #include "llvm/Analysis/PostDominators.h"
17 #include "llvm/Analysis/StackSafetyAnalysis.h"
18 #include "llvm/Analysis/ValueTracking.h"
19 #include "llvm/IR/BasicBlock.h"
20 #include "llvm/IR/IntrinsicInst.h"
21 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
22
23 namespace llvm {
24 namespace memtag {
25 namespace {
maybeReachableFromEachOther(const SmallVectorImpl<IntrinsicInst * > & Insts,const DominatorTree * DT,const LoopInfo * LI,size_t MaxLifetimes)26 bool maybeReachableFromEachOther(const SmallVectorImpl<IntrinsicInst *> &Insts,
27 const DominatorTree *DT, const LoopInfo *LI,
28 size_t MaxLifetimes) {
29 // If we have too many lifetime ends, give up, as the algorithm below is N^2.
30 if (Insts.size() > MaxLifetimes)
31 return true;
32 for (size_t I = 0; I < Insts.size(); ++I) {
33 for (size_t J = 0; J < Insts.size(); ++J) {
34 if (I == J)
35 continue;
36 if (isPotentiallyReachable(Insts[I], Insts[J], nullptr, DT, LI))
37 return true;
38 }
39 }
40 return false;
41 }
42 } // namespace
43
forAllReachableExits(const DominatorTree & DT,const PostDominatorTree & PDT,const LoopInfo & LI,const Instruction * Start,const SmallVectorImpl<IntrinsicInst * > & Ends,const SmallVectorImpl<Instruction * > & RetVec,llvm::function_ref<void (Instruction *)> Callback)44 bool forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT,
45 const LoopInfo &LI, const Instruction *Start,
46 const SmallVectorImpl<IntrinsicInst *> &Ends,
47 const SmallVectorImpl<Instruction *> &RetVec,
48 llvm::function_ref<void(Instruction *)> Callback) {
49 if (Ends.size() == 1 && PDT.dominates(Ends[0], Start)) {
50 Callback(Ends[0]);
51 return true;
52 }
53 SmallPtrSet<BasicBlock *, 2> EndBlocks;
54 for (auto *End : Ends) {
55 EndBlocks.insert(End->getParent());
56 }
57 SmallVector<Instruction *, 8> ReachableRetVec;
58 unsigned NumCoveredExits = 0;
59 for (auto *RI : RetVec) {
60 if (!isPotentiallyReachable(Start, RI, nullptr, &DT, &LI))
61 continue;
62 ReachableRetVec.push_back(RI);
63 // If there is an end in the same basic block as the return, we know for
64 // sure that the return is covered. Otherwise, we can check whether there
65 // is a way to reach the RI from the start of the lifetime without passing
66 // through an end.
67 if (EndBlocks.count(RI->getParent()) > 0 ||
68 !isPotentiallyReachable(Start, RI, &EndBlocks, &DT, &LI)) {
69 ++NumCoveredExits;
70 }
71 }
72 // If there's a mix of covered and non-covered exits, just put the untag
73 // on exits, so we avoid the redundancy of untagging twice.
74 if (NumCoveredExits == ReachableRetVec.size()) {
75 for (auto *End : Ends)
76 Callback(End);
77 } else {
78 for (auto *RI : ReachableRetVec)
79 Callback(RI);
80 // We may have inserted untag outside of the lifetime interval.
81 // Signal the caller to remove the lifetime end call for this alloca.
82 return false;
83 }
84 return true;
85 }
86
isStandardLifetime(const SmallVectorImpl<IntrinsicInst * > & LifetimeStart,const SmallVectorImpl<IntrinsicInst * > & LifetimeEnd,const DominatorTree * DT,const LoopInfo * LI,size_t MaxLifetimes)87 bool isStandardLifetime(const SmallVectorImpl<IntrinsicInst *> &LifetimeStart,
88 const SmallVectorImpl<IntrinsicInst *> &LifetimeEnd,
89 const DominatorTree *DT, const LoopInfo *LI,
90 size_t MaxLifetimes) {
91 // An alloca that has exactly one start and end in every possible execution.
92 // If it has multiple ends, they have to be unreachable from each other, so
93 // at most one of them is actually used for each execution of the function.
94 return LifetimeStart.size() == 1 &&
95 (LifetimeEnd.size() == 1 ||
96 (LifetimeEnd.size() > 0 &&
97 !maybeReachableFromEachOther(LifetimeEnd, DT, LI, MaxLifetimes)));
98 }
99
getUntagLocationIfFunctionExit(Instruction & Inst)100 Instruction *getUntagLocationIfFunctionExit(Instruction &Inst) {
101 if (isa<ReturnInst>(Inst)) {
102 if (CallInst *CI = Inst.getParent()->getTerminatingMustTailCall())
103 return CI;
104 return &Inst;
105 }
106 if (isa<ResumeInst, CleanupReturnInst>(Inst)) {
107 return &Inst;
108 }
109 return nullptr;
110 }
111
// Accumulates per-instruction stack-tagging information into Info:
// returns-twice calls, interesting allocas, their lifetime markers and
// debug intrinsics, and the function exits that will need untagging.
void StackInfoBuilder::visit(Instruction &Inst) {
  if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
    if (CI->canReturnTwice()) {
      // Record the presence of returns_twice (setjmp-like) calls; the
      // tagging passes consult this flag later.
      Info.CallsReturnTwice = true;
    }
  }
  if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
    if (isInterestingAlloca(*AI)) {
      // operator[] default-constructs the AllocaInfo entry on first sight.
      Info.AllocasToInstrument[AI].AI = AI;
    }
    return;
  }
  auto *II = dyn_cast<IntrinsicInst>(&Inst);
  if (II && (II->getIntrinsicID() == Intrinsic::lifetime_start ||
             II->getIntrinsicID() == Intrinsic::lifetime_end)) {
    // Operand 1 is the pointer argument of the lifetime intrinsic; chase it
    // back to the underlying alloca, if any.
    AllocaInst *AI = findAllocaForValue(II->getArgOperand(1));
    if (!AI) {
      // Marker whose base alloca can't be identified — presumably callers
      // handle these conservatively; see users of UnrecognizedLifetimes.
      Info.UnrecognizedLifetimes.push_back(&Inst);
      return;
    }
    if (!isInterestingAlloca(*AI))
      return;
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      Info.AllocasToInstrument[AI].LifetimeStart.push_back(II);
    else
      Info.AllocasToInstrument[AI].LifetimeEnd.push_back(II);
    return;
  }
  if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst)) {
    // Remember debug intrinsics that refer to interesting allocas so they
    // can be updated when the alloca is replaced.
    for (Value *V : DVI->location_ops()) {
      if (auto *AI = dyn_cast_or_null<AllocaInst>(V)) {
        if (!isInterestingAlloca(*AI))
          continue;
        AllocaInfo &AInfo = Info.AllocasToInstrument[AI];
        auto &DVIVec = AInfo.DbgVariableIntrinsics;
        // A DVI can name the same alloca in several location ops; only
        // consecutive duplicates are suppressed here.
        if (DVIVec.empty() || DVIVec.back() != DVI)
          DVIVec.push_back(DVI);
      }
    }
    // NOTE: intentionally no return — falls through to the exit check
    // below, which is a no-op for debug intrinsics.
  }
  Instruction *ExitUntag = getUntagLocationIfFunctionExit(Inst);
  if (ExitUntag)
    Info.RetVec.push_back(ExitUntag);
}
156
isInterestingAlloca(const AllocaInst & AI)157 bool StackInfoBuilder::isInterestingAlloca(const AllocaInst &AI) {
158 return (AI.getAllocatedType()->isSized() &&
159 // FIXME: instrument dynamic allocas, too
160 AI.isStaticAlloca() &&
161 // alloca() may be called with 0 size, ignore it.
162 memtag::getAllocaSizeInBytes(AI) > 0 &&
163 // We are only interested in allocas not promotable to registers.
164 // Promotable allocas are common under -O0.
165 !isAllocaPromotable(&AI) &&
166 // inalloca allocas are not treated as static, and we don't want
167 // dynamic alloca instrumentation for them as well.
168 !AI.isUsedWithInAlloca() &&
169 // swifterror allocas are register promoted by ISel
170 !AI.isSwiftError()) &&
171 // safe allocas are not interesting
172 !(SSI && SSI->isSafe(AI));
173 }
174
getAllocaSizeInBytes(const AllocaInst & AI)175 uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
176 auto DL = AI.getModule()->getDataLayout();
177 return *AI.getAllocationSize(DL);
178 }
179
// Raises Info.AI's alignment to at least \p Alignment and, when the
// allocation size is not already a multiple of \p Alignment, replaces the
// alloca with a new one of type { original type, [pad x i8] } whose size is
// rounded up. Info.AI is updated to point at the replacement.
void alignAndPadAlloca(memtag::AllocaInfo &Info, llvm::Align Alignment) {
  const Align NewAlignment = std::max(Info.AI->getAlign(), Alignment);
  Info.AI->setAlignment(NewAlignment);
  auto &Ctx = Info.AI->getFunction()->getContext();

  uint64_t Size = getAllocaSizeInBytes(*Info.AI);
  uint64_t AlignedSize = alignTo(Size, Alignment);
  // Size already a multiple of the requested alignment: the realignment
  // above is all that was needed.
  if (Size == AlignedSize)
    return;

  // Add padding to the alloca.
  // An array allocation is flattened to an equivalent ArrayType so the
  // padded struct spans the whole allocation, not just one element.
  // NOTE(review): assumes the array size is a ConstantInt — holds for
  // static allocas, which is what isInterestingAlloca filters for.
  Type *AllocatedType =
      Info.AI->isArrayAllocation()
          ? ArrayType::get(
                Info.AI->getAllocatedType(),
                cast<ConstantInt>(Info.AI->getArraySize())->getZExtValue())
          : Info.AI->getAllocatedType();
  Type *PaddingType = ArrayType::get(Type::getInt8Ty(Ctx), AlignedSize - Size);
  Type *TypeWithPadding = StructType::get(AllocatedType, PaddingType);
  // The trailing ctor argument inserts the new alloca right before the old
  // one; its name, alignment, flags and metadata are carried over below.
  auto *NewAI = new AllocaInst(TypeWithPadding, Info.AI->getAddressSpace(),
                               nullptr, "", Info.AI);
  NewAI->takeName(Info.AI);
  NewAI->setAlignment(Info.AI->getAlign());
  NewAI->setUsedWithInAlloca(Info.AI->isUsedWithInAlloca());
  NewAI->setSwiftError(Info.AI->isSwiftError());
  NewAI->copyMetadata(*Info.AI);

  Value *NewPtr = NewAI;

  // TODO: Remove when typed pointers dropped
  if (Info.AI->getType() != NewAI->getType())
    NewPtr = new BitCastInst(NewAI, Info.AI->getType(), "", Info.AI);

  // Rewrite all uses before erasing the old alloca, then publish the
  // replacement back to the caller through Info.AI.
  Info.AI->replaceAllUsesWith(NewPtr);
  Info.AI->eraseFromParent();
  Info.AI = NewAI;
}
217
218 } // namespace memtag
219 } // namespace llvm
220