//== MemoryTaggingSupport.cpp - helpers for memory tagging implementations ===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements common infrastructure for HWAddressSanitizer and
// Aarch64StackTagging.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/MemoryTaggingSupport.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"

namespace llvm {
namespace memtag {
namespace {
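// Returns whether any instruction in \p Insts may be reachable from any
// other, i.e. whether two of the lifetime markers could both execute during a
// single run of the function. Conservatively answers true when the quadratic
// reachability check would be too expensive (more than \p MaxLifetimes
// instructions).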
bool maybeReachableFromEachOther(const SmallVectorImpl<IntrinsicInst *> &Insts,
                                 const DominatorTree *DT, const LoopInfo *LI,
                                 size_t MaxLifetimes) {
  // If we have too many lifetime ends, give up, as the algorithm below is N^2.
  if (Insts.size() > MaxLifetimes)
    return true;
  for (size_t I = 0; I < Insts.size(); ++I) {
    for (size_t J = 0; J < Insts.size(); ++J) {
      if (I == J)
        continue;
      if (isPotentiallyReachable(Insts[I], Insts[J], nullptr, DT, LI))
        return true;
    }
  }
  return false;
}
} // namespace

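// Invokes \p Callback at every exit point (lifetime end or function return)
// reachable from \p Start at which the tagged memory must be untagged.
// Returns false when the untagging had to be placed on returns outside the
// lifetime interval; the caller should then remove the lifetime-end markers
// for this alloca.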
bool forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT,
                          const LoopInfo &LI, const Instruction *Start,
                          const SmallVectorImpl<IntrinsicInst *> &Ends,
                          const SmallVectorImpl<Instruction *> &RetVec,
                          llvm::function_ref<void(Instruction *)> Callback) {
  if (Ends.size() == 1 && PDT.dominates(Ends[0], Start)) {
    Callback(Ends[0]);
    return true;
  }
  SmallPtrSet<BasicBlock *, 2> EndBlocks;
  for (auto *End : Ends) {
    EndBlocks.insert(End->getParent());
  }
  SmallVector<Instruction *, 8> ReachableRetVec;
  unsigned NumCoveredExits = 0;
  for (auto *RI : RetVec) {
    if (!isPotentiallyReachable(Start, RI, nullptr, &DT, &LI))
      continue;
    ReachableRetVec.push_back(RI);
    // If there is an end in the same basic block as the return, we know for
    // sure that the return is covered. Otherwise, we can check whether there
    // is a way to reach the RI from the start of the lifetime without passing
    // through an end.
    if (EndBlocks.contains(RI->getParent()) ||
        !isPotentiallyReachable(Start, RI, &EndBlocks, &DT, &LI)) {
      ++NumCoveredExits;
    }
  }
  if (NumCoveredExits == ReachableRetVec.size()) {
    for_each(Ends, Callback);
  } else {
    // If there's a mix of covered and non-covered exits, just put the untag
    // on exits, so we avoid the redundancy of untagging twice.
    for_each(ReachableRetVec, Callback);
    // We may have inserted untag outside of the lifetime interval.
    // Signal the caller to remove the lifetime end call for this alloca.
    return false;
  }
  return true;
}

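// Returns whether the alloca has exactly one lifetime start and, on every
// possible execution, at most one lifetime end: either a single end marker,
// or several markers that are mutually unreachable.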
bool isStandardLifetime(const SmallVectorImpl<IntrinsicInst *> &LifetimeStart,
                        const SmallVectorImpl<IntrinsicInst *> &LifetimeEnd,
                        const DominatorTree *DT, const LoopInfo *LI,
                        size_t MaxLifetimes) {
  // An alloca that has exactly one start and end in every possible execution.
  // If it has multiple ends, they have to be unreachable from each other, so
  // at most one of them is actually used for each execution of the function.
  return LifetimeStart.size() == 1 &&
         (LifetimeEnd.size() == 1 ||
          (LifetimeEnd.size() > 0 &&
           !maybeReachableFromEachOther(LifetimeEnd, DT, LI, MaxLifetimes)));
}

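// If \p Inst exits the function, returns the instruction at which the stack
// memory should be untagged: the musttail call for returns preceded by one
// (the untag must happen before the forwarded call), otherwise the return,
// resume or cleanupret itself. Returns nullptr for non-exiting instructions.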
Instruction *getUntagLocationIfFunctionExit(Instruction &Inst) {
  if (isa<ReturnInst>(Inst)) {
    if (CallInst *CI = Inst.getParent()->getTerminatingMustTailCall())
      return CI;
    return &Inst;
  }
  if (isa<ResumeInst, CleanupReturnInst>(Inst)) {
    return &Inst;
  }
  return nullptr;
}

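// Collects, instruction by instruction, everything the tagging passes need:
// interesting allocas, their lifetime markers and debug records,
// returns-twice calls, and the function exit points at which stack memory
// must be untagged.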
void StackInfoBuilder::visit(Instruction &Inst) {
  // Visit non-intrinsic debug-info records attached to Inst.
  for (DbgVariableRecord &DVR : filterDbgVars(Inst.getDbgRecordRange())) {
    auto AddIfInteresting = [&](Value *V) {
      if (auto *AI = dyn_cast_or_null<AllocaInst>(V)) {
        if (!isInterestingAlloca(*AI))
          return;
        AllocaInfo &AInfo = Info.AllocasToInstrument[AI];
        auto &DVRVec = AInfo.DbgVariableRecords;
        if (DVRVec.empty() || DVRVec.back() != &DVR)
          DVRVec.push_back(&DVR);
      }
    };

    for_each(DVR.location_ops(), AddIfInteresting);
    if (DVR.isDbgAssign())
      AddIfInteresting(DVR.getAddress());
  }

  if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
    if (CI->canReturnTwice()) {
      Info.CallsReturnTwice = true;
    }
  }
  if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
    if (isInterestingAlloca(*AI)) {
      Info.AllocasToInstrument[AI].AI = AI;
    }
    return;
  }
  auto *II = dyn_cast<IntrinsicInst>(&Inst);
  if (II && (II->getIntrinsicID() == Intrinsic::lifetime_start ||
             II->getIntrinsicID() == Intrinsic::lifetime_end)) {
    AllocaInst *AI = findAllocaForValue(II->getArgOperand(1));
    if (!AI) {
      Info.UnrecognizedLifetimes.push_back(&Inst);
      return;
    }
    if (!isInterestingAlloca(*AI))
      return;
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      Info.AllocasToInstrument[AI].LifetimeStart.push_back(II);
    else
      Info.AllocasToInstrument[AI].LifetimeEnd.push_back(II);
    return;
  }
  if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst)) {
    auto AddIfInteresting = [&](Value *V) {
      if (auto *AI = dyn_cast_or_null<AllocaInst>(V)) {
        if (!isInterestingAlloca(*AI))
          return;
        AllocaInfo &AInfo = Info.AllocasToInstrument[AI];
        auto &DVIVec = AInfo.DbgVariableIntrinsics;
        if (DVIVec.empty() || DVIVec.back() != DVI)
          DVIVec.push_back(DVI);
      }
    };
    for_each(DVI->location_ops(), AddIfInteresting);
    if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(DVI))
      AddIfInteresting(DAI->getAddress());
  }

  Instruction *ExitUntag = getUntagLocationIfFunctionExit(Inst);
  if (ExitUntag)
    Info.RetVec.push_back(ExitUntag);
}

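// Returns whether \p AI is an alloca worth instrumenting: a static,
// fixed-size, non-promotable stack slot that StackSafetyAnalysis (if
// available) has not already proven safe.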
bool StackInfoBuilder::isInterestingAlloca(const AllocaInst &AI) {
  return (AI.getAllocatedType()->isSized() &&
          // FIXME: support vscale.
          !AI.getAllocatedType()->isScalableTy() &&
          // FIXME: instrument dynamic allocas, too
          AI.isStaticAlloca() &&
          // alloca() may be called with 0 size, ignore it.
          memtag::getAllocaSizeInBytes(AI) > 0 &&
          // We are only interested in allocas not promotable to registers.
          // Promotable allocas are common under -O0.
          !isAllocaPromotable(&AI) &&
          // inalloca allocas are not treated as static, and we don't want
          // dynamic alloca instrumentation for them as well.
          !AI.isUsedWithInAlloca() &&
          // swifterror allocas are register promoted by ISel
          !AI.isSwiftError()) &&
         // safe allocas are not interesting
         !(SSI && SSI->isSafe(AI));
}

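// Returns the static size of the alloca in bytes. Only valid for the allocas
// isInterestingAlloca accepts (sized, non-scalable, static), where the
// allocation size is always known.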
uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
  const DataLayout &DL = AI.getDataLayout();
  return *AI.getAllocationSize(DL);
}

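// Rounds the alloca up to at least \p Alignment and, if that changes its
// size, replaces it with a new alloca of a struct type that appends i8-array
// padding, forwarding name, flags and metadata to the replacement.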
void alignAndPadAlloca(memtag::AllocaInfo &Info, llvm::Align Alignment) {
  const Align NewAlignment = std::max(Info.AI->getAlign(), Alignment);
  Info.AI->setAlignment(NewAlignment);
  auto &Ctx = Info.AI->getFunction()->getContext();

  uint64_t Size = getAllocaSizeInBytes(*Info.AI);
  uint64_t AlignedSize = alignTo(Size, Alignment);
  if (Size == AlignedSize)
    return;

  // Add padding to the alloca.
  Type *AllocatedType =
      Info.AI->isArrayAllocation()
          ? ArrayType::get(
                Info.AI->getAllocatedType(),
                cast<ConstantInt>(Info.AI->getArraySize())->getZExtValue())
          : Info.AI->getAllocatedType();
  Type *PaddingType = ArrayType::get(Type::getInt8Ty(Ctx), AlignedSize - Size);
  Type *TypeWithPadding = StructType::get(AllocatedType, PaddingType);
  auto *NewAI = new AllocaInst(TypeWithPadding, Info.AI->getAddressSpace(),
                               nullptr, "", Info.AI->getIterator());
  NewAI->takeName(Info.AI);
  NewAI->setAlignment(Info.AI->getAlign());
  NewAI->setUsedWithInAlloca(Info.AI->isUsedWithInAlloca());
  NewAI->setSwiftError(Info.AI->isSwiftError());
  NewAI->copyMetadata(*Info.AI);

  Value *NewPtr = NewAI;

  // TODO: Remove when typed pointers are dropped.
  if (Info.AI->getType() != NewAI->getType())
    NewPtr =
        new BitCastInst(NewAI, Info.AI->getType(), "", Info.AI->getIterator());

  Info.AI->replaceAllUsesWith(NewPtr);
  Info.AI->eraseFromParent();
  Info.AI = NewAI;
}

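// Returns whether \p V is a lifetime.start or lifetime.end intrinsic call.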
bool isLifetimeIntrinsic(Value *V) {
  auto *II = dyn_cast<IntrinsicInst>(V);
  return II && II->isLifetimeStartOrEnd();
}

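// Emits a call to llvm.read_register for the named target register and
// returns the result as an intptr-sized integer.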
Value *readRegister(IRBuilder<> &IRB, StringRef Name) {
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Function *ReadRegister = Intrinsic::getDeclaration(
      M, Intrinsic::read_register, IRB.getIntPtrTy(M->getDataLayout()));
  MDNode *MD =
      MDNode::get(M->getContext(), {MDString::get(M->getContext(), Name)});
  Value *Args[] = {MetadataAsValue::get(M->getContext(), MD)};
  return IRB.CreateCall(ReadRegister, Args);
}

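// Returns a value approximating the current program counter: the pc register
// on AArch64, and the address of the enclosing function elsewhere.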
Value *getPC(const Triple &TargetTriple, IRBuilder<> &IRB) {
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  if (TargetTriple.getArch() == Triple::aarch64)
    return memtag::readRegister(IRB, "pc");
  return IRB.CreatePtrToInt(IRB.GetInsertBlock()->getParent(),
                            IRB.getIntPtrTy(M->getDataLayout()));
}

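// Returns the current frame address (llvm.frameaddress at depth 0) as an
// intptr-sized integer.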
Value *getFP(IRBuilder<> &IRB) {
  Function *F = IRB.GetInsertBlock()->getParent();
  Module *M = F->getParent();
  auto *GetFrameAddressFn = Intrinsic::getDeclaration(
      M, Intrinsic::frameaddress,
      IRB.getPtrTy(M->getDataLayout().getAllocaAddrSpace()));
  return IRB.CreatePtrToInt(
      IRB.CreateCall(GetFrameAddressFn,
                     {Constant::getNullValue(IRB.getInt32Ty())}),
      IRB.getIntPtrTy(M->getDataLayout()));
}

Value *getAndroidSlotPtr(IRBuilder<> &IRB, int Slot) {
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
  // in Bionic's libc/private/bionic_tls.h.
  Function *ThreadPointerFunc =
      Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
  return IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
                                IRB.CreateCall(ThreadPointerFunc), 8 * Slot);
}

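// Overloads that let annotateDebugRecords treat dbg.assign intrinsics and
// DbgVariableRecords uniformly: each returns its argument only if it
// describes an assignment.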
static DbgAssignIntrinsic *DynCastToDbgAssign(DbgVariableIntrinsic *DVI) {
  return dyn_cast<DbgAssignIntrinsic>(DVI);
}

static DbgVariableRecord *DynCastToDbgAssign(DbgVariableRecord *DVR) {
  return DVR->isDbgAssign() ? DVR : nullptr;
}

void annotateDebugRecords(AllocaInfo &Info, unsigned int Tag) {
  // Helper utility for adding DW_OP_LLVM_tag_offset to debug-info records,
  // abstracted over whether they're intrinsic-stored or DbgVariableRecord
  // stored.
  auto AnnotateDbgRecord = [&](auto *DPtr) {
    // Prepend "tag_offset, N" to the dwarf expression.
    // Tag offset logically applies to the alloca pointer, and it makes sense
    // to put it at the beginning of the expression.
    SmallVector<uint64_t, 8> NewOps = {dwarf::DW_OP_LLVM_tag_offset, Tag};
    for (size_t LocNo = 0; LocNo < DPtr->getNumVariableLocationOps(); ++LocNo)
      if (DPtr->getVariableLocationOp(LocNo) == Info.AI)
        DPtr->setExpression(DIExpression::appendOpsToArg(DPtr->getExpression(),
                                                         NewOps, LocNo));
    if (auto *DAI = DynCastToDbgAssign(DPtr)) {
      if (DAI->getAddress() == Info.AI)
        DAI->setAddressExpression(
            DIExpression::prependOpcodes(DAI->getAddressExpression(), NewOps));
    }
  };

  llvm::for_each(Info.DbgVariableIntrinsics, AnnotateDbgRecord);
  llvm::for_each(Info.DbgVariableRecords, AnnotateDbgRecord);
}

} // namespace memtag
} // namespace llvm