//===-- MemoryOpRemark.cpp - Auto-init remark analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the analysis for the "auto-init" remark.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/MemoryOpRemark.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;
using namespace llvm::ore;

MemoryOpRemark::~MemoryOpRemark() = default;

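// Returns true if the memory operation performed by \p I is one this analysis
// knows how to describe: a store, a memory intrinsic, or a call to a
// recognized library routine such as memcpy, memset, bzero or bcopy.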
bool MemoryOpRemark::canHandle(const Instruction *I,
                               const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;

  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::memcpy_inline:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
    case Intrinsic::memset:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      return true;
    default:
      return false;
    }
  }

  if (auto *CI = dyn_cast<CallInst>(I)) {
    auto *CF = CI->getCalledFunction();
    if (!CF)
      return false;

    if (!CF->hasName())
      return false;

    LibFunc LF;
    bool KnownLibCall = TLI.getLibFunc(*CF, LF) && TLI.has(LF);
    if (!KnownLibCall)
      return false;

    switch (LF) {
    case LibFunc_memcpy_chk:
    case LibFunc_mempcpy_chk:
    case LibFunc_memset_chk:
    case LibFunc_memmove_chk:
    case LibFunc_memcpy:
    case LibFunc_mempcpy:
    case LibFunc_memset:
    case LibFunc_memmove:
    case LibFunc_bzero:
    case LibFunc_bcopy:
      return true;
    default:
      return false;
    }
  }

  return false;
}

void MemoryOpRemark::visit(const Instruction *I) {
  // Depending on the kind of instruction, we can provide more information:

  // For stores:
  // * size
  // * volatile / atomic
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    visitStore(*SI);
    return;
  }

  // For intrinsics:
  // * user-friendly name
  // * size
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    visitIntrinsicCall(*II);
    return;
  }

  // For calls:
  // * known/unknown function (e.g. the compiler knows bzero, but it doesn't
  //                                know my_bzero)
  // * memory operation size
  if (auto *CI = dyn_cast<CallInst>(I)) {
    visitCall(*CI);
    return;
  }

  visitUnknown(*I);
}

std::string MemoryOpRemark::explainSource(StringRef Type) const {
  return (Type + ".").str();
}

StringRef MemoryOpRemark::remarkName(RemarkKind RK) const {
  switch (RK) {
  case RK_Store:
    return "MemoryOpStore";
  case RK_Unknown:
    return "MemoryOpUnknown";
  case RK_IntrinsicCall:
    return "MemoryOpIntrinsicCall";
  case RK_Call:
    return "MemoryOpCall";
  }
  llvm_unreachable("missing RemarkKind case");
}

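// Append the inlined/volatile/atomic properties of the memory operation to the
// remark. Properties that hold show up in the remark message; the ones that do
// not are only recorded as extra arguments in the serialized remark. \p Inline
// may be null when the notion of inlining does not apply (e.g. plain stores).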
static void inlineVolatileOrAtomicWithExtraArgs(bool *Inline, bool Volatile,
                                                bool Atomic,
                                                DiagnosticInfoIROptimization &R) {
  if (Inline && *Inline)
    R << " Inlined: " << NV("StoreInlined", true) << ".";
  if (Volatile)
    R << " Volatile: " << NV("StoreVolatile", true) << ".";
  if (Atomic)
    R << " Atomic: " << NV("StoreAtomic", true) << ".";
  // Emit the false cases under ExtraArgs. They won't show up in the remark
  // message, but they will end up in the serialized remarks.
  if ((Inline && !*Inline) || !Volatile || !Atomic)
    R << setExtraArgs();
  if (Inline && !*Inline)
    R << " Inlined: " << NV("StoreInlined", false) << ".";
  if (!Volatile)
    R << " Volatile: " << NV("StoreVolatile", false) << ".";
  if (!Atomic)
    R << " Atomic: " << NV("StoreAtomic", false) << ".";
}

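// Convert a size in bits to a size in bytes, or None if the size is unknown
// or not a multiple of 8.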
static Optional<uint64_t> getSizeInBytes(Optional<uint64_t> SizeInBits) {
  if (!SizeInBits || *SizeInBits % 8 != 0)
    return None;
  return *SizeInBits / 8;
}

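// Build a remark of the kind requested by the subclass: either an analysis
// remark or a missed-optimization remark, depending on diagnosticKind().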
template<typename ...Ts>
std::unique_ptr<DiagnosticInfoIROptimization>
MemoryOpRemark::makeRemark(Ts... Args) {
  switch (diagnosticKind()) {
  case DK_OptimizationRemarkAnalysis:
    return std::make_unique<OptimizationRemarkAnalysis>(Args...);
  case DK_OptimizationRemarkMissed:
    return std::make_unique<OptimizationRemarkMissed>(Args...);
  default:
    llvm_unreachable("unexpected DiagnosticKind");
  }
}

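// Describe a plain store: the number of bytes written, the written variables,
// and whether the store is volatile and/or atomic. For AutoInitRemark the
// emitted message reads roughly like (variable name illustrative):
//   Store inserted by -ftrivial-auto-var-init.
//   Store size: 4 bytes.
//    Written Variables: buf (4 bytes).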
void MemoryOpRemark::visitStore(const StoreInst &SI) {
  bool Volatile = SI.isVolatile();
  bool Atomic = SI.isAtomic();
  int64_t Size = DL.getTypeStoreSize(SI.getOperand(0)->getType());

  auto R = makeRemark(RemarkPass.data(), remarkName(RK_Store), &SI);
  *R << explainSource("Store") << "\nStore size: " << NV("StoreSize", Size)
     << " bytes.";
  visitPtr(SI.getOperand(1), /*IsRead=*/false, *R);
  inlineVolatileOrAtomicWithExtraArgs(nullptr, Volatile, Atomic, *R);
  ORE.emit(*R);
}

void MemoryOpRemark::visitUnknown(const Instruction &I) {
  auto R = makeRemark(RemarkPass.data(), remarkName(RK_Unknown), &I);
  *R << explainSource("Initialization");
  ORE.emit(*R);
}

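// Describe a memory intrinsic using the name of the libc function it
// corresponds to (memcpy, memmove, memset), along with its size operand, the
// variables it reads and writes, and its inlined/volatile/atomic properties.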
void MemoryOpRemark::visitIntrinsicCall(const IntrinsicInst &II) {
  SmallString<32> CallTo;
  bool Atomic = false;
  bool Inline = false;
  switch (II.getIntrinsicID()) {
  case Intrinsic::memcpy_inline:
    CallTo = "memcpy";
    Inline = true;
    break;
  case Intrinsic::memcpy:
    CallTo = "memcpy";
    break;
  case Intrinsic::memmove:
    CallTo = "memmove";
    break;
  case Intrinsic::memset:
    CallTo = "memset";
    break;
  case Intrinsic::memcpy_element_unordered_atomic:
    CallTo = "memcpy";
    Atomic = true;
    break;
  case Intrinsic::memmove_element_unordered_atomic:
    CallTo = "memmove";
    Atomic = true;
    break;
  case Intrinsic::memset_element_unordered_atomic:
    CallTo = "memset";
    Atomic = true;
    break;
  default:
    return visitUnknown(II);
  }

  auto R = makeRemark(RemarkPass.data(), remarkName(RK_IntrinsicCall), &II);
  visitCallee(CallTo.str(), /*KnownLibCall=*/true, *R);
  visitSizeOperand(II.getOperand(2), *R);

  auto *CIVolatile = dyn_cast<ConstantInt>(II.getOperand(3));
  // No such thing as a memory intrinsic that is both atomic and volatile.
  bool Volatile = !Atomic && CIVolatile && CIVolatile->getZExtValue();
  switch (II.getIntrinsicID()) {
  case Intrinsic::memcpy_inline:
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memcpy_element_unordered_atomic:
  case Intrinsic::memmove_element_unordered_atomic:
    visitPtr(II.getOperand(1), /*IsRead=*/true, *R);
    visitPtr(II.getOperand(0), /*IsRead=*/false, *R);
    break;
  case Intrinsic::memset:
  case Intrinsic::memset_element_unordered_atomic:
    visitPtr(II.getOperand(0), /*IsRead=*/false, *R);
    break;
  }
  inlineVolatileOrAtomicWithExtraArgs(&Inline, Volatile, Atomic, *R);
  ORE.emit(*R);
}

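// Describe a call: whether the callee is a known library function, and, when
// it is, the memory operation size and the variables it reads and writes.
// Indirect calls (no static callee) are reported as unknown instructions.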
void MemoryOpRemark::visitCall(const CallInst &CI) {
  Function *F = CI.getCalledFunction();
  if (!F)
    return visitUnknown(CI);

  LibFunc LF;
  bool KnownLibCall = TLI.getLibFunc(*F, LF) && TLI.has(LF);
  auto R = makeRemark(RemarkPass.data(), remarkName(RK_Call), &CI);
  visitCallee(F, KnownLibCall, *R);
  // LF is only meaningful when the callee was recognized as a library call.
  if (KnownLibCall)
    visitKnownLibCall(CI, LF, *R);
  ORE.emit(*R);
}

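// Emit the "Call to <callee>" part of the remark. \p F is either a Function *
// (from visitCall) or a StringRef holding the libc-style name of an intrinsic
// (from visitIntrinsicCall).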
template <typename FTy>
void MemoryOpRemark::visitCallee(FTy F, bool KnownLibCall,
                                 DiagnosticInfoIROptimization &R) {
  R << "Call to ";
  if (!KnownLibCall)
    R << NV("UnknownLibCall", "unknown") << " function ";
  R << NV("Callee", F) << explainSource("");
}

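// Report the size operand and the pointers accessed by a recognized library
// call. The operand positions depend on the callee's signature (e.g. memset
// takes (dst, value, len) while bzero takes (dst, len)).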
void MemoryOpRemark::visitKnownLibCall(const CallInst &CI, LibFunc LF,
                                       DiagnosticInfoIROptimization &R) {
  switch (LF) {
  default:
    return;
  case LibFunc_memset_chk:
  case LibFunc_memset:
    visitSizeOperand(CI.getOperand(2), R);
    visitPtr(CI.getOperand(0), /*IsRead=*/false, R);
    break;
  case LibFunc_bzero:
    visitSizeOperand(CI.getOperand(1), R);
    visitPtr(CI.getOperand(0), /*IsRead=*/false, R);
    break;
  case LibFunc_memcpy_chk:
  case LibFunc_mempcpy_chk:
  case LibFunc_memmove_chk:
  case LibFunc_memcpy:
  case LibFunc_mempcpy:
  case LibFunc_memmove:
    visitSizeOperand(CI.getOperand(2), R);
    visitPtr(CI.getOperand(1), /*IsRead=*/true, R);
    visitPtr(CI.getOperand(0), /*IsRead=*/false, R);
    break;
  // bcopy takes (src, dst, len): the pointer roles are swapped compared to the
  // memcpy family above.
  case LibFunc_bcopy:
    visitSizeOperand(CI.getOperand(2), R);
    visitPtr(CI.getOperand(0), /*IsRead=*/true, R);
    visitPtr(CI.getOperand(1), /*IsRead=*/false, R);
    break;
  }
}

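// Report the size of the memory operation when it is a compile-time constant;
// nothing is emitted for runtime-dependent sizes.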
void MemoryOpRemark::visitSizeOperand(Value *V,
                                      DiagnosticInfoIROptimization &R) {
  if (auto *Len = dyn_cast<ConstantInt>(V)) {
    uint64_t Size = Len->getZExtValue();
    R << " Memory operation size: " << NV("StoreSize", Size) << " bytes.";
  }
}

static Optional<StringRef> nameOrNone(const Value *V) {
  if (V->hasName())
    return V->getName();
  return None;
}

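// Collect name and size information for \p V, the underlying object of a
// pointer operand. Globals are sized from their value type, locals preferably
// from the debug info attached through llvm.dbg.declare/llvm.dbg.addr, and
// otherwise from the alloca itself.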
void MemoryOpRemark::visitVariable(const Value *V,
                                   SmallVectorImpl<VariableInfo> &Result) {
  if (auto *GV = dyn_cast<GlobalVariable>(V)) {
    auto *Ty = GV->getValueType();
    // getTypeSizeInBits returns bits; convert to bytes like the other paths.
    Optional<uint64_t> Size =
        getSizeInBytes(DL.getTypeSizeInBits(Ty).getFixedSize());
    VariableInfo Var{nameOrNone(GV), Size};
    if (!Var.isEmpty())
      Result.push_back(std::move(Var));
    return;
  }

  // If we find some information in the debug info, take that.
  bool FoundDI = false;
  // Try to get an llvm.dbg.declare, which has a DILocalVariable giving us the
  // real debug info name and size of the variable.
  for (const DbgVariableIntrinsic *DVI :
       FindDbgAddrUses(const_cast<Value *>(V))) {
    if (DILocalVariable *DILV = DVI->getVariable()) {
      Optional<uint64_t> DISize = getSizeInBytes(DILV->getSizeInBits());
      VariableInfo Var{DILV->getName(), DISize};
      if (!Var.isEmpty()) {
        Result.push_back(std::move(Var));
        FoundDI = true;
      }
    }
  }
  if (FoundDI) {
    assert(!Result.empty());
    return;
  }

  const auto *AI = dyn_cast<AllocaInst>(V);
  if (!AI)
    return;

  // If not, get it from the alloca.
  Optional<TypeSize> TySize = AI->getAllocationSizeInBits(DL);
  Optional<uint64_t> Size =
      TySize ? getSizeInBytes(TySize->getFixedSize()) : None;
  VariableInfo Var{nameOrNone(AI), Size};
  if (!Var.isEmpty())
    Result.push_back(std::move(Var));
}

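// Describe the variables behind a pointer operand. If no underlying variable
// can be identified, fall back to the number of dereferenceable bytes known
// for the pointer.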
void MemoryOpRemark::visitPtr(Value *Ptr, bool IsRead,
                              DiagnosticInfoIROptimization &R) {
  // Check whether Ptr is a known variable we can give more information on.
  SmallVector<Value *, 2> Objects;
  getUnderlyingObjectsForCodeGen(Ptr, Objects);
  SmallVector<VariableInfo, 2> VIs;
  for (const Value *V : Objects)
    visitVariable(V, VIs);

  if (VIs.empty()) {
    bool CanBeNull;
    bool CanBeFreed;
    uint64_t Size =
        Ptr->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
    if (!Size)
      return;
    VIs.push_back({None, Size});
  }

  R << (IsRead ? "\n Read Variables: " : "\n Written Variables: ");
  for (unsigned i = 0; i < VIs.size(); ++i) {
    const VariableInfo &VI = VIs[i];
    assert(!VI.isEmpty() && "No extra content to display.");
    if (i != 0)
      R << ", ";
    if (VI.Name)
      R << NV(IsRead ? "RVarName" : "WVarName", *VI.Name);
    else
      R << NV(IsRead ? "RVarName" : "WVarName", "<unknown>");
    if (VI.Size)
      R << " (" << NV(IsRead ? "RVarSize" : "WVarSize", *VI.Size) << " bytes)";
  }
  R << ".";
}

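// The auto-init remark only applies to instructions carrying !annotation
// metadata with an "auto-init" entry, as added by the frontend when lowering
// -ftrivial-auto-var-init.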
bool AutoInitRemark::canHandle(const Instruction *I) {
  if (!I->hasMetadata(LLVMContext::MD_annotation))
    return false;
  return any_of(I->getMetadata(LLVMContext::MD_annotation)->operands(),
                [](const MDOperand &Op) {
                  return cast<MDString>(Op.get())->getString() == "auto-init";
                });
}

std::string AutoInitRemark::explainSource(StringRef Type) const {
  return (Type + " inserted by -ftrivial-auto-var-init.").str();
}

StringRef AutoInitRemark::remarkName(RemarkKind RK) const {
  switch (RK) {
  case RK_Store:
    return "AutoInitStore";
  case RK_Unknown:
    return "AutoInitUnknownInstruction";
  case RK_IntrinsicCall:
    return "AutoInitIntrinsicCall";
  case RK_Call:
    return "AutoInitCall";
  }
  llvm_unreachable("missing RemarkKind case");
}