//===- MemoryLocation.cpp - Memory location descriptions -------------------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
using namespace llvm;
void LocationSize::print(raw_ostream &OS) const {
  OS << "LocationSize::";
  if (*this == beforeOrAfterPointer())
    OS << "beforeOrAfterPointer";
  else if (*this == afterPointer())
    OS << "afterPointer";
  else if (*this == mapEmpty())
    OS << "mapEmpty";
  else if (*this == mapTombstone())
    OS << "mapTombstone";
  else if (isPrecise())
    OS << "precise(" << getValue() << ')';
  else
    OS << "upperBound(" << getValue() << ')';
}
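
// Illustrative usage sketch (not part of this file): the forms printed above
// as they would appear when debugging with errs().  Assumes only the public
// LocationSize API from MemoryLocation.h plus llvm/Support/raw_ostream.h for
// errs(); kept disabled so the translation unit is unchanged.
#if 0
static void examplePrintLocationSizes() {
  LocationSize::precise(8).print(errs());     // LocationSize::precise(8)
  errs() << '\n';
  LocationSize::upperBound(32).print(errs()); // LocationSize::upperBound(32)
  errs() << '\n';
  LocationSize::afterPointer().print(errs()); // LocationSize::afterPointer
  errs() << '\n';
}
#endif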

MemoryLocation MemoryLocation::get(const LoadInst *LI) {
  const auto &DL = LI->getModule()->getDataLayout();

  return MemoryLocation(
      LI->getPointerOperand(),
      LocationSize::precise(DL.getTypeStoreSize(LI->getType())),
      LI->getAAMetadata());
}

MemoryLocation MemoryLocation::get(const StoreInst *SI) {
  const auto &DL = SI->getModule()->getDataLayout();

  return MemoryLocation(SI->getPointerOperand(),
                        LocationSize::precise(DL.getTypeStoreSize(
                            SI->getValueOperand()->getType())),
                        SI->getAAMetadata());
}

MemoryLocation MemoryLocation::get(const VAArgInst *VI) {
  return MemoryLocation(VI->getPointerOperand(),
                        LocationSize::afterPointer(), VI->getAAMetadata());
}

MemoryLocation MemoryLocation::get(const AtomicCmpXchgInst *CXI) {
  const auto &DL = CXI->getModule()->getDataLayout();

  return MemoryLocation(CXI->getPointerOperand(),
                        LocationSize::precise(DL.getTypeStoreSize(
                            CXI->getCompareOperand()->getType())),
                        CXI->getAAMetadata());
}

MemoryLocation MemoryLocation::get(const AtomicRMWInst *RMWI) {
  const auto &DL = RMWI->getModule()->getDataLayout();

  return MemoryLocation(RMWI->getPointerOperand(),
                        LocationSize::precise(DL.getTypeStoreSize(
                            RMWI->getValOperand()->getType())),
                        RMWI->getAAMetadata());
}
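
// Usage sketch (illustrative only): how an alias-analysis client would
// typically build locations for a load/store pair before querying AA.
// Assumes an AAResults reference (declared in llvm/Analysis/AliasAnalysis.h,
// which this file does not include); kept disabled for that reason.
#if 0
static bool exampleMayOverlap(AAResults &AA, const LoadInst *LI,
                              const StoreInst *SI) {
  MemoryLocation LoadLoc = MemoryLocation::get(LI);   // precise size of the loaded type
  MemoryLocation StoreLoc = MemoryLocation::get(SI);  // precise size of the stored type
  return !AA.isNoAlias(LoadLoc, StoreLoc);
}
#endif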

Optional<MemoryLocation> MemoryLocation::getOrNone(const Instruction *Inst) {
  switch (Inst->getOpcode()) {
  case Instruction::Load:
    return get(cast<LoadInst>(Inst));
  case Instruction::Store:
    return get(cast<StoreInst>(Inst));
  case Instruction::VAArg:
    return get(cast<VAArgInst>(Inst));
  case Instruction::AtomicCmpXchg:
    return get(cast<AtomicCmpXchgInst>(Inst));
  case Instruction::AtomicRMW:
    return get(cast<AtomicRMWInst>(Inst));
  default:
    return None;
  }
}
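
// Sketch (illustrative only): getOrNone lets callers handle arbitrary
// instructions without switching on the opcode themselves, skipping any
// instruction for which no single location can be described.  `BB` and
// `Locs` are assumed caller-provided names, not used elsewhere in this file.
#if 0
static void exampleCollectLocations(const BasicBlock &BB,
                                    SmallVectorImpl<MemoryLocation> &Locs) {
  for (const Instruction &I : BB)
    if (Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(&I))
      Locs.push_back(*Loc);
}
#endif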

MemoryLocation MemoryLocation::getForSource(const MemTransferInst *MTI) {
  return getForSource(cast<AnyMemTransferInst>(MTI));
}

MemoryLocation MemoryLocation::getForSource(const AtomicMemTransferInst *MTI) {
  return getForSource(cast<AnyMemTransferInst>(MTI));
}

MemoryLocation MemoryLocation::getForSource(const AnyMemTransferInst *MTI) {
  assert(MTI->getRawSource() == MTI->getArgOperand(1));
  return getForArgument(MTI, 1, nullptr);
}

MemoryLocation MemoryLocation::getForDest(const MemIntrinsic *MI) {
  return getForDest(cast<AnyMemIntrinsic>(MI));
}

MemoryLocation MemoryLocation::getForDest(const AtomicMemIntrinsic *MI) {
  return getForDest(cast<AnyMemIntrinsic>(MI));
}

MemoryLocation MemoryLocation::getForDest(const AnyMemIntrinsic *MI) {
  assert(MI->getRawDest() == MI->getArgOperand(0));
  return getForArgument(MI, 0, nullptr);
}
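
// Sketch (illustrative only): for a memcpy-like transfer the helpers above
// describe the read side and the written side as two separate locations.
#if 0
static void exampleTransferLocations(const AnyMemTransferInst *MTI) {
  MemoryLocation Src = MemoryLocation::getForSource(MTI); // bytes read
  MemoryLocation Dst = MemoryLocation::getForDest(MTI);   // bytes written
  (void)Src;
  (void)Dst;
}
#endif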

Optional<MemoryLocation>
MemoryLocation::getForDest(const CallBase *CB, const TargetLibraryInfo &TLI) {
  if (!CB->onlyAccessesArgMemory())
    return None;

  if (CB->hasOperandBundles())
    // TODO: remove implementation restriction
    return None;

  Value *UsedV = nullptr;
  Optional<unsigned> UsedIdx;
  for (unsigned i = 0; i < CB->arg_size(); i++) {
    if (!CB->getArgOperand(i)->getType()->isPointerTy())
      continue;
    if (CB->onlyReadsMemory(i))
      continue;
    if (!UsedV) {
      // First potentially writing parameter
      UsedV = CB->getArgOperand(i);
      UsedIdx = i;
      continue;
    }
    UsedIdx = None;
    if (UsedV != CB->getArgOperand(i))
      // Can't describe writing to two distinct locations.
      // TODO: This results in an imprecision when two values derived from the
      // same object are passed as arguments to the same function.
      return None;
  }
  if (!UsedV)
    // We don't currently have a way to represent a "does not write" result
    // and thus have to be conservative and return unknown.
    return None;

  if (UsedIdx)
    return getForArgument(CB, *UsedIdx, &TLI);
  return MemoryLocation::getBeforeOrAfter(UsedV, CB->getAAMetadata());
}
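
// Sketch (illustrative only): a DSE-style client can ask for the single
// location a call may write to; a None result means the call has to be
// treated conservatively.  `TLI` and `Object` are assumed to be provided by
// the caller; the names are hypothetical.
#if 0
static bool exampleCallWritesOnlyTo(const CallBase *CB,
                                    const TargetLibraryInfo &TLI,
                                    const Value *Object) {
  Optional<MemoryLocation> WriteLoc = MemoryLocation::getForDest(CB, TLI);
  return WriteLoc && WriteLoc->Ptr == Object;
}
#endif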

MemoryLocation MemoryLocation::getForArgument(const CallBase *Call,
                                              unsigned ArgIdx,
                                              const TargetLibraryInfo *TLI) {
  AAMDNodes AATags = Call->getAAMetadata();
  const Value *Arg = Call->getArgOperand(ArgIdx);

  // We may be able to produce an exact size for known intrinsics.
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call)) {
    const DataLayout &DL = II->getModule()->getDataLayout();

    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_inline:
    case Intrinsic::memmove:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memory intrinsic");
      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
        return MemoryLocation(Arg, LocationSize::precise(LenCI->getZExtValue()),
                              AATags);
      return MemoryLocation::getAfter(Arg, AATags);

    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      assert(ArgIdx == 1 && "Invalid argument index");
      return MemoryLocation(
          Arg,
          LocationSize::precise(
              cast<ConstantInt>(II->getArgOperand(0))->getZExtValue()),
          AATags);

    case Intrinsic::masked_load:
      assert(ArgIdx == 0 && "Invalid argument index");
      return MemoryLocation(
          Arg,
          LocationSize::upperBound(DL.getTypeStoreSize(II->getType())),
          AATags);

    case Intrinsic::masked_store:
      assert(ArgIdx == 1 && "Invalid argument index");
      return MemoryLocation(
          Arg,
          LocationSize::upperBound(
              DL.getTypeStoreSize(II->getArgOperand(0)->getType())),
          AATags);

    case Intrinsic::invariant_end:
      // The first argument to an invariant.end is a "descriptor" type (e.g. a
      // pointer to an empty struct) which is never actually dereferenced.
      if (ArgIdx == 0)
        return MemoryLocation(Arg, LocationSize::precise(0), AATags);
      assert(ArgIdx == 2 && "Invalid argument index");
      return MemoryLocation(
          Arg,
          LocationSize::precise(
              cast<ConstantInt>(II->getArgOperand(1))->getZExtValue()),
          AATags);

    case Intrinsic::arm_neon_vld1:
      assert(ArgIdx == 0 && "Invalid argument index");
      // LLVM's vld1 and vst1 intrinsics currently only support a single
      // vector register.
      return MemoryLocation(
          Arg, LocationSize::precise(DL.getTypeStoreSize(II->getType())),
          AATags);

    case Intrinsic::arm_neon_vst1:
      assert(ArgIdx == 0 && "Invalid argument index");
      return MemoryLocation(Arg,
                            LocationSize::precise(DL.getTypeStoreSize(
                                II->getArgOperand(1)->getType())),
                            AATags);
    }

    assert(
        !isa<AnyMemTransferInst>(II) &&
        "all memory transfer intrinsics should be handled by the switch above");
  }

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset.  This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  LibFunc F;
  if (TLI && TLI->getLibFunc(*Call, F) && TLI->has(F)) {
    switch (F) {
    case LibFunc_strcpy:
    case LibFunc_strcat:
    case LibFunc_strncat:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for str function");
      return MemoryLocation::getAfter(Arg, AATags);

    case LibFunc_memset_chk: {
      assert(ArgIdx == 0 && "Invalid argument index for memset_chk");
      LocationSize Size = LocationSize::afterPointer();
      if (const auto *Len = dyn_cast<ConstantInt>(Call->getArgOperand(2))) {
        // memset_chk writes at most Len bytes. It may write less, if Len
        // exceeds the specified max size and aborts.
        Size = LocationSize::upperBound(Len->getZExtValue());
      }
      return MemoryLocation(Arg, Size, AATags);
    }
    case LibFunc_strncpy: {
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for strncpy");
      LocationSize Size = LocationSize::afterPointer();
      if (const auto *Len = dyn_cast<ConstantInt>(Call->getArgOperand(2))) {
        // strncpy is guaranteed to write Len bytes, but only reads up to Len
        // bytes.
        Size = ArgIdx == 0 ? LocationSize::precise(Len->getZExtValue())
                           : LocationSize::upperBound(Len->getZExtValue());
      }
      return MemoryLocation(Arg, Size, AATags);
    }
    case LibFunc_memset_pattern16:
    case LibFunc_memset_pattern4:
    case LibFunc_memset_pattern8:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memset_pattern function");
      if (ArgIdx == 1) {
        unsigned Size = 16;
        if (F == LibFunc_memset_pattern4)
          Size = 4;
        else if (F == LibFunc_memset_pattern8)
          Size = 8;
        return MemoryLocation(Arg, LocationSize::precise(Size), AATags);
      }
      if (const ConstantInt *LenCI =
              dyn_cast<ConstantInt>(Call->getArgOperand(2)))
        return MemoryLocation(Arg, LocationSize::precise(LenCI->getZExtValue()),
                              AATags);
      return MemoryLocation::getAfter(Arg, AATags);
    case LibFunc_bcmp:
    case LibFunc_memcmp:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memcmp/bcmp");
      if (const ConstantInt *LenCI =
              dyn_cast<ConstantInt>(Call->getArgOperand(2)))
        return MemoryLocation(Arg, LocationSize::precise(LenCI->getZExtValue()),
                              AATags);
      return MemoryLocation::getAfter(Arg, AATags);
    case LibFunc_memchr:
      assert((ArgIdx == 0) && "Invalid argument index for memchr");
      if (const ConstantInt *LenCI =
              dyn_cast<ConstantInt>(Call->getArgOperand(2)))
        return MemoryLocation(Arg, LocationSize::precise(LenCI->getZExtValue()),
                              AATags);
      return MemoryLocation::getAfter(Arg, AATags);
    case LibFunc_memccpy:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memccpy");
      // We only know an upper bound on the number of bytes read/written.
      if (const ConstantInt *LenCI =
              dyn_cast<ConstantInt>(Call->getArgOperand(3)))
        return MemoryLocation(
            Arg, LocationSize::upperBound(LenCI->getZExtValue()), AATags);
      return MemoryLocation::getAfter(Arg, AATags);
    default:
      break;
    }
  }

  return MemoryLocation::getBeforeOrAfter(Call->getArgOperand(ArgIdx), AATags);
}
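
// Sketch (illustrative only): for `memcpy(dst, src, 16)` the intrinsic switch
// above yields a precise 16-byte location for either pointer argument; a
// non-constant length degrades to getAfter(), and unknown callees fall
// through to getBeforeOrAfter() at the end of the function.  `MCI` and `TLI`
// are assumed caller-provided values.
#if 0
static MemoryLocation exampleMemcpyDest(const MemCpyInst *MCI,
                                        const TargetLibraryInfo *TLI) {
  // Argument index 0 is the destination operand of the intrinsic.
  return MemoryLocation::getForArgument(MCI, 0, TLI);
}
#endif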