// llvm/lib/Transforms/Utils/VNCoercion.cpp
#include "llvm/Transforms/Utils/VNCoercion.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "vncoerce"

namespace llvm {
namespace VNCoercion {

static bool isFirstClassAggregateOrScalableType(Type *Ty) {
  return Ty->isStructTy() || Ty->isArrayTy() || isa<ScalableVectorType>(Ty);
}

/// Return true if coerceAvailableValueToLoadType will succeed.
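/// For example, an i64 store can feed an i32 or float load (the stored value
/// is at least as large and both sizes are whole bytes), but an i32 store
/// cannot feed an i64 load.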
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,
                                     const DataLayout &DL) {
  Type *StoredTy = StoredVal->getType();

  if (StoredTy == LoadTy)
    return true;

  // If the loaded/stored value is a first class array/struct, or scalable type,
  // don't try to transform them. We need to be able to bitcast to integer.
  if (isFirstClassAggregateOrScalableType(LoadTy) ||
      isFirstClassAggregateOrScalableType(StoredTy))
    return false;

  uint64_t StoreSize = DL.getTypeSizeInBits(StoredTy).getFixedSize();

  // The store size must be byte-aligned to support future type casts.
  if (llvm::alignTo(StoreSize, 8) != StoreSize)
    return false;

  // The store has to be at least as big as the load.
  if (StoreSize < DL.getTypeSizeInBits(LoadTy).getFixedSize())
    return false;

  bool StoredNI = DL.isNonIntegralPointerType(StoredTy->getScalarType());
  bool LoadNI = DL.isNonIntegralPointerType(LoadTy->getScalarType());
  // Don't coerce non-integral pointers to integers or vice versa.
  if (StoredNI != LoadNI) {
    // As a special case, allow coercion of a memset used to initialize
    // an array with null.  Despite non-integral pointers not generally having
    // a specific bit pattern, we do assume null is zero.
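    // E.g. a store of the constant null may feed a load of a non-integral
    // pointer type, since null is the all-zero bit pattern here.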
    if (auto *CI = dyn_cast<Constant>(StoredVal))
      return CI->isNullValue();
    return false;
  } else if (StoredNI && LoadNI &&
             StoredTy->getPointerAddressSpace() !=
                 LoadTy->getPointerAddressSpace()) {
    return false;
  }

  // The implementation below uses inttoptr for vectors of unequal size; we
  // can't allow this for non-integral pointers. We could teach it to extract
  // exact subvectors if desired.
  if (StoredNI && StoreSize != DL.getTypeSizeInBits(LoadTy).getFixedSize())
    return false;

  return true;
}

template <class T, class HelperClass>
static T *coerceAvailableValueToLoadTypeHelper(T *StoredVal, Type *LoadedTy,
                                               HelperClass &Helper,
                                               const DataLayout &DL) {
  assert(canCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, DL) &&
         "precondition violation - materialization can't fail");
  if (auto *C = dyn_cast<Constant>(StoredVal))
    StoredVal = ConstantFoldConstant(C, DL);

  // If this is already the right type, just return it.
  Type *StoredValTy = StoredVal->getType();

  uint64_t StoredValSize = DL.getTypeSizeInBits(StoredValTy).getFixedSize();
  uint64_t LoadedValSize = DL.getTypeSizeInBits(LoadedTy).getFixedSize();

  // If the store and reload are the same size, we can always reuse it.
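  // E.g. a stored float can satisfy an i32 load via a single bitcast, and a
  // stored pointer can satisfy a load of a differently typed pointer.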
  if (StoredValSize == LoadedValSize) {
    // Pointer to Pointer -> use bitcast.
    if (StoredValTy->isPtrOrPtrVectorTy() && LoadedTy->isPtrOrPtrVectorTy()) {
      StoredVal = Helper.CreateBitCast(StoredVal, LoadedTy);
    } else {
      // Convert source pointers to integers, which can be bitcast.
      if (StoredValTy->isPtrOrPtrVectorTy()) {
        StoredValTy = DL.getIntPtrType(StoredValTy);
        StoredVal = Helper.CreatePtrToInt(StoredVal, StoredValTy);
      }

      Type *TypeToCastTo = LoadedTy;
      if (TypeToCastTo->isPtrOrPtrVectorTy())
        TypeToCastTo = DL.getIntPtrType(TypeToCastTo);

      if (StoredValTy != TypeToCastTo)
        StoredVal = Helper.CreateBitCast(StoredVal, TypeToCastTo);

      // Cast to pointer if the load needs a pointer type.
      if (LoadedTy->isPtrOrPtrVectorTy())
        StoredVal = Helper.CreateIntToPtr(StoredVal, LoadedTy);
    }

    if (auto *C = dyn_cast<ConstantExpr>(StoredVal))
      StoredVal = ConstantFoldConstant(C, DL);

    return StoredVal;
  }
  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
  assert(StoredValSize >= LoadedValSize &&
         "canCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPtrOrPtrVectorTy()) {
    StoredValTy = DL.getIntPtrType(StoredValTy);
    StoredVal = Helper.CreatePtrToInt(StoredVal, StoredValTy);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoredValSize);
    StoredVal = Helper.CreateBitCast(StoredVal, StoredValTy);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
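  // E.g. for an i64 store feeding an i32 load, the four bytes the load reads
  // hold the high half of the i64, so shift right by 32 before truncating.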
  if (DL.isBigEndian()) {
    uint64_t ShiftAmt = DL.getTypeStoreSizeInBits(StoredValTy).getFixedSize() -
                        DL.getTypeStoreSizeInBits(LoadedTy).getFixedSize();
    StoredVal = Helper.CreateLShr(
        StoredVal, ConstantInt::get(StoredVal->getType(), ShiftAmt));
  }

  // Truncate the integer to the right size now.
  Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadedValSize);
  StoredVal = Helper.CreateTruncOrBitCast(StoredVal, NewIntTy);

  if (LoadedTy != NewIntTy) {
    // If the result is a pointer, inttoptr.
    if (LoadedTy->isPtrOrPtrVectorTy())
      StoredVal = Helper.CreateIntToPtr(StoredVal, LoadedTy);
    else
      // Otherwise, bitcast.
      StoredVal = Helper.CreateBitCast(StoredVal, LoadedTy);
  }

  if (auto *C = dyn_cast<Constant>(StoredVal))
    StoredVal = ConstantFoldConstant(C, DL);

  return StoredVal;
}

/// If we saw a store of a value to memory and then a load from a must-aliased
/// pointer of a different type, try to coerce the stored value to the loaded
/// type.  LoadedTy is the type of the load we want to replace.  IRB is the
/// IRBuilder used to insert new instructions.
///
/// If we can't do it, return null.
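///
/// For example, a load of i32 fed by `store float %f, float* %p` is
/// materialized as `bitcast float %f to i32`.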
Value *coerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy,
                                      IRBuilderBase &IRB,
                                      const DataLayout &DL) {
  return coerceAvailableValueToLoadTypeHelper(StoredVal, LoadedTy, IRB, DL);
}

/// This function is called when we have a memdep query of a load that ends up
/// being a clobbering memory write (store, memset, memcpy, memmove).  This
/// means that the write *may* provide bits used by the load but we can't be
/// sure because the pointers don't must-alias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
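///
/// For example, an 8-byte store to P feeding a 4-byte load from P+2 (relative
/// to the same base pointer) fully covers the load, so this returns 2.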
static int analyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const DataLayout &DL) {
  // If the loaded value is a first class array/struct, or scalable type,
  // don't try to transform it. We need to be able to bitcast to integer.
  if (isFirstClassAggregateOrScalableType(LoadTy))
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase =
      GetPointerBaseWithConstantOffset(WritePtr, StoreOffset, DL);
  Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, DL);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.  One case is forwarding a memset
  // to a load from the base of the memset.

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy).getFixedSize();

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits / 8; // Convert to bytes.
  LoadSize /= 8;

  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset + int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset + int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure)
    return -1;

  // If the load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset + StoreSize < LoadOffset + LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset - StoreOffset;
}

/// This function is called when we have a memdep query of a load that ends up
/// being a clobbering store.
int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
                                   StoreInst *DepSI, const DataLayout &DL) {
  auto *StoredVal = DepSI->getValueOperand();

  // Cannot handle reading from a store of first-class aggregate or scalable
  // type.
  if (isFirstClassAggregateOrScalableType(StoredVal->getType()))
    return -1;

  if (!canCoerceMustAliasedValueToLoad(StoredVal, LoadTy, DL))
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize =
      DL.getTypeSizeInBits(DepSI->getValueOperand()->getType()).getFixedSize();
  return analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, StorePtr, StoreSize,
                                        DL);
}

/// Looks at a memory location for a load (specified by MemLocBase, MemLocOffs,
/// and MemLocSize) and compares it against a load.
///
/// If the specified load could be safely widened to a larger integer load
/// that is 1) still efficient, 2) safe for the target, and 3) would provide
/// the specified memory location value, then this function returns the size
/// in bytes of the load width to use.  If not, this returns zero.
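///
/// For example, an i8 load at P with 4-byte alignment, queried against a
/// one-byte location at P+3, can be widened to a 4-byte load that covers
/// both (assuming the target has legal 32-bit integers).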
static unsigned getLoadLoadClobberFullWidthSize(const Value *MemLocBase,
                                                int64_t MemLocOffs,
                                                unsigned MemLocSize,
                                                const LoadInst *LI) {
  // We can only extend simple integer loads.
  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple())
    return 0;

  // Load widening is hostile to ThreadSanitizer: it may cause false positives
  // or make the reports more cryptic (access sizes are wrong).
  if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
    return 0;

  const DataLayout &DL = LI->getModule()->getDataLayout();

  // Get the base of this load.
  int64_t LIOffs = 0;
  const Value *LIBase =
      GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, DL);

  // If the two pointers are not based on the same pointer, we can't tell that
  // they are related.
  if (LIBase != MemLocBase)
    return 0;

  // Okay, the two values are based on the same pointer, but returned as
  // no-alias.  This happens when we have things like two byte loads at "P+1"
  // and "P+3".  Check to see if increasing the size of the "LI" load up to its
  // alignment (or the largest native integer type) will allow us to load all
  // the bits required by MemLoc.

  // If MemLoc is before LI, then no widening of LI will help us out.
  if (MemLocOffs < LIOffs)
    return 0;

  // Get the alignment of the load in bytes.  We assume that it is safe to load
  // any legal integer up to this size without a problem.  For example, if we're
  // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can
  // widen it up to an i32 load.  If it is known 2-byte aligned, we can widen it
  // to i16.
  unsigned LoadAlign = LI->getAlignment();

  int64_t MemLocEnd = MemLocOffs + MemLocSize;

  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
  if (LIOffs + LoadAlign < MemLocEnd)
    return 0;

  // This is the size of the load to try.  Start with the next larger power of
  // two.
  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits() / 8U;
  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);

  while (true) {
    // If this load size is bigger than our known alignment or would not fit
    // into a native integer register, then we fail.
    if (NewLoadByteSize > LoadAlign ||
        !DL.fitsInLegalInteger(NewLoadByteSize * 8))
      return 0;

    if (LIOffs + NewLoadByteSize > MemLocEnd &&
        (LI->getParent()->getParent()->hasFnAttribute(
             Attribute::SanitizeAddress) ||
         LI->getParent()->getParent()->hasFnAttribute(
             Attribute::SanitizeHWAddress)))
      // We will be reading past the location accessed by the original program.
      // While this is safe in a regular build, Address Safety analysis tools
      // may start reporting false warnings. So, don't do widening.
      return 0;

    // If a load of this width would include all of MemLoc, then we succeed.
    if (LIOffs + NewLoadByteSize >= MemLocEnd)
      return NewLoadByteSize;

    NewLoadByteSize <<= 1;
  }
}

/// This function is called when we have a memdep query of a load that ends up
/// being clobbered by another load.  See if the other load can feed into the
/// second load.
int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI,
                                  const DataLayout &DL) {
  // Cannot handle reading from a load of first-class aggregate yet.
  if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
    return -1;

  if (!canCoerceMustAliasedValueToLoad(DepLI, LoadTy, DL))
    return -1;

  Value *DepPtr = DepLI->getPointerOperand();
  uint64_t DepSize = DL.getTypeSizeInBits(DepLI->getType()).getFixedSize();
  int R = analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, DL);
  if (R != -1)
    return R;

  // If we have a load/load clobber and DepLI can be widened to cover this
  // load, then we should widen it!
  int64_t LoadOffs = 0;
  const Value *LoadBase =
      GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, DL);
  unsigned LoadSize = DL.getTypeStoreSize(LoadTy).getFixedSize();

  unsigned Size =
      getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI);
  if (Size == 0)
    return -1;

  // Check non-obvious conditions enforced by MDA which we rely on for being
  // able to materialize this potentially available value.
  assert(DepLI->isSimple() && "Cannot widen volatile/atomic load!");
  assert(DepLI->getType()->isIntegerTy() && "Can't widen non-integer load");

  return analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size * 8, DL);
}

int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
                                     MemIntrinsic *MI, const DataLayout &DL) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (!SizeCst)
    return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue() * 8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset) {
    if (DL.isNonIntegralPointerType(LoadTy->getScalarType())) {
      auto *CI = dyn_cast<ConstantInt>(cast<MemSetInst>(MI)->getValue());
      if (!CI || !CI->isZero())
        return -1;
    }
    return analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, DL);
  }

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory.  In that case, we can read directly from the
  // constant memory.
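  // E.g. a memcpy from a constant global lets a clobbered load be folded to
  // the bytes at the corresponding offset of the global's initializer.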
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (!Src)
    return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(Src));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                              MemSizeInBits, DL);
  if (Offset == -1)
    return Offset;

  unsigned AS = Src->getType()->getPointerAddressSpace();
  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  if (Offset) {
    Src = ConstantExpr::getBitCast(Src,
                                   Type::getInt8PtrTy(Src->getContext(), AS));
    Constant *OffsetCst =
        ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
    Src = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Src->getContext()),
                                         Src, OffsetCst);
  }
  Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
  if (ConstantFoldLoadFromConstPtr(Src, LoadTy, DL))
    return Offset;
  return -1;
}

template <class T, class HelperClass>
static T *getStoreValueForLoadHelper(T *SrcVal, unsigned Offset, Type *LoadTy,
                                     HelperClass &Helper,
                                     const DataLayout &DL) {
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  // If two pointers are in the same address space, they have the same size,
  // so we don't need to do any truncation, etc. This avoids introducing
  // ptrtoint instructions for pointers that may be non-integral.
  if (SrcVal->getType()->isPointerTy() && LoadTy->isPointerTy() &&
      cast<PointerType>(SrcVal->getType())->getAddressSpace() ==
          cast<PointerType>(LoadTy)->getAddressSpace()) {
    return SrcVal;
  }

  uint64_t StoreSize =
      (DL.getTypeSizeInBits(SrcVal->getType()).getFixedSize() + 7) / 8;
  uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy).getFixedSize() + 7) / 8;
  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (SrcVal->getType()->isPtrOrPtrVectorTy())
    SrcVal = Helper.CreatePtrToInt(SrcVal, DL.getIntPtrType(SrcVal->getType()));
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Helper.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize * 8));

  // Shift the bits to the least significant depending on endianness.
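  // E.g. for an 8-byte store feeding a 2-byte load at Offset 2, the shift is
  // 2 * 8 = 16 bits on little-endian but (8 - 2 - 2) * 8 = 32 on big-endian.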
  unsigned ShiftAmt;
  if (DL.isLittleEndian())
    ShiftAmt = Offset * 8;
  else
    ShiftAmt = (StoreSize - LoadSize - Offset) * 8;
  if (ShiftAmt)
    SrcVal = Helper.CreateLShr(SrcVal,
                               ConstantInt::get(SrcVal->getType(), ShiftAmt));

  if (LoadSize != StoreSize)
    SrcVal = Helper.CreateTruncOrBitCast(SrcVal,
                                         IntegerType::get(Ctx, LoadSize * 8));
  return SrcVal;
}

/// This function is called when we have a memdep query of a load that ends up
/// being a clobbering store.  This means that the store provides bits used by
/// the load but the pointers don't must-alias.  Check this case to see if
/// there is anything more we can do before we give up.
Value *getStoreValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy,
                            Instruction *InsertPt, const DataLayout &DL) {
  IRBuilder<> Builder(InsertPt);
  SrcVal = getStoreValueForLoadHelper(SrcVal, Offset, LoadTy, Builder, DL);
  return coerceAvailableValueToLoadTypeHelper(SrcVal, LoadTy, Builder, DL);
}

Constant *getConstantStoreValueForLoad(Constant *SrcVal, unsigned Offset,
                                       Type *LoadTy, const DataLayout &DL) {
  ConstantFolder F;
  SrcVal = getStoreValueForLoadHelper(SrcVal, Offset, LoadTy, F, DL);
  return coerceAvailableValueToLoadTypeHelper(SrcVal, LoadTy, F, DL);
}

/// This function is called when we have a memdep query of a load that ends up
/// being a clobbering load.  This means that the clobbering load *may* provide
/// bits used by the load but we can't be sure because the pointers don't
/// must-alias.  Check this case to see if there is anything more we can do
/// before we give up.
Value *getLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy,
                           Instruction *InsertPt, const DataLayout &DL) {
  // If Offset+LoadTy exceeds the size of SrcVal, then we need to widen SrcVal
  // to a larger load.
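  // E.g. an 8-byte SrcVal with Offset 6 and a 4-byte load needs 10 bytes,
  // which is rounded up to a 16-byte widened load below.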
  unsigned SrcValStoreSize =
      DL.getTypeStoreSize(SrcVal->getType()).getFixedSize();
  unsigned LoadSize = DL.getTypeStoreSize(LoadTy).getFixedSize();
  if (Offset + LoadSize > SrcValStoreSize) {
    assert(SrcVal->isSimple() && "Cannot widen volatile/atomic load!");
    assert(SrcVal->getType()->isIntegerTy() && "Can't widen non-integer load");
    // If we have a load/load clobber and SrcVal can be widened to cover this
    // load, then we should widen it to the next power of 2 size big enough!
    unsigned NewLoadSize = Offset + LoadSize;
    if (!isPowerOf2_32(NewLoadSize))
      NewLoadSize = NextPowerOf2(NewLoadSize);

    Value *PtrVal = SrcVal->getPointerOperand();
    // Insert the new load after the old load.  This ensures that subsequent
    // memdep queries will find the new load.  We can't easily remove the old
    // load completely because it is already in the value numbering table.
    IRBuilder<> Builder(SrcVal->getParent(), ++BasicBlock::iterator(SrcVal));
    Type *DestTy = IntegerType::get(LoadTy->getContext(), NewLoadSize * 8);
    Type *DestPTy =
        PointerType::get(DestTy, PtrVal->getType()->getPointerAddressSpace());
    Builder.SetCurrentDebugLocation(SrcVal->getDebugLoc());
    PtrVal = Builder.CreateBitCast(PtrVal, DestPTy);
    LoadInst *NewLoad = Builder.CreateLoad(DestTy, PtrVal);
    NewLoad->takeName(SrcVal);
    NewLoad->setAlignment(SrcVal->getAlign());

    LLVM_DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n");
    LLVM_DEBUG(dbgs() << "TO: " << *NewLoad << "\n");

    // Replace uses of the original load with the wider load.  On a big-endian
    // system, we need to shift down to get the relevant bits.
    Value *RV = NewLoad;
    if (DL.isBigEndian())
      RV = Builder.CreateLShr(RV, (NewLoadSize - SrcValStoreSize) * 8);
    RV = Builder.CreateTrunc(RV, SrcVal->getType());
    SrcVal->replaceAllUsesWith(RV);

    SrcVal = NewLoad;
  }

  return getStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, DL);
}

Constant *getConstantLoadValueForLoad(Constant *SrcVal, unsigned Offset,
                                      Type *LoadTy, const DataLayout &DL) {
  unsigned SrcValStoreSize =
      DL.getTypeStoreSize(SrcVal->getType()).getFixedSize();
  unsigned LoadSize = DL.getTypeStoreSize(LoadTy).getFixedSize();
  if (Offset + LoadSize > SrcValStoreSize)
    return nullptr;
  return getConstantStoreValueForLoad(SrcVal, Offset, LoadTy, DL);
}

template <class T, class HelperClass>
T *getMemInstValueForLoadHelper(MemIntrinsic *SrcInst, unsigned Offset,
                                Type *LoadTy, HelperClass &Helper,
                                const DataLayout &DL) {
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy).getFixedSize() / 8;

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
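    // E.g. a memset of byte 0xAB feeding an i32 load produces the value
    // 0xABABABAB via the shift-and-or doubling below.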
    T *Val = cast<T>(MSI->getValue());
    if (LoadSize != 1)
      Val =
          Helper.CreateZExtOrBitCast(Val, IntegerType::get(Ctx, LoadSize * 8));
    T *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize;) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet * 2 <= LoadSize) {
        T *ShVal = Helper.CreateShl(
            Val, ConstantInt::get(Val->getType(), NumBytesSet * 8));
        Val = Helper.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      T *ShVal = Helper.CreateShl(Val, ConstantInt::get(Val->getType(), 1 * 8));
      Val = Helper.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return coerceAvailableValueToLoadTypeHelper(Val, LoadTy, Helper, DL);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  unsigned AS = Src->getType()->getPointerAddressSpace();
  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  if (Offset) {
    Src = ConstantExpr::getBitCast(Src,
                                   Type::getInt8PtrTy(Src->getContext(), AS));
    Constant *OffsetCst =
        ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
    Src = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Src->getContext()),
                                         Src, OffsetCst);
  }
  Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
  return ConstantFoldLoadFromConstPtr(Src, LoadTy, DL);
}

/// This function is called when we have a memdep query of a load that ends up
/// being a clobbering mem intrinsic.
Value *getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                              Type *LoadTy, Instruction *InsertPt,
                              const DataLayout &DL) {
  IRBuilder<> Builder(InsertPt);
  return getMemInstValueForLoadHelper<Value, IRBuilder<>>(SrcInst, Offset,
                                                          LoadTy, Builder, DL);
}

Constant *getConstantMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                         Type *LoadTy, const DataLayout &DL) {
  // The only case where analyzeLoadFromClobberingMemInst's result cannot be
  // materialized as a constant is a memset of a non-constant value.
  if (auto *MSI = dyn_cast<MemSetInst>(SrcInst))
    if (!isa<Constant>(MSI->getValue()))
      return nullptr;
  ConstantFolder F;
  return getMemInstValueForLoadHelper<Constant, ConstantFolder>(SrcInst, Offset,
                                                                LoadTy, F, DL);
}
} // namespace VNCoercion
} // namespace llvm