//===-- CodeGenTBAA.cpp - TBAA information for LLVM CodeGen ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the code that manages TBAA information and defines the TBAA policy
// for the optimizer to use. Relevant standards text includes:
//
//   C99 6.5p7
//   C++ [basic.lval] (p10 in n3126, p15 in some earlier versions)
//
//===----------------------------------------------------------------------===//

#include "CodeGenTBAA.h"
#include "ABIInfoImpl.h"
#include "CGCXXABI.h"
#include "CGRecordLayout.h"
#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
using namespace clang;
using namespace CodeGen;

CodeGenTBAA::CodeGenTBAA(ASTContext &Ctx, CodeGenTypes &CGTypes,
                         llvm::Module &M, const CodeGenOptions &CGO,
                         const LangOptions &Features)
    : Context(Ctx), CGTypes(CGTypes), Module(M), CodeGenOpts(CGO),
      Features(Features), MDHelper(M.getContext()), Root(nullptr),
      Char(nullptr) {}

CodeGenTBAA::~CodeGenTBAA() {
}

llvm::MDNode *CodeGenTBAA::getRoot() {
  // Define the root of the tree. This identifies the tree, so that
  // if our LLVM IR is linked with LLVM IR from a different front-end
  // (or a different version of this front-end), their TBAA trees will
  // remain distinct, and the optimizer will treat them conservatively.
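  //
  // For example (illustrative), the root is just a named node:
  //   !0 = !{!"Simple C/C++ TBAA"}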
  if (!Root) {
    if (Features.CPlusPlus)
      Root = MDHelper.createTBAARoot("Simple C++ TBAA");
    else
      Root = MDHelper.createTBAARoot("Simple C/C++ TBAA");
  }

  return Root;
}

llvm::MDNode *CodeGenTBAA::createScalarTypeNode(StringRef Name,
                                                llvm::MDNode *Parent,
                                                uint64_t Size) {
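  // Shapes of the nodes created below (illustrative; the exact operand order
  // is defined by llvm::MDBuilder): new struct path type nodes look roughly
  // like
  //   !{<parent>, i64 <size>, !"<name>"}
  // while classic scalar type nodes look roughly like
  //   !{!"<name>", <parent>, i64 0}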
  if (CodeGenOpts.NewStructPathTBAA) {
    llvm::Metadata *Id = MDHelper.createString(Name);
    return MDHelper.createTBAATypeNode(Parent, Size, Id);
  }
  return MDHelper.createTBAAScalarTypeNode(Name, Parent);
}

llvm::MDNode *CodeGenTBAA::getChar() {
  // Define the root of the tree for user-accessible memory. C and C++
  // give special powers to char and certain similar types. However,
  // these special powers only cover user-accessible memory, and don't
  // include things like vtables.
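  //
  // For example (illustrative, classic format), the resulting node sits
  // directly under the TBAA root:
  //   !1 = !{!"omnipotent char", !0, i64 0}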
  if (!Char)
    Char = createScalarTypeNode("omnipotent char", getRoot(), /* Size= */ 1);

  return Char;
}

static bool TypeHasMayAlias(QualType QTy) {
  // Tagged types have declarations, and therefore may have attributes.
  if (auto *TD = QTy->getAsTagDecl())
    if (TD->hasAttr<MayAliasAttr>())
      return true;

  // Also look for may_alias as a declaration attribute on a typedef.
  // FIXME: We should follow GCC and model may_alias as a type attribute
  // rather than as a declaration attribute.
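  //
  // For example (illustrative), both of these place accesses in the char
  // alias class:
  //   struct __attribute__((may_alias)) S { int x; };
  //   typedef int __attribute__((may_alias)) aliasing_int;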
  while (auto *TT = QTy->getAs<TypedefType>()) {
    if (TT->getDecl()->hasAttr<MayAliasAttr>())
      return true;
    QTy = TT->desugar();
  }
  return false;
}

/// Check if the given type is a valid base type to be used in access tags.
static bool isValidBaseType(QualType QTy) {
  if (const RecordType *TTy = QTy->getAs<RecordType>()) {
    const RecordDecl *RD = TTy->getDecl()->getDefinition();
    // Incomplete types are not valid base access types.
    if (!RD)
      return false;
    if (RD->hasFlexibleArrayMember())
      return false;
    // RD can be struct, union, class, interface or enum.
    // For now, we only handle struct and class.
    if (RD->isStruct() || RD->isClass())
      return true;
  }
  return false;
}

llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
  uint64_t Size = Context.getTypeSizeInChars(Ty).getQuantity();

  // Handle builtin types.
  if (const BuiltinType *BTy = dyn_cast<BuiltinType>(Ty)) {
    switch (BTy->getKind()) {
    // Character types are special and can alias anything.
    // In C++, this technically only includes "char" and "unsigned char",
    // and not "signed char". In C, it includes all three. For now,
    // the risk of exploiting this detail in C++ seems likely to outweigh
    // the benefit.
    case BuiltinType::Char_U:
    case BuiltinType::Char_S:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
      return getChar();

    // Unsigned types can alias their corresponding signed types.
    case BuiltinType::UShort:
      return getTypeInfo(Context.ShortTy);
    case BuiltinType::UInt:
      return getTypeInfo(Context.IntTy);
    case BuiltinType::ULong:
      return getTypeInfo(Context.LongTy);
    case BuiltinType::ULongLong:
      return getTypeInfo(Context.LongLongTy);
    case BuiltinType::UInt128:
      return getTypeInfo(Context.Int128Ty);

    case BuiltinType::UShortFract:
      return getTypeInfo(Context.ShortFractTy);
    case BuiltinType::UFract:
      return getTypeInfo(Context.FractTy);
    case BuiltinType::ULongFract:
      return getTypeInfo(Context.LongFractTy);

    case BuiltinType::SatUShortFract:
      return getTypeInfo(Context.SatShortFractTy);
    case BuiltinType::SatUFract:
      return getTypeInfo(Context.SatFractTy);
    case BuiltinType::SatULongFract:
      return getTypeInfo(Context.SatLongFractTy);

    case BuiltinType::UShortAccum:
      return getTypeInfo(Context.ShortAccumTy);
    case BuiltinType::UAccum:
      return getTypeInfo(Context.AccumTy);
    case BuiltinType::ULongAccum:
      return getTypeInfo(Context.LongAccumTy);

    case BuiltinType::SatUShortAccum:
      return getTypeInfo(Context.SatShortAccumTy);
    case BuiltinType::SatUAccum:
      return getTypeInfo(Context.SatAccumTy);
    case BuiltinType::SatULongAccum:
      return getTypeInfo(Context.SatLongAccumTy);

    // Treat all other builtin types as distinct types. This includes
    // treating wchar_t, char16_t, and char32_t as distinct from their
    // "underlying types".
    default:
      return createScalarTypeNode(BTy->getName(Features), getChar(), Size);
    }
  }

  // C++1z [basic.lval]p10: "If a program attempts to access the stored value of
  // an object through a glvalue of other than one of the following types the
  // behavior is undefined: [...] a char, unsigned char, or std::byte type."
  if (Ty->isStdByteType())
    return getChar();

  // Handle pointers and references.
  //
  // C has a very strict rule for pointer aliasing. C23 6.7.6.1p2:
  //   For two pointer types to be compatible, both shall be identically
  //   qualified and both shall be pointers to compatible types.
  //
  // This rule is impractically strict; we want to at least ignore CVR
  // qualifiers. Distinguishing by CVR qualifiers would make it UB to
  // e.g. cast a `char **` to `const char * const *` and dereference it,
  // which is too common and useful to invalidate. C++'s similar types
  // rule permits qualifier differences in these nested positions; in fact,
  // C++ even allows that cast as an implicit conversion.
  //
  // Other qualifiers could theoretically be distinguished, especially if
  // they involve a significant representation difference. We don't
  // currently do so, however.
  //
  // Computing the pointee type string recursively is implicitly more
  // forgiving than the standards require. Effectively, we are turning
  // the question "are these types compatible/similar" into "are
  // accesses to these types allowed to alias". In both C and C++,
  // the latter question has special carve-outs for signedness
  // mismatches that only apply at the top level. As a result, we are
  // allowing e.g. `int *` l-values to access `unsigned *` objects.
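  //
  // For example (illustrative), with pointer TBAA enabled an access through
  // `int **` gets a scalar node named "p2 int", parented under "any pointer",
  // which in turn is parented under "omnipotent char".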
  if (Ty->isPointerType() || Ty->isReferenceType()) {
    llvm::MDNode *AnyPtr = createScalarTypeNode("any pointer", getChar(), Size);
    if (!CodeGenOpts.PointerTBAA)
      return AnyPtr;
    // Compute the depth of the pointer and generate a tag of the form "p<depth>
    // <base type tag>".
    unsigned PtrDepth = 0;
    do {
      PtrDepth++;
      Ty = Ty->getPointeeType().getTypePtr();
    } while (Ty->isPointerType());
    // TODO: Implement C++'s type "similarity" and consider dis-"similar"
    // pointers distinct for non-builtin types.
    if (isa<BuiltinType>(Ty)) {
      llvm::MDNode *ScalarMD = getTypeInfoHelper(Ty);
      StringRef Name =
          cast<llvm::MDString>(
              ScalarMD->getOperand(CodeGenOpts.NewStructPathTBAA ? 2 : 0))
              ->getString();
      SmallString<256> OutName("p");
      OutName += std::to_string(PtrDepth);
      OutName += " ";
      OutName += Name;
      return createScalarTypeNode(OutName, AnyPtr, Size);
    }
    return AnyPtr;
  }

  // Accesses to arrays are accesses to objects of their element types.
  if (CodeGenOpts.NewStructPathTBAA && Ty->isArrayType())
    return getTypeInfo(cast<ArrayType>(Ty)->getElementType());

  // Enum types are distinct types. In C++ they have "underlying types",
  // but they aren't related for TBAA purposes.
  if (const EnumType *ETy = dyn_cast<EnumType>(Ty)) {
    if (!Features.CPlusPlus)
      return getTypeInfo(ETy->getDecl()->getIntegerType());

    // In C++ mode, types have linkage, so we can rely on the ODR and
    // on their mangled names, if they're external.
    // TODO: Is there a way to get a program-wide unique name for a
    // decl with local linkage or no linkage?
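    //
    // For example (illustrative), an externally visible `enum E` at namespace
    // scope is named by its mangled type name, e.g. "_ZTS1E" under the
    // Itanium ABI.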
    if (!ETy->getDecl()->isExternallyVisible())
      return getChar();

    SmallString<256> OutName;
    llvm::raw_svector_ostream Out(OutName);
    CGTypes.getCXXABI().getMangleContext().mangleCanonicalTypeName(
        QualType(ETy, 0), Out);
    return createScalarTypeNode(OutName, getChar(), Size);
  }

  if (const auto *EIT = dyn_cast<BitIntType>(Ty)) {
    SmallString<256> OutName;
    llvm::raw_svector_ostream Out(OutName);
    // Don't specify signed/unsigned since integer types can alias despite sign
    // differences.
    Out << "_BitInt(" << EIT->getNumBits() << ')';
    return createScalarTypeNode(OutName, getChar(), Size);
  }

  // For now, handle any other kind of type conservatively.
  return getChar();
}

llvm::MDNode *CodeGenTBAA::getTypeInfo(QualType QTy) {
  // At -O0 or relaxed aliasing, TBAA is not emitted for regular types.
  if (CodeGenOpts.OptimizationLevel == 0 || CodeGenOpts.RelaxedAliasing)
    return nullptr;

  // If the type has the may_alias attribute (even on a typedef), it is
  // effectively in the general char alias class.
  if (TypeHasMayAlias(QTy))
    return getChar();

  // We need this function to not fall back to returning the "omnipotent char"
  // type node for aggregate and union types. Otherwise, any dereference of an
  // aggregate will result in the may-alias access descriptor, meaning all
  // subsequent accesses to direct and indirect members of that aggregate will
  // be considered may-alias too.
  // TODO: Combine getTypeInfo() and getValidBaseTypeInfo() into a single
  // function.
  if (isValidBaseType(QTy))
    return getValidBaseTypeInfo(QTy);

  const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
  if (llvm::MDNode *N = MetadataCache[Ty])
    return N;

  // Note that the following helper call is allowed to add new nodes to the
  // cache, which invalidates all its previously obtained iterators. So we
  // first generate the node for the type and then add that node to the cache.
  llvm::MDNode *TypeNode = getTypeInfoHelper(Ty);
  return MetadataCache[Ty] = TypeNode;
}

TBAAAccessInfo CodeGenTBAA::getAccessInfo(QualType AccessType) {
  // Pointee values may have incomplete types, but they shall never be
  // dereferenced.
  if (AccessType->isIncompleteType())
    return TBAAAccessInfo::getIncompleteInfo();

  if (TypeHasMayAlias(AccessType))
    return TBAAAccessInfo::getMayAliasInfo();

  uint64_t Size = Context.getTypeSizeInChars(AccessType).getQuantity();
  return TBAAAccessInfo(getTypeInfo(AccessType), Size);
}

TBAAAccessInfo CodeGenTBAA::getVTablePtrAccessInfo(llvm::Type *VTablePtrType) {
  llvm::DataLayout DL(&Module);
  unsigned Size = DL.getPointerTypeSize(VTablePtrType);
  return TBAAAccessInfo(createScalarTypeNode("vtable pointer", getRoot(), Size),
                        Size);
}

bool
CodeGenTBAA::CollectFields(uint64_t BaseOffset,
                           QualType QTy,
                           SmallVectorImpl<llvm::MDBuilder::TBAAStructField> &
                             Fields,
                           bool MayAlias) {
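  // Each collected entry becomes an (offset, size, access tag) triple in the
  // resulting !tbaa.struct node, which (illustratively) looks like
  //   !{i64 0, i64 4, !<int tag>, i64 4, i64 4, !<float tag>, ...}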
  /* Things not handled yet include: C++ base classes and bitfields. */

  if (const RecordType *TTy = QTy->getAs<RecordType>()) {
    if (TTy->isUnionType()) {
      uint64_t Size = Context.getTypeSizeInChars(QTy).getQuantity();
      llvm::MDNode *TBAAType = getChar();
      llvm::MDNode *TBAATag = getAccessTagInfo(TBAAAccessInfo(TBAAType, Size));
      Fields.push_back(
          llvm::MDBuilder::TBAAStructField(BaseOffset, Size, TBAATag));
      return true;
    }
    const RecordDecl *RD = TTy->getDecl()->getDefinition();
    if (RD->hasFlexibleArrayMember())
      return false;

    // TODO: Handle C++ base classes.
    if (const CXXRecordDecl *Decl = dyn_cast<CXXRecordDecl>(RD))
      if (Decl->bases_begin() != Decl->bases_end())
        return false;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    const CGRecordLayout &CGRL = CGTypes.getCGRecordLayout(RD);

    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      if (isEmptyFieldForLayout(Context, *i))
        continue;

      uint64_t Offset =
          BaseOffset + Layout.getFieldOffset(idx) / Context.getCharWidth();

      // Create a single field for consecutive named bitfields using char as
      // base type.
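      //
      // For example (illustrative), for `int a : 3, b : 5;` sharing one
      // storage unit, only the run's leading bitfield emits an entry, and it
      // covers the whole storage unit with a char-typed access tag.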
      if ((*i)->isBitField()) {
        const CGBitFieldInfo &Info = CGRL.getBitFieldInfo(*i);
        // For big endian targets the first bitfield in the consecutive run is
        // at the most-significant end; see CGRecordLowering::setBitFieldInfo
        // for more information.
        bool IsBE = Context.getTargetInfo().isBigEndian();
        bool IsFirst = IsBE ? Info.StorageSize - (Info.Offset + Info.Size) == 0
                            : Info.Offset == 0;
        if (!IsFirst)
          continue;
        unsigned CurrentBitFieldSize = Info.StorageSize;
        uint64_t Size =
            llvm::divideCeil(CurrentBitFieldSize, Context.getCharWidth());
        llvm::MDNode *TBAAType = getChar();
        llvm::MDNode *TBAATag =
            getAccessTagInfo(TBAAAccessInfo(TBAAType, Size));
        Fields.push_back(
            llvm::MDBuilder::TBAAStructField(Offset, Size, TBAATag));
        continue;
      }

      QualType FieldQTy = i->getType();
      if (!CollectFields(Offset, FieldQTy, Fields,
                         MayAlias || TypeHasMayAlias(FieldQTy)))
        return false;
    }
    return true;
  }

  /* Otherwise, treat whatever it is as a field. */
  uint64_t Offset = BaseOffset;
  uint64_t Size = Context.getTypeSizeInChars(QTy).getQuantity();
  llvm::MDNode *TBAAType = MayAlias ? getChar() : getTypeInfo(QTy);
  llvm::MDNode *TBAATag = getAccessTagInfo(TBAAAccessInfo(TBAAType, Size));
  Fields.push_back(llvm::MDBuilder::TBAAStructField(Offset, Size, TBAATag));
  return true;
}

llvm::MDNode *
CodeGenTBAA::getTBAAStructInfo(QualType QTy) {
  if (CodeGenOpts.OptimizationLevel == 0 || CodeGenOpts.RelaxedAliasing)
    return nullptr;

  const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();

  if (llvm::MDNode *N = StructMetadataCache[Ty])
    return N;

  SmallVector<llvm::MDBuilder::TBAAStructField, 4> Fields;
  if (CollectFields(0, QTy, Fields, TypeHasMayAlias(QTy)))
    return MDHelper.createTBAAStructNode(Fields);

  // For now, handle any other kind of type conservatively.
  return StructMetadataCache[Ty] = nullptr;
}

llvm::MDNode *CodeGenTBAA::getBaseTypeInfoHelper(const Type *Ty) {
  if (auto *TTy = dyn_cast<RecordType>(Ty)) {
    const RecordDecl *RD = TTy->getDecl()->getDefinition();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    using TBAAStructField = llvm::MDBuilder::TBAAStructField;
    SmallVector<TBAAStructField, 4> Fields;
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      // Handle C++ base classes. Non-virtual bases can be treated as a kind
      // of field. Virtual bases are more complex and are omitted; to avoid
      // presenting an incomplete view, bail out for NewStructPathTBAA.
      if (CodeGenOpts.NewStructPathTBAA && CXXRD->getNumVBases() != 0)
        return nullptr;
      for (const CXXBaseSpecifier &B : CXXRD->bases()) {
        if (B.isVirtual())
          continue;
        QualType BaseQTy = B.getType();
        const CXXRecordDecl *BaseRD = BaseQTy->getAsCXXRecordDecl();
        if (BaseRD->isEmpty())
          continue;
        llvm::MDNode *TypeNode = isValidBaseType(BaseQTy)
                                     ? getValidBaseTypeInfo(BaseQTy)
                                     : getTypeInfo(BaseQTy);
        if (!TypeNode)
          return nullptr;
        uint64_t Offset = Layout.getBaseClassOffset(BaseRD).getQuantity();
        uint64_t Size =
            Context.getASTRecordLayout(BaseRD).getDataSize().getQuantity();
        Fields.push_back(
            llvm::MDBuilder::TBAAStructField(Offset, Size, TypeNode));
      }
      // The order in which base class subobjects are allocated is unspecified,
      // so it may differ from declaration order. In particular, the Itanium
      // ABI allocates a primary base first.
      // Since we exclude empty subobjects, the objects are not overlapping and
      // their offsets are unique.
      llvm::sort(Fields,
                 [](const TBAAStructField &A, const TBAAStructField &B) {
                   return A.Offset < B.Offset;
                 });
    }
    for (FieldDecl *Field : RD->fields()) {
      if (Field->isZeroSize(Context) || Field->isUnnamedBitField())
        continue;
      QualType FieldQTy = Field->getType();
      llvm::MDNode *TypeNode = isValidBaseType(FieldQTy)
                                   ? getValidBaseTypeInfo(FieldQTy)
                                   : getTypeInfo(FieldQTy);
      if (!TypeNode)
        return nullptr;

      uint64_t BitOffset = Layout.getFieldOffset(Field->getFieldIndex());
      uint64_t Offset = Context.toCharUnitsFromBits(BitOffset).getQuantity();
      uint64_t Size = Context.getTypeSizeInChars(FieldQTy).getQuantity();
      Fields.push_back(llvm::MDBuilder::TBAAStructField(Offset, Size,
                                                        TypeNode));
    }

    SmallString<256> OutName;
    if (Features.CPlusPlus) {
      // Don't use the mangler for C code.
      llvm::raw_svector_ostream Out(OutName);
      CGTypes.getCXXABI().getMangleContext().mangleCanonicalTypeName(
          QualType(Ty, 0), Out);
    } else {
      OutName = RD->getName();
    }

    if (CodeGenOpts.NewStructPathTBAA) {
      llvm::MDNode *Parent = getChar();
      uint64_t Size = Context.getTypeSizeInChars(Ty).getQuantity();
      llvm::Metadata *Id = MDHelper.createString(OutName);
      return MDHelper.createTBAATypeNode(Parent, Size, Id, Fields);
    }

    // Create the struct type node with a vector of pairs (offset, type).
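    //
    // Illustratively, the classic struct type node for
    // `struct S { int i; float f; }` looks roughly like
    //   !{!"_ZTS1S", !<int node>, i64 0, !<float node>, i64 4}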
    SmallVector<std::pair<llvm::MDNode*, uint64_t>, 4> OffsetsAndTypes;
    for (const auto &Field : Fields)
      OffsetsAndTypes.push_back(std::make_pair(Field.Type, Field.Offset));
    return MDHelper.createTBAAStructTypeNode(OutName, OffsetsAndTypes);
  }

  return nullptr;
}

llvm::MDNode *CodeGenTBAA::getValidBaseTypeInfo(QualType QTy) {
  assert(isValidBaseType(QTy) && "Must be a valid base type");

  const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();

  // nullptr is a valid value in the cache, so use find rather than []
  auto I = BaseTypeMetadataCache.find(Ty);
  if (I != BaseTypeMetadataCache.end())
    return I->second;

  // First calculate the metadata, before recomputing the insertion point, as
  // the helper can recursively call us.
  llvm::MDNode *TypeNode = getBaseTypeInfoHelper(Ty);
  LLVM_ATTRIBUTE_UNUSED auto inserted =
      BaseTypeMetadataCache.insert({Ty, TypeNode});
  assert(inserted.second && "BaseType metadata was already inserted");

  return TypeNode;
}

llvm::MDNode *CodeGenTBAA::getBaseTypeInfo(QualType QTy) {
  return isValidBaseType(QTy) ? getValidBaseTypeInfo(QTy) : nullptr;
}

llvm::MDNode *CodeGenTBAA::getAccessTagInfo(TBAAAccessInfo Info) {
  assert(!Info.isIncomplete() && "Access to an object of an incomplete type!");

  if (Info.isMayAlias())
    Info = TBAAAccessInfo(getChar(), Info.Size);

  if (!Info.AccessType)
    return nullptr;

  if (!CodeGenOpts.StructPathTBAA)
    Info = TBAAAccessInfo(Info.AccessType, Info.Size);

  llvm::MDNode *&N = AccessTagMetadataCache[Info];
  if (N)
    return N;

  if (!Info.BaseType) {
    Info.BaseType = Info.AccessType;
    assert(!Info.Offset && "Nonzero offset for an access with no base type!");
  }
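  // Illustrative access tag shapes: classic struct-path tags are roughly
  //   !{<base type>, <access type>, i64 <offset>}
  // while the new-format tags also carry the access size:
  //   !{<base type>, <access type>, i64 <offset>, i64 <size>}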
  if (CodeGenOpts.NewStructPathTBAA) {
    return N = MDHelper.createTBAAAccessTag(Info.BaseType, Info.AccessType,
                                            Info.Offset, Info.Size);
  }
  return N = MDHelper.createTBAAStructTagNode(Info.BaseType, Info.AccessType,
                                              Info.Offset);
}

TBAAAccessInfo CodeGenTBAA::mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo,
                                                 TBAAAccessInfo TargetInfo) {
  if (SourceInfo.isMayAlias() || TargetInfo.isMayAlias())
    return TBAAAccessInfo::getMayAliasInfo();
  return TargetInfo;
}

TBAAAccessInfo
CodeGenTBAA::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,
                                                 TBAAAccessInfo InfoB) {
  if (InfoA == InfoB)
    return InfoA;

  if (!InfoA || !InfoB)
    return TBAAAccessInfo();

  if (InfoA.isMayAlias() || InfoB.isMayAlias())
    return TBAAAccessInfo::getMayAliasInfo();

  // TODO: Implement the rest of the logic here. For example, two accesses
  // with the same final access types result in an access to an object of that
  // final access type regardless of their base types.
  return TBAAAccessInfo::getMayAliasInfo();
}

TBAAAccessInfo
CodeGenTBAA::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,
                                            TBAAAccessInfo SrcInfo) {
  if (DestInfo == SrcInfo)
    return DestInfo;

  if (!DestInfo || !SrcInfo)
    return TBAAAccessInfo();

  if (DestInfo.isMayAlias() || SrcInfo.isMayAlias())
    return TBAAAccessInfo::getMayAliasInfo();

  // TODO: Implement the rest of the logic here. For example, two accesses
  // with the same final access types result in an access to an object of that
  // final access type regardless of their base types.
  return TBAAAccessInfo::getMayAliasInfo();
}