1 //===- ABIInfo.cpp --------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "ABIInfo.h"
10 #include "ABIInfoImpl.h"
11
12 using namespace clang;
13 using namespace clang::CodeGen;
14
// Pin the vtable to this file: defining the destructor out-of-line gives the
// vtable (and type info) a single home translation unit.
ABIInfo::~ABIInfo() = default;
17
// Accessor for the C++ ABI lowering object, forwarded from CodeGenTypes.
CGCXXABI &ABIInfo::getCXXABI() const { return CGT.getCXXABI(); }
19
// Accessor for the AST context, forwarded from CodeGenTypes.
ASTContext &ABIInfo::getContext() const { return CGT.getContext(); }
21
// Accessor for the LLVM context used to build IR types, forwarded from
// CodeGenTypes.
llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}
25
// Accessor for the target data layout, forwarded from CodeGenTypes.
const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}
29
// Accessor for the target description, forwarded from CodeGenTypes.
const TargetInfo &ABIInfo::getTarget() const { return CGT.getTarget(); }
31
// Accessor for the active code-generation options, forwarded from
// CodeGenTypes.
const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
  return CGT.getCodeGenOpts();
}
35
// True when the target triple is an Android environment.
bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }
37
// True when the target triple belongs to the OpenHarmony OS family.
bool ABIInfo::isOHOSFamily() const {
  return getTarget().getTriple().isOHOSFamily();
}
41
// Default lowering for a Microsoft-ABI va_arg: emit nothing and hand back an
// ignored RValue.  NOTE(review): presumably targets that support MS variadic
// calls override this — confirm in the target-specific ABIInfo subclasses.
RValue ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                            QualType Ty, AggValueSlot Slot) const {
  return RValue::getIgnored();
}
46
// Conservative default: no type qualifies as a homogeneous-aggregate base
// type, so isHomogeneousAggregate() below can never succeed unless an ABI
// provides its own answer here.
bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}
50
// Conservative default: no (Base, Members) combination is considered small
// enough to be passed as a homogeneous aggregate.
bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}
55
// Whether a zero-length bitfield member is tolerated inside a homogeneous
// aggregate (skipped) rather than disqualifying the record.
bool ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
  // For compatibility with GCC, ignore empty bitfields in C++ mode.
  return getContext().getLangOpts().CPlusPlus;
}
60
// Classify Ty as a homogeneous aggregate: a type whose flattened members all
// share one "base" type (checked via isHomogeneousAggregateBaseType).  On
// success, Base holds the common base type and Members the total element
// count.  Base and Members are in/out: recursive calls accumulate into them.
// NOTE(review): callers appear to be expected to pass Base == nullptr
// initially (the `if (!Base)` checks below rely on it) — confirm at call
// sites.
bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
                                     uint64_t &Members) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Constant array: classify the element type, then scale the member count
    // by the number of elements.  Zero-length arrays disqualify the type.
    uint64_t NElements = AT->getZExtSize();
    if (NElements == 0)
      return false;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
      return false;
    Members *= NElements;
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    // Flexible array members make the size open-ended; reject.
    if (RD->hasFlexibleArrayMember())
      return false;

    Members = 0;

    // If this is a C++ record, check the properties of the record such as
    // bases and ABI specific restrictions
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD))
        return false;

      for (const auto &I : CXXRD->bases()) {
        // Ignore empty records.
        if (isEmptyRecord(getContext(), I.getType(), true))
          continue;

        // Each non-empty base must itself be homogeneous over the same Base.
        uint64_t FldMembers;
        if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
          return false;

        Members += FldMembers;
      }
    }

    for (const auto *FD : RD->fields()) {
      // Ignore (non-zero arrays of) empty records.
      QualType FT = FD->getType();
      while (const ConstantArrayType *AT =
                 getContext().getAsConstantArrayType(FT)) {
        if (AT->isZeroSize())
          return false;
        FT = AT->getElementType();
      }
      if (isEmptyRecord(getContext(), FT, true))
        continue;

      // Skip zero-length bitfields when the ABI permits it (GCC-compatible
      // behavior in C++; see the hook above).
      if (isZeroLengthBitfieldPermittedInHomogeneousAggregate() &&
          FD->isZeroLengthBitField(getContext()))
        continue;

      uint64_t FldMembers;
      if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
        return false;

      // A union contributes its largest member's count; a struct sums its
      // fields' counts.
      Members = (RD->isUnion() ?
                 std::max(Members, FldMembers) : Members + FldMembers);
    }

    // An all-empty record never set Base; it is not homogeneous.
    if (!Base)
      return false;

    // Ensure there is no padding.
    if (getContext().getTypeSize(Base) * Members !=
        getContext().getTypeSize(Ty))
      return false;
  } else {
    // Scalar leaf.  A complex value counts as two members of its element
    // type.
    Members = 1;
    if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
      Members = 2;
      Ty = CT->getElementType();
    }

    // Most ABIs only support float, double, and some vector type widths.
    if (!isHomogeneousAggregateBaseType(Ty))
      return false;

    // The base type must be the same for all members.  Types that
    // agree in both total size and mode (float vs. vector) are
    // treated as being equivalent here.
    const Type *TyPtr = Ty.getTypePtr();
    if (!Base) {
      Base = TyPtr;
      // If it's a non-power-of-2 vector, its size is already a power-of-2,
      // so make sure to widen it explicitly.
      if (const VectorType *VT = Base->getAs<VectorType>()) {
        QualType EltTy = VT->getElementType();
        unsigned NumElements =
            getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
        Base = getContext()
                   .getVectorType(EltTy, NumElements, VT->getVectorKind())
                   .getTypePtr();
      }
    }

    // Reject members whose mode or size disagrees with the chosen Base.
    if (Base->isVectorType() != TyPtr->isVectorType() ||
        getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
      return false;
  }
  // Finally, the ABI decides whether this many members is acceptable.
  return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
}
162
isPromotableIntegerTypeForABI(QualType Ty) const163 bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
164 if (getContext().isPromotableIntegerType(Ty))
165 return true;
166
167 if (const auto *EIT = Ty->getAs<BitIntType>())
168 if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
169 return true;
170
171 return false;
172 }
173
getNaturalAlignIndirect(QualType Ty,bool ByVal,bool Realign,llvm::Type * Padding) const174 ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal,
175 bool Realign,
176 llvm::Type *Padding) const {
177 return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal,
178 Realign, Padding);
179 }
180
getNaturalAlignIndirectInReg(QualType Ty,bool Realign) const181 ABIArgInfo ABIInfo::getNaturalAlignIndirectInReg(QualType Ty,
182 bool Realign) const {
183 return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
184 /*ByVal*/ false, Realign);
185 }
186
appendAttributeMangling(TargetAttr * Attr,raw_ostream & Out) const187 void ABIInfo::appendAttributeMangling(TargetAttr *Attr,
188 raw_ostream &Out) const {
189 if (Attr->isDefaultVersion())
190 return;
191 appendAttributeMangling(Attr->getFeaturesStr(), Out);
192 }
193
// Mangle a target_version attribute by its name list.
void ABIInfo::appendAttributeMangling(TargetVersionAttr *Attr,
                                      raw_ostream &Out) const {
  appendAttributeMangling(Attr->getNamesStr(), Out);
}
198
// Mangle the Index'th variant of a target_clones attribute: its feature
// string followed by '.' and the variant's mangled index.
void ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
                                      raw_ostream &Out) const {
  appendAttributeMangling(Attr->getFeatureStr(Index), Out);
  Out << '.' << Attr->getMangledIndex(Index);
}
204
appendAttributeMangling(StringRef AttrStr,raw_ostream & Out) const205 void ABIInfo::appendAttributeMangling(StringRef AttrStr,
206 raw_ostream &Out) const {
207 if (AttrStr == "default") {
208 Out << ".default";
209 return;
210 }
211
212 Out << '.';
213 const TargetInfo &TI = CGT.getTarget();
214 ParsedTargetAttr Info = TI.parseTargetAttr(AttrStr);
215
216 llvm::sort(Info.Features, [&TI](StringRef LHS, StringRef RHS) {
217 // Multiversioning doesn't allow "no-${feature}", so we can
218 // only have "+" prefixes here.
219 assert(LHS.starts_with("+") && RHS.starts_with("+") &&
220 "Features should always have a prefix.");
221 return TI.multiVersionSortPriority(LHS.substr(1)) >
222 TI.multiVersionSortPriority(RHS.substr(1));
223 });
224
225 bool IsFirst = true;
226 if (!Info.CPU.empty()) {
227 IsFirst = false;
228 Out << "arch_" << Info.CPU;
229 }
230
231 for (StringRef Feat : Info.Features) {
232 if (!IsFirst)
233 Out << '_';
234 IsFirst = false;
235 Out << Feat.substr(1);
236 }
237 }
238
// Pin the vtable to this file: defining the destructor out-of-line gives the
// vtable a single home translation unit.
SwiftABIInfo::~SwiftABIInfo() = default;
241
242 /// Does the given lowering require more than the given number of
243 /// registers when expanded?
244 ///
245 /// This is intended to be the basis of a reasonable basic implementation
246 /// of should{Pass,Return}Indirectly.
247 ///
248 /// For most targets, a limit of four total registers is reasonable; this
249 /// limits the amount of code required in order to move around the value
250 /// in case it wasn't produced immediately prior to the call by the caller
251 /// (or wasn't produced in exactly the right registers) or isn't used
252 /// immediately within the callee. But some targets may need to further
253 /// limit the register count due to an inability to support that many
254 /// return registers.
occupiesMoreThan(ArrayRef<llvm::Type * > scalarTypes,unsigned maxAllRegisters) const255 bool SwiftABIInfo::occupiesMoreThan(ArrayRef<llvm::Type *> scalarTypes,
256 unsigned maxAllRegisters) const {
257 unsigned intCount = 0, fpCount = 0;
258 for (llvm::Type *type : scalarTypes) {
259 if (type->isPointerTy()) {
260 intCount++;
261 } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
262 auto ptrWidth = CGT.getTarget().getPointerWidth(LangAS::Default);
263 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
264 } else {
265 assert(type->isVectorTy() || type->isFloatingPointTy());
266 fpCount++;
267 }
268 }
269
270 return (intCount + fpCount > maxAllRegisters);
271 }
272
// Default policy: pass (or return) indirectly when expanding the components
// would occupy more than four registers in total.
bool SwiftABIInfo::shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
                                        bool AsReturnValue) const {
  return occupiesMoreThan(ComponentTys, /*maxAllRegisters=*/4);
}
277
isLegalVectorType(CharUnits VectorSize,llvm::Type * EltTy,unsigned NumElts) const278 bool SwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
279 unsigned NumElts) const {
280 // The default implementation of this assumes that the target guarantees
281 // 128-bit SIMD support but nothing more.
282 return (VectorSize.getQuantity() > 8 && VectorSize.getQuantity() <= 16);
283 }
284