1 //===- PPC.cpp ------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "ABIInfoImpl.h"
10 #include "TargetInfo.h"
11 #include "clang/Basic/DiagnosticFrontend.h"
12
13 using namespace clang;
14 using namespace clang::CodeGen;
15
/// Load a complex value whose elements are smaller than a va_list slot from
/// the variadic argument area and return it as a tightly packed RValue.
///
/// The ABI right-adjusts each element inside its own slot (word or
/// doubleword), so the real and imaginary parts must be loaded from separate,
/// offset addresses rather than as one contiguous complex value.
static RValue complexTempStructure(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty, CharUnits SlotSize,
                                   CharUnits EltSize, const ComplexType *CTy) {
  // Advance the va_list cursor by two slots: one for the real part, one for
  // the imaginary part.
  Address Addr =
      emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, SlotSize * 2,
                             SlotSize, SlotSize, /*AllowHigher*/ true);

  Address RealAddr = Addr;
  Address ImagAddr = RealAddr;
  if (CGF.CGM.getDataLayout().isBigEndian()) {
    // Big-endian: the value is right-adjusted, i.e. it lives at the high end
    // of its slot, so skip the leading padding within each slot.
    RealAddr =
        CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize - EltSize);
    ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
                                                      2 * SlotSize - EltSize);
  } else {
    // Little-endian: each element starts at the beginning of its slot; only
    // the imaginary part needs an offset (one full slot past the real part).
    ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
  }

  // Load both halves with the element's in-memory type and return them as a
  // complex RValue (Clang re-packs them tightly).
  llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
  RealAddr = RealAddr.withElementType(EltTy);
  ImagAddr = ImagAddr.withElementType(EltTy);
  llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
  llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");

  return RValue::getComplex(Real, Imag);
}
42
PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction & CGF,llvm::Value * Address,bool Is64Bit,bool IsAIX)43 static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
44 llvm::Value *Address, bool Is64Bit,
45 bool IsAIX) {
46 // This is calculated from the LLVM and GCC tables and verified
47 // against gcc output. AFAIK all PPC ABIs use the same encoding.
48
49 CodeGen::CGBuilderTy &Builder = CGF.Builder;
50
51 llvm::IntegerType *i8 = CGF.Int8Ty;
52 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
53 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
54 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
55
56 // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
57 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);
58
59 // 32-63: fp0-31, the 8-byte floating-point registers
60 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
61
62 // 64-67 are various 4-byte or 8-byte special-purpose registers:
63 // 64: mq
64 // 65: lr
65 // 66: ctr
66 // 67: ap
67 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);
68
69 // 68-76 are various 4-byte special-purpose registers:
70 // 68-75 cr0-7
71 // 76: xer
72 AssignToArrayRange(Builder, Address, Four8, 68, 76);
73
74 // 77-108: v0-31, the 16-byte vector registers
75 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
76
77 // 109: vrsave
78 // 110: vscr
79 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);
80
81 // AIX does not utilize the rest of the registers.
82 if (IsAIX)
83 return false;
84
85 // 111: spe_acc
86 // 112: spefscr
87 // 113: sfp
88 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);
89
90 if (!Is64Bit)
91 return false;
92
93 // TODO: Need to verify if these registers are used on 64 bit AIX with Power8
94 // or above CPU.
95 // 64-bit only registers:
96 // 114: tfhar
97 // 115: tfiar
98 // 116: texasr
99 AssignToArrayRange(Builder, Address, Eight8, 114, 116);
100
101 return false;
102 }
103
104 // AIX
105 namespace {
106 /// AIXABIInfo - The AIX XCOFF ABI information.
107 class AIXABIInfo : public ABIInfo {
108 const bool Is64Bit;
109 const unsigned PtrByteSize;
110 CharUnits getParamTypeAlignment(QualType Ty) const;
111
112 public:
AIXABIInfo(CodeGen::CodeGenTypes & CGT,bool Is64Bit)113 AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
114 : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}
115
116 bool isPromotableTypeForABI(QualType Ty) const;
117
118 ABIArgInfo classifyReturnType(QualType RetTy) const;
119 ABIArgInfo classifyArgumentType(QualType Ty) const;
120
computeInfo(CGFunctionInfo & FI) const121 void computeInfo(CGFunctionInfo &FI) const override {
122 if (!getCXXABI().classifyReturnType(FI))
123 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
124
125 for (auto &I : FI.arguments())
126 I.info = classifyArgumentType(I.type);
127 }
128
129 RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
130 AggValueSlot Slot) const override;
131 };
132
/// AIXTargetCodeGenInfo - Target hooks for the AIX XCOFF ABI.
class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
  const bool Is64Bit; // True for the 64-bit AIX ABI.

public:
  AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
      : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
        Is64Bit(Is64Bit) {}
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  /// Applies AIX-specific attributes; currently marks eligible global
  /// variables with the "toc-data" attribute.
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};
150 } // namespace
151
152 // Return true if the ABI requires Ty to be passed sign- or zero-
153 // extended to 32/64 bits.
isPromotableTypeForABI(QualType Ty) const154 bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
155 // Treat an enum type as its underlying type.
156 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
157 Ty = EnumTy->getDecl()->getIntegerType();
158
159 // Promotable integer types are required to be promoted by the ABI.
160 if (getContext().isPromotableIntegerType(Ty))
161 return true;
162
163 if (!Is64Bit)
164 return false;
165
166 // For 64 bit mode, in addition to the usual promotable integer types, we also
167 // need to extend all 32-bit types, since the ABI requires promotion to 64
168 // bits.
169 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
170 switch (BT->getKind()) {
171 case BuiltinType::Int:
172 case BuiltinType::UInt:
173 return true;
174 default:
175 break;
176 }
177
178 return false;
179 }
180
classifyReturnType(QualType RetTy) const181 ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
182 if (RetTy->isAnyComplexType())
183 return ABIArgInfo::getDirect();
184
185 if (RetTy->isVectorType())
186 return ABIArgInfo::getDirect();
187
188 if (RetTy->isVoidType())
189 return ABIArgInfo::getIgnore();
190
191 if (isAggregateTypeForABI(RetTy))
192 return getNaturalAlignIndirect(RetTy);
193
194 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
195 : ABIArgInfo::getDirect());
196 }
197
classifyArgumentType(QualType Ty) const198 ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
199 Ty = useFirstFieldIfTransparentUnion(Ty);
200
201 if (Ty->isAnyComplexType())
202 return ABIArgInfo::getDirect();
203
204 if (Ty->isVectorType())
205 return ABIArgInfo::getDirect();
206
207 if (isAggregateTypeForABI(Ty)) {
208 // Records with non-trivial destructors/copy-constructors should not be
209 // passed by value.
210 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
211 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
212
213 CharUnits CCAlign = getParamTypeAlignment(Ty);
214 CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);
215
216 return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true,
217 /*Realign*/ TyAlign > CCAlign);
218 }
219
220 return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
221 : ABIArgInfo::getDirect());
222 }
223
getParamTypeAlignment(QualType Ty) const224 CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
225 // Complex types are passed just like their elements.
226 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
227 Ty = CTy->getElementType();
228
229 if (Ty->isVectorType())
230 return CharUnits::fromQuantity(16);
231
232 // If the structure contains a vector type, the alignment is 16.
233 if (isRecordWithSIMDVectorType(getContext(), Ty))
234 return CharUnits::fromQuantity(16);
235
236 return CharUnits::fromQuantity(PtrByteSize);
237 }
238
/// Emit code to load one variadic argument of type \p Ty from the AIX
/// va_list (a simple pointer into the overflow/parameter area).
RValue AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty, AggValueSlot Slot) const {

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
  // Arguments in the save area are aligned per the calling convention,
  // which may differ from the type's natural alignment.
  TypeInfo.Align = getParamTypeAlignment(Ty);

  CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);

  // If we have a complex type and the base type is smaller than the register
  // size, the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate words in 32bit mode or doublewords in 64bit mode. However,
  // Clang expects us to produce a pointer to a structure with the two parts
  // packed tightly. So generate loads of the real and imaginary parts relative
  // to the va_list pointer, and store them to a temporary structure. We do the
  // same as the PPC64ABI here.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    CharUnits EltSize = TypeInfo.Width / 2;
    if (EltSize < SlotSize)
      return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
  }

  // Common case: read the value (right-adjusted in its slot) straight from
  // the overflow area.
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
                          SlotSize, /*AllowHigher*/ true, Slot);
}
263
// Populate the DWARF EH register-size table for AIX (32- or 64-bit).
bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
}
268
// Mark eligible AIX global variables with the "toc-data" attribute, and warn
// when a user explicitly requested toc-data placement for a variable that
// cannot support it.
void AIXTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  // Only global variables are candidates for toc-data.
  if (!isa<llvm::GlobalVariable>(GV))
    return;

  auto *GVar = cast<llvm::GlobalVariable>(GV);
  auto GVId = GV->getName();

  // Is this a global variable specified by the user as toc-data?
  bool UserSpecifiedTOC =
      llvm::binary_search(M.getCodeGenOpts().TocDataVarsUserSpecified, GVId);
  // Assumes the same variable cannot be in both TocVarsUserSpecified and
  // NoTocVars.
  if (UserSpecifiedTOC ||
      ((M.getCodeGenOpts().AllTocData) &&
       !llvm::binary_search(M.getCodeGenOpts().NoTocDataVars, GVId))) {
    const unsigned long PointerSize =
        GV->getParent()->getDataLayout().getPointerSizeInBits() / 8;
    auto *VarD = dyn_cast<VarDecl>(D);
    assert(VarD && "Invalid declaration of global variable.");

    ASTContext &Context = D->getASTContext();
    unsigned Alignment = Context.toBits(Context.getDeclAlign(D)) / 8;
    const auto *Ty = VarD->getType().getTypePtr();
    const RecordDecl *RDecl =
        Ty->isRecordType() ? Ty->getAs<RecordType>()->getDecl() : nullptr;

    // Only diagnose variables the user explicitly listed; implicit
    // (-mtocdata for all) candidates are silently skipped when ineligible.
    bool EmitDiagnostic = UserSpecifiedTOC && GV->hasExternalLinkage();
    auto reportUnsupportedWarning = [&](bool ShouldEmitWarning, StringRef Msg) {
      if (ShouldEmitWarning)
        M.getDiags().Report(D->getLocation(), diag::warn_toc_unsupported_type)
            << GVId << Msg;
    };
    // A TOC entry is one pointer wide, so reject anything that cannot fit or
    // that has storage/section constraints incompatible with TOC placement.
    if (!Ty || Ty->isIncompleteType())
      reportUnsupportedWarning(EmitDiagnostic, "of incomplete type");
    else if (RDecl && RDecl->hasFlexibleArrayMember())
      reportUnsupportedWarning(EmitDiagnostic,
                               "it contains a flexible array member");
    else if (VarD->getTLSKind() != VarDecl::TLS_None)
      reportUnsupportedWarning(EmitDiagnostic, "of thread local storage");
    else if (PointerSize < Context.getTypeInfo(VarD->getType()).Width / 8)
      reportUnsupportedWarning(EmitDiagnostic,
                               "variable is larger than a pointer");
    else if (PointerSize < Alignment)
      reportUnsupportedWarning(EmitDiagnostic,
                               "variable is aligned wider than a pointer");
    else if (D->hasAttr<SectionAttr>())
      reportUnsupportedWarning(EmitDiagnostic,
                               "variable has a section attribute");
    else if (GV->hasExternalLinkage() ||
             (M.getCodeGenOpts().AllTocData && !GV->hasLocalLinkage()))
      GVar->addAttribute("toc-data");
  }
}
323
324 // PowerPC-32
325 namespace {
326 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
  bool IsSoftFloatABI;           // Floating args travel in GPRs, not FPRs.
  bool IsRetSmallStructInRegABI; // -msvr4-struct-return: small aggregates
                                 // come back in r3/r4.

  /// Alignment required for \p Ty in the parameter area.
  CharUnits getParamTypeAlignment(QualType Ty) const;

public:
  PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI,
                     bool RetSmallStructInRegABI)
      : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    // Give the C++ ABI first shot at the return type; fall back to our own
    // classification, then classify arguments with the default rules.
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;
};
351
/// PPC32TargetCodeGenInfo - Target hooks for 32-bit PowerPC ELF (SVR4).
class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
                         bool RetSmallStructInRegABI)
      : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
            CGT, SoftFloatABI, RetSmallStructInRegABI)) {}

  /// Decide whether small structs are returned in r3/r4 for this triple,
  /// honoring -maix-struct-return / -msvr4-struct-return when given.
  static bool isStructReturnInRegABI(const llvm::Triple &Triple,
                                     const CodeGenOptions &Opts);

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
370 }
371
getParamTypeAlignment(QualType Ty) const372 CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
373 // Complex types are passed just like their elements.
374 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
375 Ty = CTy->getElementType();
376
377 if (Ty->isVectorType())
378 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
379 : 4);
380
381 // For single-element float/vector structs, we consider the whole type
382 // to have the same alignment requirements as its single element.
383 const Type *AlignTy = nullptr;
384 if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
385 const BuiltinType *BT = EltType->getAs<BuiltinType>();
386 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
387 (BT && BT->isFloatingPoint()))
388 AlignTy = EltType;
389 }
390
391 if (AlignTy)
392 return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
393 return CharUnits::fromQuantity(4);
394 }
395
classifyReturnType(QualType RetTy) const396 ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
397 uint64_t Size;
398
399 // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
400 if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI &&
401 (Size = getContext().getTypeSize(RetTy)) <= 64) {
402 // System V ABI (1995), page 3-22, specified:
403 // > A structure or union whose size is less than or equal to 8 bytes
404 // > shall be returned in r3 and r4, as if it were first stored in the
405 // > 8-byte aligned memory area and then the low addressed word were
406 // > loaded into r3 and the high-addressed word into r4. Bits beyond
407 // > the last member of the structure or union are not defined.
408 //
409 // GCC for big-endian PPC32 inserts the pad before the first member,
410 // not "beyond the last member" of the struct. To stay compatible
411 // with GCC, we coerce the struct to an integer of the same size.
412 // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
413 if (Size == 0)
414 return ABIArgInfo::getIgnore();
415 else {
416 llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);
417 return ABIArgInfo::getDirect(CoerceTy);
418 }
419 }
420
421 return DefaultABIInfo::classifyReturnType(RetTy);
422 }
423
424 // TODO: this implementation is now likely redundant with
425 // DefaultABIInfo::EmitVAArg.
// TODO: this implementation is now likely redundant with
// DefaultABIInfo::EmitVAArg.
//
// Emit a va_arg load for the 32-bit SVR4 ABI: first try the register save
// area recorded in the va_list, then fall back to the overflow (stack) area,
// merging the two addresses with a phi.
RValue PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
                                     QualType Ty, AggValueSlot Slot) const {
  if (getTarget().getTriple().isOSDarwin()) {
    // Darwin's va_list is just a pointer into the argument area.
    auto TI = getContext().getTypeInfoInChars(Ty);
    TI.Align = getParamTypeAlignment(Ty);

    CharUnits SlotSize = CharUnits::fromQuantity(4);
    return emitVoidPtrVAArg(CGF, VAList, Ty,
                            classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
                            /*AllowHigherAlign=*/true, Slot);
  }

  // At most 8 GPRs (or 8 FPRs) hold variadic arguments before they spill to
  // the overflow area.
  const unsigned OverflowLimit = 8;
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    // TODO: Implement this. For now ignore.
    (void)CTy;
    return RValue::getAggregate(Address::invalid()); // FIXME?
  }

  // struct __va_list_tag {
  //   unsigned char gpr;
  //   unsigned char fpr;
  //   unsigned short reserved;
  //   void *overflow_arg_area;
  //   void *reg_save_area;
  // };

  bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
  bool isInt = !Ty->isFloatingType();
  bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;

  // All aggregates are passed indirectly?  That doesn't seem consistent
  // with the argument-lowering code.
  bool isIndirect = isAggregateTypeForABI(Ty);

  CGBuilderTy &Builder = CGF.Builder;

  // The calling convention either uses 1-2 GPRs or 1 FPR.
  Address NumRegsAddr = Address::invalid();
  if (isInt || IsSoftFloatABI) {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
  } else {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
  }

  llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");

  // "Align" the register count when TY is i64: a 64-bit value occupies an
  // even/odd GPR pair, so round the count up to the next even number.
  if (isI64 || (isF64 && IsSoftFloatABI)) {
    NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
    NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
  }

  llvm::Value *CC =
      Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");

  llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
  llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
  llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");

  Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);

  llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
  if (isIndirect)
    DirectTy = CGF.UnqualPtrTy;

  // Case 1: consume registers.
  Address RegAddr = Address::invalid();
  {
    CGF.EmitBlock(UsingRegs);

    Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
    RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), CGF.Int8Ty,
                      CharUnits::fromQuantity(8));
    assert(RegAddr.getElementType() == CGF.Int8Ty);

    // Floating-point registers start after the general-purpose registers.
    if (!(isInt || IsSoftFloatABI)) {
      RegAddr = Builder.CreateConstInBoundsByteGEP(
          RegAddr, CharUnits::fromQuantity(32));
    }

    // Get the address of the saved value by scaling the number of
    // registers we've used by the register size (4 for GPRs, 8 for FPRs).
    CharUnits RegSize =
        CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
    llvm::Value *RegOffset =
        Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
    RegAddr = Address(Builder.CreateInBoundsGEP(
                          CGF.Int8Ty, RegAddr.emitRawPointer(CGF), RegOffset),
                      DirectTy,
                      RegAddr.getAlignment().alignmentOfArrayElement(RegSize));

    // Increase the used-register count.
    NumRegs = Builder.CreateAdd(
        NumRegs,
        Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
    Builder.CreateStore(NumRegs, NumRegsAddr);

    CGF.EmitBranch(Cont);
  }

  // Case 2: consume space in the overflow area.
  Address MemAddr = Address::invalid();
  {
    CGF.EmitBlock(UsingOverflow);

    // Mark all registers consumed so later va_arg uses go straight to the
    // overflow area.
    Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);

    // Everything in the overflow area is rounded up to a size of at least 4.
    CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);

    CharUnits Size;
    if (!isIndirect) {
      auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
      Size = TypeInfo.Width.alignTo(OverflowAreaAlign);
    } else {
      // Indirectly passed values occupy one pointer in the overflow area.
      Size = CGF.getPointerSize();
    }

    Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
    Address OverflowArea =
        Address(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), CGF.Int8Ty,
                OverflowAreaAlign);
    // Round up address of argument to alignment
    CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
    if (Align > OverflowAreaAlign) {
      llvm::Value *Ptr = OverflowArea.emitRawPointer(CGF);
      OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
                             OverflowArea.getElementType(), Align);
    }

    MemAddr = OverflowArea.withElementType(DirectTy);

    // Increase the overflow area.
    OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
    Builder.CreateStore(OverflowArea.emitRawPointer(CGF), OverflowAreaAddr);
    CGF.EmitBranch(Cont);
  }

  CGF.EmitBlock(Cont);

  // Merge the cases with a phi.
  Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
                                "vaarg.addr");

  // Load the pointer if the argument was passed indirectly.
  if (isIndirect) {
    Result = Address(Builder.CreateLoad(Result, "aggr"), ElementTy,
                     getContext().getTypeAlignInChars(Ty));
  }

  return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(Result, Ty), Slot);
}
579
isStructReturnInRegABI(const llvm::Triple & Triple,const CodeGenOptions & Opts)580 bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
581 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
582 assert(Triple.isPPC32());
583
584 switch (Opts.getStructReturnConvention()) {
585 case CodeGenOptions::SRCK_Default:
586 break;
587 case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
588 return false;
589 case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
590 return true;
591 }
592
593 if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
594 return true;
595
596 return false;
597 }
598
// Populate the DWARF EH register-size table for 32-bit non-AIX PowerPC.
bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
                                     /*IsAIX*/ false);
}
605
606 // PowerPC-64
607
608 namespace {
609
610 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public ABIInfo {
  static const unsigned GPRBits = 64; // Width of a general-purpose register.
  PPC64_SVR4_ABIKind Kind;            // ELFv1 vs. ELFv2.
  bool IsSoftFloatABI;                // Disables FP homogeneous aggregates.

public:
  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, PPC64_SVR4_ABIKind Kind,
                     bool SoftFloatABI)
      : ABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {}

  /// Return true if the ABI requires Ty to be sign- or zero-extended to
  /// 64 bits.
  bool isPromotableTypeForABI(QualType Ty) const;
  /// Alignment required for \p Ty in the parameter area (at least 8 bytes).
  CharUnits getParamTypeAlignment(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  // TODO: We can add more logic to computeInfo to improve performance.
  // Example: For aggregate arguments that fit in a register, we could
  // use getDirectInReg (as is done below for structs containing a single
  // floating-point value) to avoid pushing them to memory on function
  // entry. This would require changing the logic in PPCISelLowering
  // when lowering the parameters in the caller and args in the callee.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments()) {
      // We rely on the default argument classification for the most part.
      // One exception: An aggregate containing a single floating-point
      // or vector item must be passed in a register if one is available.
      const Type *T = isSingleElementStruct(I.type, getContext());
      if (T) {
        const BuiltinType *BT = T->getAs<BuiltinType>();
        if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
            (BT && BT->isFloatingPoint())) {
          // Pass the single element directly, in a register.
          QualType QT(T, 0);
          I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
          continue;
        }
      }
      I.info = classifyArgumentType(I.type);
    }
  }

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;
};
661
/// PPC64_SVR4_TargetCodeGenInfo - Target hooks for 64-bit PowerPC ELF
/// (SVR4), both ELFv1 and ELFv2.
class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {

public:
  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, PPC64_SVR4_ABIKind Kind,
                               bool SoftFloatABI)
      : TargetCodeGenInfo(
            std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
  void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
                          const llvm::MapVector<GlobalDecl, StringRef>
                              &MangledDeclNames) const override;
};
684
/// PPC64TargetCodeGenInfo - Target hooks for other 64-bit PowerPC variants,
/// which rely on the default ABI lowering.
class PPC64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
698 }
699
700 // Return true if the ABI requires Ty to be passed sign- or zero-
701 // extended to 64 bits.
702 bool
isPromotableTypeForABI(QualType Ty) const703 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
704 // Treat an enum type as its underlying type.
705 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
706 Ty = EnumTy->getDecl()->getIntegerType();
707
708 // Promotable integer types are required to be promoted by the ABI.
709 if (isPromotableIntegerTypeForABI(Ty))
710 return true;
711
712 // In addition to the usual promotable integer types, we also need to
713 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
714 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
715 switch (BT->getKind()) {
716 case BuiltinType::Int:
717 case BuiltinType::UInt:
718 return true;
719 default:
720 break;
721 }
722
723 if (const auto *EIT = Ty->getAs<BitIntType>())
724 if (EIT->getNumBits() < 64)
725 return true;
726
727 return false;
728 }
729
730 /// isAlignedParamType - Determine whether a type requires 16-byte or
731 /// higher alignment in the parameter area. Always returns at least 8.
getParamTypeAlignment(QualType Ty) const732 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
733 // Complex types are passed just like their elements.
734 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
735 Ty = CTy->getElementType();
736
737 auto FloatUsesVector = [this](QualType Ty){
738 return Ty->isRealFloatingType() && &getContext().getFloatTypeSemantics(
739 Ty) == &llvm::APFloat::IEEEquad();
740 };
741
742 // Only vector types of size 16 bytes need alignment (larger types are
743 // passed via reference, smaller types are not aligned).
744 if (Ty->isVectorType()) {
745 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
746 } else if (FloatUsesVector(Ty)) {
747 // According to ABI document section 'Optional Save Areas': If extended
748 // precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION
749 // format are supported, map them to a single quadword, quadword aligned.
750 return CharUnits::fromQuantity(16);
751 }
752
753 // For single-element float/vector structs, we consider the whole type
754 // to have the same alignment requirements as its single element.
755 const Type *AlignAsType = nullptr;
756 const Type *EltType = isSingleElementStruct(Ty, getContext());
757 if (EltType) {
758 const BuiltinType *BT = EltType->getAs<BuiltinType>();
759 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
760 (BT && BT->isFloatingPoint()))
761 AlignAsType = EltType;
762 }
763
764 // Likewise for ELFv2 homogeneous aggregates.
765 const Type *Base = nullptr;
766 uint64_t Members = 0;
767 if (!AlignAsType && Kind == PPC64_SVR4_ABIKind::ELFv2 &&
768 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
769 AlignAsType = Base;
770
771 // With special case aggregates, only vector base types need alignment.
772 if (AlignAsType) {
773 bool UsesVector = AlignAsType->isVectorType() ||
774 FloatUsesVector(QualType(AlignAsType, 0));
775 return CharUnits::fromQuantity(UsesVector ? 16 : 8);
776 }
777
778 // Otherwise, we only need alignment for any aggregate type that
779 // has an alignment requirement of >= 16 bytes.
780 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
781 return CharUnits::fromQuantity(16);
782 }
783
784 return CharUnits::fromQuantity(8);
785 }
786
isHomogeneousAggregateBaseType(QualType Ty) const787 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
788 // Homogeneous aggregates for ELFv2 must have base types of float,
789 // double, long double, or 128-bit vectors.
790 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
791 if (BT->getKind() == BuiltinType::Float ||
792 BT->getKind() == BuiltinType::Double ||
793 BT->getKind() == BuiltinType::LongDouble ||
794 BT->getKind() == BuiltinType::Ibm128 ||
795 (getContext().getTargetInfo().hasFloat128Type() &&
796 (BT->getKind() == BuiltinType::Float128))) {
797 if (IsSoftFloatABI)
798 return false;
799 return true;
800 }
801 }
802 if (const VectorType *VT = Ty->getAs<VectorType>()) {
803 if (getContext().getTypeSize(VT) == 128)
804 return true;
805 }
806 return false;
807 }
808
isHomogeneousAggregateSmallEnough(const Type * Base,uint64_t Members) const809 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
810 const Type *Base, uint64_t Members) const {
811 // Vector and fp128 types require one register, other floating point types
812 // require one or two registers depending on their size.
813 uint32_t NumRegs =
814 ((getContext().getTargetInfo().hasFloat128Type() &&
815 Base->isFloat128Type()) ||
816 Base->isVectorType()) ? 1
817 : (getContext().getTypeSize(Base) + 63) / 64;
818
819 // Homogeneous Aggregates may occupy at most 8 registers.
820 return Members * NumRegs <= 8;
821 }
822
// Decide how a single argument of type Ty is passed under the 64-bit
// SVR4/ELFv2 PowerPC calling convention: directly (possibly coerced to an
// integer or array type), indirectly (by pointer / ByVal), or extended.
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
  // A transparent union is passed as its first member.
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Complex values are passed directly (as their real/imaginary parts).
  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (Ty->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size > 128)
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    else if (Size < 128) {
      // Sub-16-byte vectors are coerced to an integer of the same width.
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
    // Exactly-128-bit vectors fall through to the default direct handling
    // at the bottom of the function.
  }

  // _BitInt wider than 128 bits is passed indirectly as a ByVal copy.
  if (const auto *EIT = Ty->getAs<BitIntType>())
    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

  if (isAggregateTypeForABI(Ty)) {
    // C++ records with non-trivial copy/destroy semantics must go indirect.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    // ABI-required alignment vs. the type's natural alignment; a stricter
    // natural alignment forces realignment of the indirect copy below.
    uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
    uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();

    // ELFv2 homogeneous aggregates are passed as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == PPC64_SVR4_ABIKind::ELFv2 &&
        isHomogeneousAggregate(Ty, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // If an aggregate may end up fully in registers, we do not
    // use the ByVal method, but pass the aggregate as array.
    // This is usually beneficial since we avoid forcing the
    // back-end to store the argument to memory.
    uint64_t Bits = getContext().getTypeSize(Ty);
    if (Bits > 0 && Bits <= 8 * GPRBits) {
      llvm::Type *CoerceTy;

      // Types up to 8 bytes are passed as integer type (which will be
      // properly aligned in the argument save area doubleword).
      if (Bits <= GPRBits)
        CoerceTy =
            llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
      // Larger types are passed as arrays, with the base type selected
      // according to the required alignment in the save area.
      else {
        uint64_t RegBits = ABIAlign * 8;
        uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
        llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
        CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
      }

      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are passed ByVal.
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // Scalars: extend small promotable integers, otherwise pass directly.
  return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                     : ABIArgInfo::getDirect());
}
897
// Decide how a value of type RetTy is returned under the 64-bit SVR4/ELFv2
// PowerPC calling convention: ignored, direct (possibly coerced), or via an
// sret-style indirect pointer.
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Complex values are returned directly.
  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (RetTy->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(RetTy);
    if (Size > 128)
      return getNaturalAlignIndirect(RetTy);
    else if (Size < 128) {
      // Sub-16-byte vectors are coerced to an integer of the same width.
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
    // Exactly-128-bit vectors fall through to the default direct handling
    // at the bottom of the function.
  }

  // _BitInt wider than 128 bits is returned indirectly.
  if (const auto *EIT = RetTy->getAs<BitIntType>())
    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

  if (isAggregateTypeForABI(RetTy)) {
    // ELFv2 homogeneous aggregates are returned as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == PPC64_SVR4_ABIKind::ELFv2 &&
        isHomogeneousAggregate(RetTy, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // ELFv2 small aggregates are returned in up to two registers.
    uint64_t Bits = getContext().getTypeSize(RetTy);
    if (Kind == PPC64_SVR4_ABIKind::ELFv2 && Bits <= 2 * GPRBits) {
      // Empty aggregates need no return value at all.
      if (Bits == 0)
        return ABIArgInfo::getIgnore();

      llvm::Type *CoerceTy;
      if (Bits > GPRBits) {
        // Two GPRs: modeled as a struct of two register-sized integers.
        CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
        CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
      } else
        // One GPR: a single integer rounded up to a whole number of bytes.
        CoerceTy =
            llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are returned indirectly.
    return getNaturalAlignIndirect(RetTy);
  }

  // Scalars: extend small promotable integers, otherwise return directly.
  return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                        : ABIArgInfo::getDirect());
}
956
// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
//
// Emit the code to fetch one va_arg value of type Ty from the "void*"-style
// PPC64 va_list. Complex types with small element types get special
// right-adjusted handling; everything else uses the generic emitter.
RValue PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                     QualType Ty, AggValueSlot Slot) const {
  // Use the ABI-mandated parameter alignment, not the type's natural C
  // alignment, when stepping through the argument area.
  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
  TypeInfo.Align = getParamTypeAlignment(Ty);

  // Each slot in the argument save area is one 8-byte doubleword.
  CharUnits SlotSize = CharUnits::fromQuantity(8);

  // If we have a complex type and the base type is smaller than 8 bytes,
  // the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate doublewords. However, Clang expects us to produce a
  // pointer to a structure with the two parts packed tightly. So generate
  // loads of the real and imaginary parts relative to the va_list pointer,
  // and store them to a temporary structure.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    CharUnits EltSize = TypeInfo.Width / 2;
    if (EltSize < SlotSize)
      return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
  }

  // Otherwise, just use the general rule.
  //
  // The PPC64 ABI passes some arguments in integer registers, even to variadic
  // functions. To allow va_list to use the simple "void*" representation,
  // variadic calls allocate space in the argument area for the integer argument
  // registers, and variadic functions spill their integer argument registers to
  // this area in their prologues. When aggregates smaller than a register are
  // passed this way, they are passed in the least significant bits of the
  // register, which means that after spilling on big-endian targets they will
  // be right-aligned in their argument slot. This is uncommon; for a variety of
  // reasons, other big-endian targets don't end up right-aligning aggregate
  // types this way, and so right-alignment only applies to fundamental types.
  // So on PPC64, we must force the use of right-alignment even for aggregates.
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
                          SlotSize, /*AllowHigher*/ true, Slot,
                          /*ForceRightAdjust*/ true);
}
994
995 bool
initDwarfEHRegSizeTable(CodeGen::CodeGenFunction & CGF,llvm::Value * Address) const996 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
997 CodeGen::CodeGenFunction &CGF,
998 llvm::Value *Address) const {
999 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
1000 /*IsAIX*/ false);
1001 }
1002
emitTargetMetadata(CodeGen::CodeGenModule & CGM,const llvm::MapVector<GlobalDecl,StringRef> & MangledDeclNames) const1003 void PPC64_SVR4_TargetCodeGenInfo::emitTargetMetadata(
1004 CodeGen::CodeGenModule &CGM,
1005 const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {
1006 if (CGM.getTypes().isLongDoubleReferenced()) {
1007 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
1008 const auto *flt = &CGM.getTarget().getLongDoubleFormat();
1009 if (flt == &llvm::APFloat::PPCDoubleDouble())
1010 CGM.getModule().addModuleFlag(llvm::Module::Error, "float-abi",
1011 llvm::MDString::get(Ctx, "doubledouble"));
1012 else if (flt == &llvm::APFloat::IEEEquad())
1013 CGM.getModule().addModuleFlag(llvm::Module::Error, "float-abi",
1014 llvm::MDString::get(Ctx, "ieeequad"));
1015 else if (flt == &llvm::APFloat::IEEEdouble())
1016 CGM.getModule().addModuleFlag(llvm::Module::Error, "float-abi",
1017 llvm::MDString::get(Ctx, "ieeedouble"));
1018 }
1019 }
1020
1021 bool
initDwarfEHRegSizeTable(CodeGen::CodeGenFunction & CGF,llvm::Value * Address) const1022 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1023 llvm::Value *Address) const {
1024 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
1025 /*IsAIX*/ false);
1026 }
1027
1028 std::unique_ptr<TargetCodeGenInfo>
createAIXTargetCodeGenInfo(CodeGenModule & CGM,bool Is64Bit)1029 CodeGen::createAIXTargetCodeGenInfo(CodeGenModule &CGM, bool Is64Bit) {
1030 return std::make_unique<AIXTargetCodeGenInfo>(CGM.getTypes(), Is64Bit);
1031 }
1032
1033 std::unique_ptr<TargetCodeGenInfo>
createPPC32TargetCodeGenInfo(CodeGenModule & CGM,bool SoftFloatABI)1034 CodeGen::createPPC32TargetCodeGenInfo(CodeGenModule &CGM, bool SoftFloatABI) {
1035 bool RetSmallStructInRegABI = PPC32TargetCodeGenInfo::isStructReturnInRegABI(
1036 CGM.getTriple(), CGM.getCodeGenOpts());
1037 return std::make_unique<PPC32TargetCodeGenInfo>(CGM.getTypes(), SoftFloatABI,
1038 RetSmallStructInRegABI);
1039 }
1040
1041 std::unique_ptr<TargetCodeGenInfo>
createPPC64TargetCodeGenInfo(CodeGenModule & CGM)1042 CodeGen::createPPC64TargetCodeGenInfo(CodeGenModule &CGM) {
1043 return std::make_unique<PPC64TargetCodeGenInfo>(CGM.getTypes());
1044 }
1045
createPPC64_SVR4_TargetCodeGenInfo(CodeGenModule & CGM,PPC64_SVR4_ABIKind Kind,bool SoftFloatABI)1046 std::unique_ptr<TargetCodeGenInfo> CodeGen::createPPC64_SVR4_TargetCodeGenInfo(
1047 CodeGenModule &CGM, PPC64_SVR4_ABIKind Kind, bool SoftFloatABI) {
1048 return std::make_unique<PPC64_SVR4_TargetCodeGenInfo>(CGM.getTypes(), Kind,
1049 SoftFloatABI);
1050 }
1051