//===- PPC.cpp ------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

static Address complexTempStructure(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty, CharUnits SlotSize,
                                    CharUnits EltSize, const ComplexType *CTy) {
  Address Addr =
      emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, SlotSize * 2,
                             SlotSize, SlotSize, /*AllowHigher*/ true);

  Address RealAddr = Addr;
  Address ImagAddr = RealAddr;
  if (CGF.CGM.getDataLayout().isBigEndian()) {
    RealAddr =
        CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize - EltSize);
    ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
                                                      2 * SlotSize - EltSize);
  } else {
    ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
  }

  llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
  RealAddr = RealAddr.withElementType(EltTy);
  ImagAddr = ImagAddr.withElementType(EltTy);
  llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
  llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");

  Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
  CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
                         /*init*/ true);
  return Temp;
}

static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                        llvm::Value *Address, bool Is64Bit,
                                        bool IsAIX) {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output.  AFAIK all PPC ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-67 are various 4-byte or 8-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);

  // 68-76 are various 4-byte special-purpose registers:
  // 68-75 cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 68, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);

  // AIX does not utilize the rest of the registers.
  if (IsAIX)
    return false;

  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);

  if (!Is64Bit)
    return false;

  // TODO: Need to verify whether these registers are used on 64-bit AIX with
  // Power8 or later CPUs.
  // 64-bit only registers:
  // 114: tfhar
  // 115: tfiar
  // 116: texasr
  AssignToArrayRange(Builder, Address, Eight8, 114, 116);

  return false;
}

// AIX
namespace {
/// AIXABIInfo - The AIX XCOFF ABI information.
class AIXABIInfo : public ABIInfo {
  const bool Is64Bit;
  const unsigned PtrByteSize;
  CharUnits getParamTypeAlignment(QualType Ty) const;

public:
  AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
      : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}

  bool isPromotableTypeForABI(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
  const bool Is64Bit;

public:
  AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
      : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
        Is64Bit(Is64Bit) {}
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // namespace

// Return true if the ABI requires Ty to be passed sign- or zero-
// extended to 32/64 bits.
bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (getContext().isPromotableIntegerType(Ty))
    return true;

  if (!Is64Bit)
    return false;

  // For 64-bit mode, in addition to the usual promotable integer types, we
  // also need to extend all 32-bit types, since the ABI requires promotion to
  // 64 bits.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      break;
    }

  return false;
}

ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirect();

  if (RetTy->isVectorType())
    return ABIArgInfo::getDirect();

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                        : ABIArgInfo::getDirect());
}

ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  if (Ty->isVectorType())
    return ABIArgInfo::getDirect();

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    CharUnits CCAlign = getParamTypeAlignment(Ty);
    CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);

    return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true,
                                   /*Realign*/ TyAlign > CCAlign);
  }

  return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                     : ABIArgInfo::getDirect());
}

CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
  // Complex types are passed just like their elements.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  if (Ty->isVectorType())
    return CharUnits::fromQuantity(16);

  // If the structure contains a vector type, the alignment is 16.
  if (isRecordWithSIMDVectorType(getContext(), Ty))
    return CharUnits::fromQuantity(16);

  return CharUnits::fromQuantity(PtrByteSize);
}

Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const {

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
  TypeInfo.Align = getParamTypeAlignment(Ty);

  CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);

  // If we have a complex type and the base type is smaller than the register
  // size, the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate words in 32-bit mode or doublewords in 64-bit mode. However,
  // Clang expects us to produce a pointer to a structure with the two parts
  // packed tightly. So generate loads of the real and imaginary parts relative
  // to the va_list pointer, and store them to a temporary structure. We do
  // the same as the PPC64 ABI here.
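  // For example, under the 64-bit AIX ABI a "float _Complex" argument has
  // EltSize 4 and SlotSize 8, so each part is loaded from the high-addressed
  // half of its own doubleword and repacked into the 8-byte temporary.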
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    CharUnits EltSize = TypeInfo.Width / 2;
    if (EltSize < SlotSize)
      return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
                          SlotSize, /*AllowHigher*/ true);
}

bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
}

// PowerPC-32
namespace {
/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
  bool IsSoftFloatABI;
  bool IsRetSmallStructInRegABI;

  CharUnits getParamTypeAlignment(QualType Ty) const;

public:
  PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI,
                     bool RetSmallStructInRegABI)
      : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
                         bool RetSmallStructInRegABI)
      : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
            CGT, SoftFloatABI, RetSmallStructInRegABI)) {}

  static bool isStructReturnInRegABI(const llvm::Triple &Triple,
                                     const CodeGenOptions &Opts);

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
}

CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
  // Complex types are passed just like their elements.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  if (Ty->isVectorType())
    return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
                                                                       : 4);

  // For single-element float/vector structs, we consider the whole type
  // to have the same alignment requirements as its single element.
  const Type *AlignTy = nullptr;
  if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
        (BT && BT->isFloatingPoint()))
      AlignTy = EltType;
  }

  if (AlignTy)
    return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
  return CharUnits::fromQuantity(4);
}

ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  uint64_t Size;

  // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
  if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI &&
      (Size = getContext().getTypeSize(RetTy)) <= 64) {
    // System V ABI (1995), page 3-22, specified:
    // > A structure or union whose size is less than or equal to 8 bytes
    // > shall be returned in r3 and r4, as if it were first stored in the
    // > 8-byte aligned memory area and then the low addressed word were
    // > loaded into r3 and the high-addressed word into r4.  Bits beyond
    // > the last member of the structure or union are not defined.
    //
    // GCC for big-endian PPC32 inserts the pad before the first member,
    // not "beyond the last member" of the struct.  To stay compatible
    // with GCC, we coerce the struct to an integer of the same size.
    // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
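    // For example, a 6-byte struct has Size == 48 and is coerced to i48,
    // which LLVM widens and returns across r3:r4.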
    if (Size == 0)
      return ABIArgInfo::getIgnore();
    else {
      llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  return DefaultABIInfo::classifyReturnType(RetTy);
}

// TODO: this implementation is now likely redundant with
// DefaultABIInfo::EmitVAArg.
Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
                                      QualType Ty) const {
  if (getTarget().getTriple().isOSDarwin()) {
    auto TI = getContext().getTypeInfoInChars(Ty);
    TI.Align = getParamTypeAlignment(Ty);

    CharUnits SlotSize = CharUnits::fromQuantity(4);
    return emitVoidPtrVAArg(CGF, VAList, Ty,
                            classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
                            /*AllowHigherAlign=*/true);
  }

  const unsigned OverflowLimit = 8;
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    // TODO: Implement this. For now ignore.
    (void)CTy;
    return Address::invalid(); // FIXME?
  }

  // struct __va_list_tag {
  //   unsigned char gpr;
  //   unsigned char fpr;
  //   unsigned short reserved;
  //   void *overflow_arg_area;
  //   void *reg_save_area;
  // };

  bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
  bool isInt = !Ty->isFloatingType();
  bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;

  // All aggregates are passed indirectly?  That doesn't seem consistent
  // with the argument-lowering code.
  bool isIndirect = isAggregateTypeForABI(Ty);

  CGBuilderTy &Builder = CGF.Builder;

  // The calling convention either uses 1-2 GPRs or 1 FPR.
  Address NumRegsAddr = Address::invalid();
  if (isInt || IsSoftFloatABI) {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
  } else {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
  }

  llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");

  // "Align" the register count when TY is i64.
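  // For example, if three GPRs are already in use, rounding up to four makes
  // the 64-bit value start at an even GPR index, so it occupies the r7:r8
  // pair rather than straddling r6:r7.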
  if (isI64 || (isF64 && IsSoftFloatABI)) {
    NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
    NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
  }

  llvm::Value *CC =
      Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");

  llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
  llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
  llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");

  Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);

  llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
  if (isIndirect)
    DirectTy = CGF.UnqualPtrTy;

  // Case 1: consume registers.
  Address RegAddr = Address::invalid();
  {
    CGF.EmitBlock(UsingRegs);

    Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
    RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), CGF.Int8Ty,
                      CharUnits::fromQuantity(8));
    assert(RegAddr.getElementType() == CGF.Int8Ty);

    // Floating-point registers start after the general-purpose registers.
    if (!(isInt || IsSoftFloatABI)) {
      RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
                                                   CharUnits::fromQuantity(32));
    }

    // Get the address of the saved value by scaling the number of
    // registers we've used by the size of each register.
    CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
    llvm::Value *RegOffset =
        Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
    RegAddr = Address(
        Builder.CreateInBoundsGEP(CGF.Int8Ty, RegAddr.getPointer(), RegOffset),
        DirectTy, RegAddr.getAlignment().alignmentOfArrayElement(RegSize));

    // Increase the used-register count.
    NumRegs =
      Builder.CreateAdd(NumRegs,
                        Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
    Builder.CreateStore(NumRegs, NumRegsAddr);

    CGF.EmitBranch(Cont);
  }

  // Case 2: consume space in the overflow area.
  Address MemAddr = Address::invalid();
  {
    CGF.EmitBlock(UsingOverflow);

    Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);

    // Everything in the overflow area is rounded up to a size of at least 4.
    CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);

    CharUnits Size;
    if (!isIndirect) {
      auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
      Size = TypeInfo.Width.alignTo(OverflowAreaAlign);
    } else {
      Size = CGF.getPointerSize();
    }

    Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
    Address OverflowArea =
        Address(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), CGF.Int8Ty,
                OverflowAreaAlign);
    // Round up address of argument to alignment
    CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
    if (Align > OverflowAreaAlign) {
      llvm::Value *Ptr = OverflowArea.getPointer();
      OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
                             OverflowArea.getElementType(), Align);
    }

    MemAddr = OverflowArea.withElementType(DirectTy);

    // Increase the overflow area.
    OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
    Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
    CGF.EmitBranch(Cont);
  }

  CGF.EmitBlock(Cont);

  // Merge the cases with a phi.
  Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
                                "vaarg.addr");

  // Load the pointer if the argument was passed indirectly.
  if (isIndirect) {
    Result = Address(Builder.CreateLoad(Result, "aggr"), ElementTy,
                     getContext().getTypeAlignInChars(Ty));
  }

  return Result;
}

bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.isPPC32());

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
    return true;
  }

  if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
    return true;

  return false;
}

bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
                                     /*IsAIX*/ false);
}

// PowerPC-64

namespace {

/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public ABIInfo {
  static const unsigned GPRBits = 64;
  PPC64_SVR4_ABIKind Kind;
  bool IsSoftFloatABI;

public:
  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, PPC64_SVR4_ABIKind Kind,
                     bool SoftFloatABI)
      : ABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {}

  bool isPromotableTypeForABI(QualType Ty) const;
  CharUnits getParamTypeAlignment(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  // TODO: We can add more logic to computeInfo to improve performance.
  // Example: For aggregate arguments that fit in a register, we could
  // use getDirectInReg (as is done below for structs containing a single
  // floating-point value) to avoid pushing them to memory on function
  // entry.  This would require changing the logic in PPCISelLowering
  // when lowering the parameters in the caller and args in the callee.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments()) {
      // We rely on the default argument classification for the most part.
      // One exception:  An aggregate containing a single floating-point
      // or vector item must be passed in a register if one is available.
      const Type *T = isSingleElementStruct(I.type, getContext());
      if (T) {
        const BuiltinType *BT = T->getAs<BuiltinType>();
        if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
            (BT && BT->isFloatingPoint())) {
          QualType QT(T, 0);
          I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
          continue;
        }
      }
      I.info = classifyArgumentType(I.type);
    }
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {

public:
  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, PPC64_SVR4_ABIKind Kind,
                               bool SoftFloatABI)
      : TargetCodeGenInfo(
            std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
  void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
                          const llvm::MapVector<GlobalDecl, StringRef>
                              &MangledDeclNames) const override;
};

class PPC64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
}

// Return true if the ABI requires Ty to be passed sign- or zero-
// extended to 64 bits.
bool
PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (isPromotableIntegerTypeForABI(Ty))
    return true;

  // In addition to the usual promotable integer types, we also need to
  // extend all 32-bit types, since the ABI requires promotion to 64 bits.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      break;
    }

  if (const auto *EIT = Ty->getAs<BitIntType>())
    if (EIT->getNumBits() < 64)
      return true;

  return false;
}

/// isAlignedParamType - Determine whether a type requires 16-byte or
/// higher alignment in the parameter area.  Always returns at least 8.
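/// For example, a 128-bit vector type (or a single-element struct wrapping
/// one) reports 16, while a plain double reports 8.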
CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
  // Complex types are passed just like their elements.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  auto FloatUsesVector = [this](QualType Ty){
    return Ty->isRealFloatingType() && &getContext().getFloatTypeSemantics(
                                           Ty) == &llvm::APFloat::IEEEquad();
  };

  // Only vector types of size 16 bytes need alignment (larger types are
  // passed via reference, smaller types are not aligned).
  if (Ty->isVectorType()) {
    return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
  } else if (FloatUsesVector(Ty)) {
    // According to ABI document section 'Optional Save Areas': If extended
    // precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION
    // format are supported, map them to a single quadword, quadword aligned.
    return CharUnits::fromQuantity(16);
  }

  // For single-element float/vector structs, we consider the whole type
  // to have the same alignment requirements as its single element.
  const Type *AlignAsType = nullptr;
  const Type *EltType = isSingleElementStruct(Ty, getContext());
  if (EltType) {
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
        (BT && BT->isFloatingPoint()))
      AlignAsType = EltType;
  }

  // Likewise for ELFv2 homogeneous aggregates.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (!AlignAsType && Kind == PPC64_SVR4_ABIKind::ELFv2 &&
      isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
    AlignAsType = Base;

  // With special case aggregates, only vector base types need alignment.
  if (AlignAsType) {
    bool UsesVector = AlignAsType->isVectorType() ||
                      FloatUsesVector(QualType(AlignAsType, 0));
    return CharUnits::fromQuantity(UsesVector ? 16 : 8);
  }

  // Otherwise, we only need alignment for any aggregate type that
  // has an alignment requirement of >= 16 bytes.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
    return CharUnits::fromQuantity(16);
  }

  return CharUnits::fromQuantity(8);
}

bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for ELFv2 must have base types of float,
  // double, long double, or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble ||
        BT->getKind() == BuiltinType::Ibm128 ||
        (getContext().getTargetInfo().hasFloat128Type() &&
         (BT->getKind() == BuiltinType::Float128))) {
      if (IsSoftFloatABI)
        return false;
      return true;
    }
  }
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (getContext().getTypeSize(VT) == 128)
      return true;
  }
  return false;
}

bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  // Vector and fp128 types require one register, other floating point types
  // require one or two registers depending on their size.
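  // For example, a double needs one register, while the 128-bit IBM long
  // double needs two.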
  uint32_t NumRegs =
      ((getContext().getTargetInfo().hasFloat128Type() &&
          Base->isFloat128Type()) ||
        Base->isVectorType()) ? 1
                              : (getContext().getTypeSize(Base) + 63) / 64;

  // Homogeneous Aggregates may occupy at most 8 registers.
  return Members * NumRegs <= 8;
}

ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (Ty->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size > 128)
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (const auto *EIT = Ty->getAs<BitIntType>())
    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
    uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();

    // ELFv2 homogeneous aggregates are passed as array types.
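    // For example, a struct of three doubles is coerced to [3 x double] so
    // the back-end can place the members in consecutive floating-point
    // registers when they are available.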
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == PPC64_SVR4_ABIKind::ELFv2 &&
        isHomogeneousAggregate(Ty, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // If an aggregate may end up fully in registers, we do not
    // use the ByVal method, but pass the aggregate as array.
    // This is usually beneficial since we avoid forcing the
    // back-end to store the argument to memory.
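    // For example, a 24-byte struct with 8-byte alignment is coerced to
    // [3 x i64] and occupies three doublewords of the argument area.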
    uint64_t Bits = getContext().getTypeSize(Ty);
    if (Bits > 0 && Bits <= 8 * GPRBits) {
      llvm::Type *CoerceTy;

      // Types up to 8 bytes are passed as integer type (which will be
      // properly aligned in the argument save area doubleword).
      if (Bits <= GPRBits)
        CoerceTy =
            llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
      // Larger types are passed as arrays, with the base type selected
      // according to the required alignment in the save area.
      else {
        uint64_t RegBits = ABIAlign * 8;
        uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
        llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
        CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
      }

      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are passed ByVal.
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                     : ABIArgInfo::getDirect());
}

ABIArgInfo
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (RetTy->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(RetTy);
    if (Size > 128)
      return getNaturalAlignIndirect(RetTy);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (const auto *EIT = RetTy->getAs<BitIntType>())
    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

  if (isAggregateTypeForABI(RetTy)) {
    // ELFv2 homogeneous aggregates are returned as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == PPC64_SVR4_ABIKind::ELFv2 &&
        isHomogeneousAggregate(RetTy, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // ELFv2 small aggregates are returned in up to two registers.
    uint64_t Bits = getContext().getTypeSize(RetTy);
    if (Kind == PPC64_SVR4_ABIKind::ELFv2 && Bits <= 2 * GPRBits) {
      if (Bits == 0)
        return ABIArgInfo::getIgnore();

      llvm::Type *CoerceTy;
      if (Bits > GPRBits) {
        CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
        CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
      } else
        CoerceTy =
            llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are returned indirectly.
    return getNaturalAlignIndirect(RetTy);
  }

  return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                        : ABIArgInfo::getDirect());
}

// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
  TypeInfo.Align = getParamTypeAlignment(Ty);

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  // If we have a complex type and the base type is smaller than 8 bytes,
  // the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate doublewords.  However, Clang expects us to produce a
  // pointer to a structure with the two parts packed tightly.  So generate
  // loads of the real and imaginary parts relative to the va_list pointer,
  // and store them to a temporary structure.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    CharUnits EltSize = TypeInfo.Width / 2;
    if (EltSize < SlotSize)
      return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
  }

  // Otherwise, just use the general rule.
  //
  // The PPC64 ABI passes some arguments in integer registers, even to variadic
  // functions. To allow va_list to use the simple "void*" representation,
  // variadic calls allocate space in the argument area for the integer argument
  // registers, and variadic functions spill their integer argument registers to
  // this area in their prologues. When aggregates smaller than a register are
  // passed this way, they are passed in the least significant bits of the
  // register, which means that after spilling on big-endian targets they will
  // be right-aligned in their argument slot. This is uncommon; for a variety of
  // reasons, other big-endian targets don't end up right-aligning aggregate
  // types this way, and so right-alignment only applies to fundamental types.
  // So on PPC64, we must force the use of right-alignment even for aggregates.
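  // For example, a 2-byte struct read via va_arg on a big-endian target sits
  // in the last two bytes of its 8-byte slot, so the argument pointer must be
  // advanced by six bytes before the value is loaded.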
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
                          SlotSize, /*AllowHigher*/ true,
                          /*ForceRightAdjust*/ true);
}

bool
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
  CodeGen::CodeGenFunction &CGF,
  llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
                                     /*IsAIX*/ false);
}

void PPC64_SVR4_TargetCodeGenInfo::emitTargetMetadata(
    CodeGen::CodeGenModule &CGM,
    const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {
  if (CGM.getTypes().isLongDoubleReferenced()) {
    llvm::LLVMContext &Ctx = CGM.getLLVMContext();
    const auto *flt = &CGM.getTarget().getLongDoubleFormat();
    if (flt == &llvm::APFloat::PPCDoubleDouble())
      CGM.getModule().addModuleFlag(llvm::Module::Error, "float-abi",
                                    llvm::MDString::get(Ctx, "doubledouble"));
    else if (flt == &llvm::APFloat::IEEEquad())
      CGM.getModule().addModuleFlag(llvm::Module::Error, "float-abi",
                                    llvm::MDString::get(Ctx, "ieeequad"));
    else if (flt == &llvm::APFloat::IEEEdouble())
      CGM.getModule().addModuleFlag(llvm::Module::Error, "float-abi",
                                    llvm::MDString::get(Ctx, "ieeedouble"));
  }
}

bool
PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
                                     /*IsAIX*/ false);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAIXTargetCodeGenInfo(CodeGenModule &CGM, bool Is64Bit) {
  return std::make_unique<AIXTargetCodeGenInfo>(CGM.getTypes(), Is64Bit);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createPPC32TargetCodeGenInfo(CodeGenModule &CGM, bool SoftFloatABI) {
  bool RetSmallStructInRegABI = PPC32TargetCodeGenInfo::isStructReturnInRegABI(
      CGM.getTriple(), CGM.getCodeGenOpts());
  return std::make_unique<PPC32TargetCodeGenInfo>(CGM.getTypes(), SoftFloatABI,
                                                  RetSmallStructInRegABI);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createPPC64TargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<PPC64TargetCodeGenInfo>(CGM.getTypes());
}

std::unique_ptr<TargetCodeGenInfo> CodeGen::createPPC64_SVR4_TargetCodeGenInfo(
    CodeGenModule &CGM, PPC64_SVR4_ABIKind Kind, bool SoftFloatABI) {
  return std::make_unique<PPC64_SVR4_TargetCodeGenInfo>(CGM.getTypes(), Kind,
                                                        SoftFloatABI);
}