//===- PPC.cpp ------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

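// Helper shared by the AIX and PPC64 SVR4 va_arg lowerings: load the real and
// imaginary parts of a complex argument from its two va_list slots (each part
// right-adjusted within its slot on big-endian targets) and repack them into a
// tightly packed temporary, which is what the rest of Clang expects.
// Illustrative example (not from the upstream comments): on big-endian 64-bit
// targets a va_arg of float _Complex has each 4-byte part in the high-addressed
// half of its own 8-byte slot, so the loads below use offsets
// SlotSize - EltSize and 2 * SlotSize - EltSize.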
static Address complexTempStructure(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty, CharUnits SlotSize,
                                    CharUnits EltSize, const ComplexType *CTy) {
  Address Addr =
      emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, SlotSize * 2,
                             SlotSize, SlotSize, /*AllowHigher*/ true);

  Address RealAddr = Addr;
  Address ImagAddr = RealAddr;
  if (CGF.CGM.getDataLayout().isBigEndian()) {
    RealAddr =
        CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize - EltSize);
    ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
                                                      2 * SlotSize - EltSize);
  } else {
    ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
  }

  llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
  RealAddr = RealAddr.withElementType(EltTy);
  ImagAddr = ImagAddr.withElementType(EltTy);
  llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
  llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");

  Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
  CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
                         /*init*/ true);
  return Temp;
}

static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                        llvm::Value *Address, bool Is64Bit,
                                        bool IsAIX) {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output.  AFAIK all PPC ABIs use the same encoding.
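  // Illustrative note (not in the upstream comment): CodeGen invokes this hook
  // when lowering __builtin_init_dwarf_reg_size_table, e.g.
  //   unsigned char RegSizes[117]; // indexes 0..116 are written below
  //   __builtin_init_dwarf_reg_size_table(RegSizes);
  // and the `false` return values below signal that the table was populated.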

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-67 are various 4-byte or 8-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);

  // 68-76 are various 4-byte special-purpose registers:
  // 68-75 cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 68, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);

  // AIX does not utilize the rest of the registers.
  if (IsAIX)
    return false;

  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);

  if (!Is64Bit)
    return false;

  // TODO: Verify whether these registers are used on 64-bit AIX with POWER8 or
  // later CPUs.
  // 64-bit only registers:
  // 114: tfhar
  // 115: tfiar
  // 116: texasr
  AssignToArrayRange(Builder, Address, Eight8, 114, 116);

  return false;
}

// AIX
namespace {
/// AIXABIInfo - The AIX XCOFF ABI information.
class AIXABIInfo : public ABIInfo {
  const bool Is64Bit;
  const unsigned PtrByteSize;
  CharUnits getParamTypeAlignment(QualType Ty) const;

public:
  AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
      : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}

  bool isPromotableTypeForABI(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
  const bool Is64Bit;

public:
  AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
      : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
        Is64Bit(Is64Bit) {}
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // namespace

// Return true if the ABI requires Ty to be passed sign- or zero-
// extended to 32/64 bits.
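// For example (illustrative, derived from the rules below): bool, char and
// short are promoted to 32 bits in both modes, and int/unsigned int are
// additionally extended to 64 bits when targeting 64-bit AIX.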
bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (getContext().isPromotableIntegerType(Ty))
    return true;

  if (!Is64Bit)
    return false;

  // For 64-bit mode, in addition to the usual promotable integer types, we
  // also need to extend all 32-bit types, since the ABI requires promotion to
  // 64 bits.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      break;
    }

  return false;
}

ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirect();

  if (RetTy->isVectorType())
    return ABIArgInfo::getDirect();

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                        : ABIArgInfo::getDirect());
}

ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  if (Ty->isVectorType())
    return ABIArgInfo::getDirect();

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    CharUnits CCAlign = getParamTypeAlignment(Ty);
    CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);

    return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true,
                                   /*Realign*/ TyAlign > CCAlign);
  }

  return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                     : ABIArgInfo::getDirect());
}

CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
  // Complex types are passed just like their elements.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  if (Ty->isVectorType())
    return CharUnits::fromQuantity(16);

  // If the structure contains a vector type, the alignment is 16.
  if (isRecordWithSIMDVectorType(getContext(), Ty))
    return CharUnits::fromQuantity(16);

  return CharUnits::fromQuantity(PtrByteSize);
}

Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const {

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
  TypeInfo.Align = getParamTypeAlignment(Ty);

  CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);

  // If we have a complex type and the base type is smaller than the register
  // size, the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate words in 32-bit mode or doublewords in 64-bit mode. However,
  // Clang expects us to produce a pointer to a structure with the two parts
  // packed tightly. So generate loads of the real and imaginary parts relative
  // to the va_list pointer, and store them to a temporary structure. We do the
  // same as the PPC64 ABI here.
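  // For example (illustrative): on 64-bit AIX, va_arg(ap, float _Complex) has
  // EltSize == 4 < SlotSize == 8 and takes the path below, while on 32-bit AIX
  // the same type has EltSize == SlotSize == 4 and falls through to the
  // generic emitVoidPtrVAArg call.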
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    CharUnits EltSize = TypeInfo.Width / 2;
    if (EltSize < SlotSize)
      return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
                          SlotSize, /*AllowHigher*/ true);
}

bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
}

// PowerPC-32
namespace {
/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
  bool IsSoftFloatABI;
  bool IsRetSmallStructInRegABI;

  CharUnits getParamTypeAlignment(QualType Ty) const;

public:
  PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI,
                     bool RetSmallStructInRegABI)
      : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
                         bool RetSmallStructInRegABI)
      : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
            CGT, SoftFloatABI, RetSmallStructInRegABI)) {}

  static bool isStructReturnInRegABI(const llvm::Triple &Triple,
                                     const CodeGenOptions &Opts);

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
}

CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
  // Complex types are passed just like their elements.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  if (Ty->isVectorType())
    return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
                                                                       : 4);

  // For single-element float/vector structs, we consider the whole type
  // to have the same alignment requirements as its single element.
  const Type *AlignTy = nullptr;
  if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
        (BT && BT->isFloatingPoint()))
      AlignTy = EltType;
  }

  if (AlignTy)
    return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
  return CharUnits::fromQuantity(4);
}

ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  uint64_t Size;

  // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
  if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI &&
      (Size = getContext().getTypeSize(RetTy)) <= 64) {
    // System V ABI (1995), page 3-22, specified:
    // > A structure or union whose size is less than or equal to 8 bytes
    // > shall be returned in r3 and r4, as if it were first stored in the
    // > 8-byte aligned memory area and then the low addressed word were
    // > loaded into r3 and the high-addressed word into r4.  Bits beyond
    // > the last member of the structure or union are not defined.
    //
    // GCC for big-endian PPC32 inserts the pad before the first member,
    // not "beyond the last member" of the struct.  To stay compatible
    // with GCC, we coerce the struct to an integer of the same size.
    // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
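    // For example (illustrative): a 3-byte struct { char c[3]; } is coerced to
    // i24 (returned extended in r3), while an 8-byte struct { int a, b; } is
    // coerced to i64 and comes back in r3:r4.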
    if (Size == 0)
      return ABIArgInfo::getIgnore();
    else {
      llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  return DefaultABIInfo::classifyReturnType(RetTy);
}

// TODO: this implementation is now likely redundant with
// DefaultABIInfo::EmitVAArg.
Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
                                      QualType Ty) const {
  if (getTarget().getTriple().isOSDarwin()) {
    auto TI = getContext().getTypeInfoInChars(Ty);
    TI.Align = getParamTypeAlignment(Ty);

    CharUnits SlotSize = CharUnits::fromQuantity(4);
    return emitVoidPtrVAArg(CGF, VAList, Ty,
                            classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
                            /*AllowHigherAlign=*/true);
  }

  const unsigned OverflowLimit = 8;
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    // TODO: Implement this. For now ignore.
    (void)CTy;
    return Address::invalid(); // FIXME?
  }

  // struct __va_list_tag {
  //   unsigned char gpr;
  //   unsigned char fpr;
  //   unsigned short reserved;
  //   void *overflow_arg_area;
  //   void *reg_save_area;
  // };

  bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
  bool isInt = !Ty->isFloatingType();
  bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;

  // All aggregates are passed indirectly?  That doesn't seem consistent
  // with the argument-lowering code.
  bool isIndirect = isAggregateTypeForABI(Ty);

  CGBuilderTy &Builder = CGF.Builder;

  // The calling convention either uses 1-2 GPRs or 1 FPR.
  Address NumRegsAddr = Address::invalid();
  if (isInt || IsSoftFloatABI) {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
  } else {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
  }

  llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");

  // "Align" the register count when TY is i64.
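  // (Illustrative: if three GPRs have been consumed, rounding up to four makes
  // a 64-bit value land in the r7:r8 pair, so doubleword arguments always
  // occupy an aligned pair out of r3:r4, r5:r6, r7:r8, r9:r10.)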
  if (isI64 || (isF64 && IsSoftFloatABI)) {
    NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
    NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
  }

  llvm::Value *CC =
      Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");

  llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
  llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
  llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");

  Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);

  llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
  if (isIndirect)
    DirectTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());

  // Case 1: consume registers.
  Address RegAddr = Address::invalid();
  {
    CGF.EmitBlock(UsingRegs);

    Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
    RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), CGF.Int8Ty,
                      CharUnits::fromQuantity(8));
    assert(RegAddr.getElementType() == CGF.Int8Ty);

    // Floating-point registers start after the general-purpose registers.
    if (!(isInt || IsSoftFloatABI)) {
      RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
                                                   CharUnits::fromQuantity(32));
    }

    // Get the address of the saved value by scaling the number of
    // registers we've used by the size of each register.
    CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
    llvm::Value *RegOffset =
        Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
    RegAddr = Address(
        Builder.CreateInBoundsGEP(CGF.Int8Ty, RegAddr.getPointer(), RegOffset),
        DirectTy, RegAddr.getAlignment().alignmentOfArrayElement(RegSize));

    // Increase the used-register count.
    NumRegs =
      Builder.CreateAdd(NumRegs,
                        Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
    Builder.CreateStore(NumRegs, NumRegsAddr);

    CGF.EmitBranch(Cont);
  }

  // Case 2: consume space in the overflow area.
  Address MemAddr = Address::invalid();
  {
    CGF.EmitBlock(UsingOverflow);

    Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);

    // Everything in the overflow area is rounded up to a size of at least 4.
    CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);

    CharUnits Size;
    if (!isIndirect) {
      auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
      Size = TypeInfo.Width.alignTo(OverflowAreaAlign);
    } else {
      Size = CGF.getPointerSize();
    }

    Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
    Address OverflowArea =
        Address(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), CGF.Int8Ty,
                OverflowAreaAlign);
    // Round up address of argument to alignment
    CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
    if (Align > OverflowAreaAlign) {
      llvm::Value *Ptr = OverflowArea.getPointer();
      OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
                             OverflowArea.getElementType(), Align);
    }

    MemAddr = OverflowArea.withElementType(DirectTy);

    // Increase the overflow area.
    OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
    Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
    CGF.EmitBranch(Cont);
  }

  CGF.EmitBlock(Cont);

  // Merge the cases with a phi.
  Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
                                "vaarg.addr");

  // Load the pointer if the argument was passed indirectly.
  if (isIndirect) {
    Result = Address(Builder.CreateLoad(Result, "aggr"), ElementTy,
                     getContext().getTypeAlignInChars(Ty));
  }

  return Result;
}

bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.isPPC32());

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
    return true;
  }

  if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
    return true;

  return false;
}

bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
                                     /*IsAIX*/ false);
}

// PowerPC-64

namespace {

/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public ABIInfo {
  static const unsigned GPRBits = 64;
  PPC64_SVR4_ABIKind Kind;
  bool IsSoftFloatABI;

public:
  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, PPC64_SVR4_ABIKind Kind,
                     bool SoftFloatABI)
      : ABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {}

  bool isPromotableTypeForABI(QualType Ty) const;
  CharUnits getParamTypeAlignment(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  // TODO: We can add more logic to computeInfo to improve performance.
  // Example: For aggregate arguments that fit in a register, we could
  // use getDirectInReg (as is done below for structs containing a single
  // floating-point value) to avoid pushing them to memory on function
  // entry.  This would require changing the logic in PPCISelLowering
  // when lowering the parameters in the caller and args in the callee.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments()) {
      // We rely on the default argument classification for the most part.
      // One exception:  An aggregate containing a single floating-point
      // or vector item must be passed in a register if one is available.
      const Type *T = isSingleElementStruct(I.type, getContext());
      if (T) {
        const BuiltinType *BT = T->getAs<BuiltinType>();
        if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
            (BT && BT->isFloatingPoint())) {
          QualType QT(T, 0);
          I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
          continue;
        }
      }
      I.info = classifyArgumentType(I.type);
    }
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {

public:
  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, PPC64_SVR4_ABIKind Kind,
                               bool SoftFloatABI)
      : TargetCodeGenInfo(
            std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};

class PPC64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
}

// Return true if the ABI requires Ty to be passed sign- or zero-
// extended to 64 bits.
bool
PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (isPromotableIntegerTypeForABI(Ty))
    return true;

  // In addition to the usual promotable integer types, we also need to
  // extend all 32-bit types, since the ABI requires promotion to 64 bits.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      break;
    }

  if (const auto *EIT = Ty->getAs<BitIntType>())
    if (EIT->getNumBits() < 64)
      return true;

  return false;
}

/// isAlignedParamType - Determine whether a type requires 16-byte or
/// higher alignment in the parameter area.  Always returns at least 8.
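/// For example (illustrative): a 16-byte Altivec vector, and a long double
/// when it uses the IEEE quad format, both get 16-byte alignment here, while
/// an ordinary struct of ints falls through to the default of 8.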
CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
  // Complex types are passed just like their elements.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  auto FloatUsesVector = [this](QualType Ty){
    return Ty->isRealFloatingType() && &getContext().getFloatTypeSemantics(
                                           Ty) == &llvm::APFloat::IEEEquad();
  };

  // Only vector types of size 16 bytes need alignment (larger types are
  // passed via reference, smaller types are not aligned).
  if (Ty->isVectorType()) {
    return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
  } else if (FloatUsesVector(Ty)) {
    // According to ABI document section 'Optional Save Areas': If extended
    // precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION
    // format are supported, map them to a single quadword, quadword aligned.
    return CharUnits::fromQuantity(16);
  }

  // For single-element float/vector structs, we consider the whole type
  // to have the same alignment requirements as its single element.
  const Type *AlignAsType = nullptr;
  const Type *EltType = isSingleElementStruct(Ty, getContext());
  if (EltType) {
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
        (BT && BT->isFloatingPoint()))
      AlignAsType = EltType;
  }

  // Likewise for ELFv2 homogeneous aggregates.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (!AlignAsType && Kind == PPC64_SVR4_ABIKind::ELFv2 &&
      isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
    AlignAsType = Base;

  // With special case aggregates, only vector base types need alignment.
  if (AlignAsType) {
    bool UsesVector = AlignAsType->isVectorType() ||
                      FloatUsesVector(QualType(AlignAsType, 0));
    return CharUnits::fromQuantity(UsesVector ? 16 : 8);
  }

  // Otherwise, we only need alignment for any aggregate type that
  // has an alignment requirement of >= 16 bytes.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
    return CharUnits::fromQuantity(16);
  }

  return CharUnits::fromQuantity(8);
}

bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for ELFv2 must have base types of float,
  // double, long double, or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble ||
        BT->getKind() == BuiltinType::Ibm128 ||
        (getContext().getTargetInfo().hasFloat128Type() &&
         (BT->getKind() == BuiltinType::Float128))) {
      if (IsSoftFloatABI)
        return false;
      return true;
    }
  }
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (getContext().getTypeSize(VT) == 128)
      return true;
  }
  return false;
}

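// For example (illustrative): a struct of eight floats still qualifies
// (8 members x 1 register), while a struct of nine floats, or of five
// 128-bit long doubles (5 x 2 registers), exceeds the 8-register limit below.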
bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  // Vector and fp128 types require one register, other floating point types
  // require one or two registers depending on their size.
  uint32_t NumRegs =
      ((getContext().getTargetInfo().hasFloat128Type() &&
          Base->isFloat128Type()) ||
        Base->isVectorType()) ? 1
                              : (getContext().getTypeSize(Base) + 63) / 64;

  // Homogeneous Aggregates may occupy at most 8 registers.
  return Members * NumRegs <= 8;
}

ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (Ty->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size > 128)
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (const auto *EIT = Ty->getAs<BitIntType>())
    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
    uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();

    // ELFv2 homogeneous aggregates are passed as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == PPC64_SVR4_ABIKind::ELFv2 &&
        isHomogeneousAggregate(Ty, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // If an aggregate may end up fully in registers, we do not
    // use the ByVal method, but pass the aggregate as array.
    // This is usually beneficial since we avoid forcing the
    // back-end to store the argument to memory.
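    // For example (illustrative): a 12-byte struct { int a, b, c; } is passed
    // as [2 x i64], while a 4-byte struct is passed as i32.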
    uint64_t Bits = getContext().getTypeSize(Ty);
    if (Bits > 0 && Bits <= 8 * GPRBits) {
      llvm::Type *CoerceTy;

      // Types up to 8 bytes are passed as integer type (which will be
      // properly aligned in the argument save area doubleword).
      if (Bits <= GPRBits)
        CoerceTy =
            llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
      // Larger types are passed as arrays, with the base type selected
      // according to the required alignment in the save area.
      else {
        uint64_t RegBits = ABIAlign * 8;
        uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
        llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
        CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
      }

      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are passed ByVal.
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                     : ABIArgInfo::getDirect());
}

ABIArgInfo
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (RetTy->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(RetTy);
    if (Size > 128)
      return getNaturalAlignIndirect(RetTy);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (const auto *EIT = RetTy->getAs<BitIntType>())
    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

  if (isAggregateTypeForABI(RetTy)) {
    // ELFv2 homogeneous aggregates are returned as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == PPC64_SVR4_ABIKind::ELFv2 &&
        isHomogeneousAggregate(RetTy, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // ELFv2 small aggregates are returned in up to two registers.
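    // For example (illustrative): under ELFv2, struct { int a, b, c; }
    // (96 bits) is returned as { i64, i64 }, and struct { char c; } as i8.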
    uint64_t Bits = getContext().getTypeSize(RetTy);
    if (Kind == PPC64_SVR4_ABIKind::ELFv2 && Bits <= 2 * GPRBits) {
      if (Bits == 0)
        return ABIArgInfo::getIgnore();

      llvm::Type *CoerceTy;
      if (Bits > GPRBits) {
        CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
        CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
      } else
        CoerceTy =
            llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are returned indirectly.
    return getNaturalAlignIndirect(RetTy);
  }

  return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                        : ABIArgInfo::getDirect());
}

// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
  TypeInfo.Align = getParamTypeAlignment(Ty);

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  // If we have a complex type and the base type is smaller than 8 bytes,
  // the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate doublewords.  However, Clang expects us to produce a
  // pointer to a structure with the two parts packed tightly.  So generate
  // loads of the real and imaginary parts relative to the va_list pointer,
  // and store them to a temporary structure.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    CharUnits EltSize = TypeInfo.Width / 2;
    if (EltSize < SlotSize)
      return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
  }

  // Otherwise, just use the general rule.
  //
  // The PPC64 ABI passes some arguments in integer registers, even to variadic
  // functions. To allow va_list to use the simple "void*" representation,
  // variadic calls allocate space in the argument area for the integer argument
  // registers, and variadic functions spill their integer argument registers to
  // this area in their prologues. When aggregates smaller than a register are
  // passed this way, they are passed in the least significant bits of the
  // register, which means that after spilling on big-endian targets they will
  // be right-aligned in their argument slot. This is uncommon; for a variety of
  // reasons, other big-endian targets don't end up right-aligning aggregate
  // types this way, and so right-alignment only applies to fundamental types.
  // So on PPC64, we must force the use of right-alignment even for aggregates.
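  // (Illustrative: a va_arg of struct { char c; } on big-endian PPC64 reads
  // the byte from offset 7 of its 8-byte slot, which the right-adjusting
  // lowering below accounts for.)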
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
                          SlotSize, /*AllowHigher*/ true,
                          /*ForceRightAdjust*/ true);
}

bool
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
  CodeGen::CodeGenFunction &CGF,
  llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
                                     /*IsAIX*/ false);
}

bool
PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
                                     /*IsAIX*/ false);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAIXTargetCodeGenInfo(CodeGenModule &CGM, bool Is64Bit) {
  return std::make_unique<AIXTargetCodeGenInfo>(CGM.getTypes(), Is64Bit);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createPPC32TargetCodeGenInfo(CodeGenModule &CGM, bool SoftFloatABI) {
  bool RetSmallStructInRegABI = PPC32TargetCodeGenInfo::isStructReturnInRegABI(
      CGM.getTriple(), CGM.getCodeGenOpts());
  return std::make_unique<PPC32TargetCodeGenInfo>(CGM.getTypes(), SoftFloatABI,
                                                  RetSmallStructInRegABI);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createPPC64TargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<PPC64TargetCodeGenInfo>(CGM.getTypes());
}

std::unique_ptr<TargetCodeGenInfo> CodeGen::createPPC64_SVR4_TargetCodeGenInfo(
    CodeGenModule &CGM, PPC64_SVR4_ABIKind Kind, bool SoftFloatABI) {
  return std::make_unique<PPC64_SVR4_TargetCodeGenInfo>(CGM.getTypes(), Kind,
                                                        SoftFloatABI);
}