//===- Sparc.cpp ----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// SPARC v8 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Ensures that complex values are passed in registers.
//
namespace {
class SparcV8ABIInfo : public DefaultABIInfo {
public:
  SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  void computeInfo(CGFunctionInfo &FI) const override;
};
} // end anonymous namespace

ABIArgInfo
SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  return DefaultABIInfo::classifyReturnType(Ty);
}

void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &Arg : FI.arguments())
    Arg.info = classifyArgumentType(Arg.type);
}

namespace {
class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<SparcV8ABIInfo>(CGT)) {}

  llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
                                   llvm::Value *Address) const override {
    int Offset;
    if (isAggregateTypeForABI(CGF.CurFnInfo->getReturnType()))
      Offset = 12;
    else
      Offset = 8;
    return CGF.Builder.CreateGEP(CGF.Int8Ty, Address,
                                 llvm::ConstantInt::get(CGF.Int32Ty, Offset));
  }

  llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
                                   llvm::Value *Address) const override {
    int Offset;
    if (isAggregateTypeForABI(CGF.CurFnInfo->getReturnType()))
      Offset = -12;
    else
      Offset = -8;
    return CGF.Builder.CreateGEP(CGF.Int8Ty, Address,
                                 llvm::ConstantInt::get(CGF.Int32Ty, Offset));
  }
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// SPARC v9 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Function arguments are mapped to a nominal "parameter array" and promoted to
// registers depending on their type. Each argument occupies 8 or 16 bytes in
// the array; structs larger than 16 bytes are passed indirectly.
//
// One case requires special care:
//
//   struct mixed {
//     int i;
//     float f;
//   };
//
// When a struct mixed is passed by value, it only occupies 8 bytes in the
// parameter array, but the int is passed in an integer register, and the float
// is passed in a floating point register. This is represented as two arguments
// with the LLVM IR inreg attribute:
//
//   declare void f(i32 inreg %i, float inreg %f)
//
// The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.
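//
// As a further illustration (an editorial example, not taken from the SCD; it
// simply follows the classification code below): a struct such as
//
//   struct dpair {
//     double d;
//     long l;
//   };
//
// occupies 16 bytes of the parameter array. The double is exposed as a
// first-class element of the coercion type so that it is passed in a floating
// point register, giving roughly:
//
//   declare void f(double %d, i64 %l)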
//
namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;

  // Coercion type builder for structs passed in registers. The coercion type
  // serves two purposes:
  //
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
  // 2. Expose aligned floating point elements as first-level elements, so the
  //    code generator knows to pass them in floating point registers.
  //
  // We also compute the InReg flag which indicates that the struct contains
  // aligned 32-bit floats.
  //
  struct CoerceBuilder {
    llvm::LLVMContext &Context;
    const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems;
    uint64_t Size;
    bool InReg;

    CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
        : Context(c), DL(dl), Size(0), InReg(false) {}

    // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::alignTo(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }

    // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }

    // Add a struct type to the coercion type, starting at Offset (in bits).
    void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
      const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
      for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
        llvm::Type *ElemTy = StrTy->getElementType(i);
        uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
        switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size += 64;
          }
          break;
        default:
          break;
        }
      }
    }
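
    // Worked example (an editorial illustration, assuming the usual SPARC v9
    // data layout): for a C struct { float f; double d; }, addStruct() visits
    // the float at bit offset 0 and the double at bit offset 64. Adding the
    // float sets InReg and makes Size 32; adding the double first pads with an
    // i32 to finish the 64-bit word and then appends the double, so the
    // coercion type is { float, i32, double } and the struct is passed
    // DirectInReg.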

    // Check if Ty is a usable substitute for the coercion type.
    bool isUsableType(llvm::StructType *Ty) const {
      return llvm::ArrayRef(Elems) == Ty->elements();
    }

    // Get the coercion type as a literal struct type.
    llvm::Type *getType() const {
      if (Elems.size() == 1)
        return Elems.front();
      return llvm::StructType::get(Context, Elems);
    }
  };
};
} // end anonymous namespace

ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Anything too big to fit in registers is passed with an explicit indirect
  // pointer / sret pointer.
  if (Size > SizeLimit)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Integer types smaller than a register are extended.
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend(Ty);

  if (const auto *EIT = Ty->getAs<BitIntType>())
    if (EIT->getNumBits() < 64)
      return ABIArgInfo::getExtend(Ty);

  // Other non-aggregates go in registers.
  if (!isAggregateTypeForABI(Ty))
    return ABIArgInfo::getDirect();

  // If a C++ object has either a non-trivial copy constructor or a non-trivial
  // destructor, it is passed with an explicit indirect pointer / sret pointer.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // This is a small aggregate type that should be passed in registers.
  // Build a coercion type from the LLVM struct type.
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();

  CoerceBuilder CB(getVMContext(), getDataLayout());
  CB.addStruct(0, StrTy);
  // All structs, even empty ones, should take up a register argument slot,
  // so pin the minimum struct size to one bit.
  CB.pad(llvm::alignTo(
      std::max(CB.DL.getTypeSizeInBits(StrTy).getKnownMinValue(), uint64_t(1)),
      64));

  // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();

  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  return ABIArgInfo::getDirect(CoerceTy);
}
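
// An editorial sketch of the va_arg lowering implemented below (derived from
// the code, not quoted from the SCD): every va_list slot is 8 bytes. For
// example,
//
//   int i = va_arg(ap, int);
//
// loads the 4-byte value right-justified in the current slot (offset 4 on this
// big-endian target) and advances ap by 8. Aggregates of up to 16 bytes are
// read in place with the stride rounded up to a multiple of 8 bytes, while
// larger aggregates were passed indirectly, so the slot holds a pointer to the
// actual argument.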

RValue SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                 QualType Ty, AggValueSlot Slot) const {
  ABIArgInfo AI = classifyType(Ty, 16 * 8);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  CGBuilderTy &Builder = CGF.Builder;
  Address Addr = Address(Builder.CreateLoad(VAListAddr, "ap.cur"),
                         getVAListElementType(CGF), SlotSize);
  llvm::Type *ArgPtrTy = CGF.UnqualPtrTy;

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  Address ArgAddr = Address::invalid();
  CharUnits Stride;
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");

  case ABIArgInfo::Extend: {
    Stride = SlotSize;
    CharUnits Offset = SlotSize - TypeInfo.Width;
    ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
    break;
  }

  case ABIArgInfo::Direct: {
    auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
    ArgAddr = Addr;
    break;
  }

  case ABIArgInfo::Indirect:
  case ABIArgInfo::IndirectAliased:
    Stride = SlotSize;
    ArgAddr = Addr.withElementType(ArgPtrTy);
    ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"), ArgTy,
                      TypeInfo.Align);
    break;

  case ABIArgInfo::Ignore:
    return Slot.asRValue();
  }

  // Update VAList.
  Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next");
  Builder.CreateStore(NextPtr.emitRawPointer(CGF), VAListAddr);

  return CGF.EmitLoadOfAnyValue(
      CGF.MakeAddrLValue(ArgAddr.withElementType(ArgTy), Ty), Slot);
}

void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
  for (auto &I : FI.arguments())
    I.info = classifyType(I.type, 16 * 8);
}

namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<SparcV9ABIInfo>(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 14;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
                                   llvm::Value *Address) const override {
    return CGF.Builder.CreateGEP(CGF.Int8Ty, Address,
                                 llvm::ConstantInt::get(CGF.Int32Ty, 8));
  }

  llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
                                   llvm::Value *Address) const override {
    return CGF.Builder.CreateGEP(CGF.Int8Ty, Address,
                                 llvm::ConstantInt::get(CGF.Int32Ty, -8));
  }
};
} // end anonymous namespace
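
// Editorial note (not part of the original comments): initDwarfEHRegSizeTable
// backs __builtin_init_dwarf_reg_size_table(), which unwinder support code
// uses to size the DWARF register save area. For example,
//
//   void f(void *buf) { __builtin_init_dwarf_reg_size_table(buf); }
//
// fills buf[0..31] with 8 (the 64-bit integer registers), buf[32..63] with 4
// (the single-precision floating point registers), and the remaining entries
// listed below with 8.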

bool
SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                  llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

  // 0-31: the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: f0-31, the 4-byte floating-point registers
  AssignToArrayRange(Builder, Address, Four8, 32, 63);

  //   Y   = 64
  //   PSR = 65
  //   WIM = 66
  //   TBR = 67
  //   PC  = 68
  //   NPC = 69
  //   FSR = 70
  //   CSR = 71
  AssignToArrayRange(Builder, Address, Eight8, 64, 71);

  // 72-87: d0-15, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 72, 87);

  return false;
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createSparcV8TargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<SparcV8TargetCodeGenInfo>(CGM.getTypes());
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createSparcV9TargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<SparcV9TargetCodeGenInfo>(CGM.getTypes());
}