//===--- SwiftCallingConv.cpp - Lowering for the Swift calling convention -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the abstract lowering for the Swift calling convention.
//
//===----------------------------------------------------------------------===//

#include "clang/CodeGen/SwiftCallingConv.h"
#include "ABIInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/Basic/TargetInfo.h"

using namespace clang;
using namespace CodeGen;
using namespace swiftcall;

static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) {
  return cast<SwiftABIInfo>(CGM.getTargetCodeGenInfo().getABIInfo());
}

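/// Check for a power of 2 with the classic bit trick: n & -n isolates the
/// lowest set bit, so the comparison holds exactly when at most one bit is
/// set. For example, 8 (0b1000) passes and 12 (0b1100) fails. Note that 0
/// also passes; callers only use this with nonzero values.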
static bool isPowerOf2(unsigned n) {
  return n == (n & -n);
}

/// Given two types with the same size, try to find a common type.
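/// For example, an integer type merges with a same-sized pointer type (the
/// integer wins), and two same-sized vectors merge if their element types do.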
static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) {
  assert(first != second);

  // Allow pointers to merge with integers, but prefer the integer type.
  if (first->isIntegerTy()) {
    if (second->isPointerTy()) return first;
  } else if (first->isPointerTy()) {
    if (second->isIntegerTy()) return second;
    if (second->isPointerTy()) return first;

  // Allow two vectors to be merged (given that they have the same size).
  // This assumes that we never have two different vector register sets.
  } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) {
    if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) {
      if (auto commonTy = getCommonType(firstVecTy->getElementType(),
                                        secondVecTy->getElementType())) {
        return (commonTy == firstVecTy->getElementType() ? first : second);
      }
    }
  }

  return nullptr;
}

static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeStoreSize(type));
}

static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(type));
}

void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
  // Deal with various aggregate types as special cases:

  // Record types.
  if (auto recType = type->getAs<RecordType>()) {
    addTypedData(recType->getDecl(), begin);

  // Array types.
  } else if (type->isArrayType()) {
    // Incomplete array types (flexible array members?) don't provide
    // data to lay out, and the other cases shouldn't be possible.
    auto arrayType = CGM.getContext().getAsConstantArrayType(type);
    if (!arrayType) return;

    QualType eltType = arrayType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    for (uint64_t i = 0, e = arrayType->getSize().getZExtValue(); i != e; ++i) {
      addTypedData(eltType, begin + i * eltSize);
    }

  // Complex types.
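  // A complex value is laid out as two adjacent elements of its element
  // type, e.g. _Complex float is two floats at begin and begin + 4 on
  // typical targets.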
  } else if (auto complexType = type->getAs<ComplexType>()) {
    auto eltType = complexType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    auto eltLLVMType = CGM.getTypes().ConvertType(eltType);
    addTypedData(eltLLVMType, begin, begin + eltSize);
    addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);

  // Member pointer types.
  } else if (type->getAs<MemberPointerType>()) {
    // Just add it all as opaque.
    addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type));

  // Atomic types.
  } else if (const auto *atomicType = type->getAs<AtomicType>()) {
    auto valueType = atomicType->getValueType();
    auto atomicSize = CGM.getContext().getTypeSizeInChars(atomicType);
    auto valueSize = CGM.getContext().getTypeSizeInChars(valueType);

    addTypedData(valueType, begin);

    // Add atomic padding.
    auto atomicPadding = atomicSize - valueSize;
    if (atomicPadding > CharUnits::Zero())
      addOpaqueData(begin + valueSize, begin + atomicSize);

  // Everything else is scalar and should not convert as an LLVM aggregate.
  } else {
    // We intentionally convert as !ForMem because we want to preserve
    // that a type was an i1.
    auto *llvmType = CGM.getTypes().ConvertType(type);
    addTypedData(llvmType, begin);
  }
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin) {
  addTypedData(record, begin, CGM.getContext().getASTRecordLayout(record));
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
                                    const ASTRecordLayout &layout) {
  // Unions are a special case.
  if (record->isUnion()) {
    for (auto field : record->fields()) {
      if (field->isBitField()) {
        addBitFieldData(field, begin, 0);
      } else {
        addTypedData(field->getType(), begin);
      }
    }
    return;
  }

  // Note that correctness does not rely on us adding things in
  // their actual order of layout; it's just somewhat more efficient
  // for the builder.

  // With that in mind, add "early" C++ data.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  if (cxxRecord) {
    //   - a v-table pointer, if the class adds its own
    if (layout.hasOwnVFPtr()) {
      addTypedData(CGM.Int8PtrTy, begin);
    }

    //   - non-virtual bases
    for (auto &baseSpecifier : cxxRecord->bases()) {
      if (baseSpecifier.isVirtual()) continue;

      auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
    }

    //   - a vbptr if the class adds its own
    if (layout.hasOwnVBPtr()) {
      addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset());
    }
  }

  // Add fields.
  for (auto field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    } else {
      addTypedData(field->getType(),
              begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
    }
  }

  // Add "late" C++ data:
  if (cxxRecord) {
    //   - virtual bases
    for (auto &vbaseSpecifier : cxxRecord->vbases()) {
      auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));
    }
  }
}

void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield,
                                       CharUnits recordBegin,
                                       uint64_t bitfieldBitBegin) {
  assert(bitfield->isBitField());
  auto &ctx = CGM.getContext();
  auto width = bitfield->getBitWidthValue(ctx);

  // We can ignore zero-width bit-fields.
  if (width == 0) return;

  // toCharUnitsFromBits rounds down.
  CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin);

  // Find the offset of the last byte that is partially occupied by the
  // bit-field; since we otherwise expect exclusive ends, the end is the
  // next byte.
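  // For example, a bit-field starting at bit 3 with width 10 occupies
  // bits [3, 13), i.e. bytes 0 and 1, so the byte range is [0, 2).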
  uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1;
  CharUnits bitfieldByteEnd =
    ctx.toCharUnitsFromBits(bitfieldBitLast) + CharUnits::One();
  addOpaqueData(recordBegin + bitfieldByteBegin,
                recordBegin + bitfieldByteEnd);
}

void SwiftAggLowering::addTypedData(llvm::Type *type, CharUnits begin) {
  assert(type && "didn't provide type for typed data");
  addTypedData(type, begin, begin + getTypeStoreSize(CGM, type));
}

void SwiftAggLowering::addTypedData(llvm::Type *type,
                                    CharUnits begin, CharUnits end) {
  assert(type && "didn't provide type for typed data");
  assert(getTypeStoreSize(CGM, type) == end - begin);

  // Legalize vector types.
  if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
    SmallVector<llvm::Type*, 4> componentTys;
    legalizeVectorType(CGM, end - begin, vecTy, componentTys);
    assert(componentTys.size() >= 1);

    // Walk the initial components.
    for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) {
      llvm::Type *componentTy = componentTys[i];
      auto componentSize = getTypeStoreSize(CGM, componentTy);
      assert(componentSize < end - begin);
      addLegalTypedData(componentTy, begin, begin + componentSize);
      begin += componentSize;
    }

    return addLegalTypedData(componentTys.back(), begin, end);
  }

  // Legalize integer types.
  if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
    if (!isLegalIntegerType(CGM, intTy))
      return addOpaqueData(begin, end);
  }

  // All other types should be legal.
  return addLegalTypedData(type, begin, end);
}

void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
                                         CharUnits begin, CharUnits end) {
  // Require the type to be naturally aligned.
  if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) {

    // Try splitting vector types.
    if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
      auto split = splitLegalVectorType(CGM, end - begin, vecTy);
      auto eltTy = split.first;
      auto numElts = split.second;

      auto eltSize = (end - begin) / numElts;
      assert(eltSize == getTypeStoreSize(CGM, eltTy));
      for (size_t i = 0, e = numElts; i != e; ++i) {
        addLegalTypedData(eltTy, begin, begin + eltSize);
        begin += eltSize;
      }
      assert(begin == end);
      return;
    }

    return addOpaqueData(begin, end);
  }

  addEntry(type, begin, end);
}

void SwiftAggLowering::addEntry(llvm::Type *type,
                                CharUnits begin, CharUnits end) {
  assert((!type ||
          (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) &&
         "cannot add aggregate-typed data");
  assert(!type || begin.isMultipleOf(getNaturalAlignment(CGM, type)));

  // Fast path: we can just add entries to the end.
  if (Entries.empty() || Entries.back().End <= begin) {
    Entries.push_back({begin, end, type});
    return;
  }

  // Find the first existing entry that ends after the start of the new data.
  // TODO: do a binary search if Entries is big enough for it to matter.
  size_t index = Entries.size() - 1;
  while (index != 0) {
    if (Entries[index - 1].End <= begin) break;
    --index;
  }

  // The entry ends after the start of the new data.
  // If the entry starts after the end of the new data, there's no conflict.
  if (Entries[index].Begin >= end) {
    // This insertion is potentially O(n), but the way we generally build
    // these layouts makes that unlikely to matter: we'd need a union of
    // several very large types.
    Entries.insert(Entries.begin() + index, {begin, end, type});
    return;
  }

  // Otherwise, the ranges overlap.  The new range might also overlap
  // with later ranges.
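  // For example, given union { long long x; double d; } on a typical
  // 64-bit target, both members add an entry for bytes [0, 8); i64 and
  // double have no common type, so the exact-overlap case below makes
  // the entry opaque.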
restartAfterSplit:

  // Simplest case: an exact overlap.
  if (Entries[index].Begin == begin && Entries[index].End == end) {
    // If the types match exactly, great.
    if (Entries[index].Type == type) return;

    // If either type is opaque, make the entry opaque and return.
    if (Entries[index].Type == nullptr) {
      return;
    } else if (type == nullptr) {
      Entries[index].Type = nullptr;
      return;
    }

    // If they disagree in an ABI-agnostic way, just resolve the conflict
    // arbitrarily.
    if (auto entryType = getCommonType(Entries[index].Type, type)) {
      Entries[index].Type = entryType;
      return;
    }

    // Otherwise, make the entry opaque.
    Entries[index].Type = nullptr;
    return;
  }

  // Okay, we have an overlapping conflict of some sort.

  // If we have a vector type, split it.
  if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
    auto eltTy = vecTy->getElementType();
    CharUnits eltSize =
        (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements();
    assert(eltSize == getTypeStoreSize(CGM, eltTy));
    for (unsigned i = 0,
                  e = cast<llvm::FixedVectorType>(vecTy)->getNumElements();
         i != e; ++i) {
      addEntry(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }

  // If the entry is a vector type, split it and try again.
  if (Entries[index].Type && Entries[index].Type->isVectorTy()) {
    splitVectorEntry(index);
    goto restartAfterSplit;
  }

  // Okay, we have no choice but to make the existing entry opaque.

  Entries[index].Type = nullptr;

  // Stretch the start of the entry to the beginning of the range.
  if (begin < Entries[index].Begin) {
    Entries[index].Begin = begin;
    assert(index == 0 || begin >= Entries[index - 1].End);
  }

  // Stretch the end of the entry to the end of the range; but if we run
  // into the start of the next entry, just leave the range there and repeat.
  while (end > Entries[index].End) {
    assert(Entries[index].Type == nullptr);

    // If the range doesn't overlap the next entry, we're done.
    if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) {
      Entries[index].End = end;
      break;
    }

    // Otherwise, stretch to the start of the next entry.
    Entries[index].End = Entries[index + 1].Begin;

    // Continue with the next entry.
    index++;

    // This entry needs to be made opaque if it is not already.
    if (Entries[index].Type == nullptr)
      continue;

    // Split vector entries unless we completely subsume them.
    if (Entries[index].Type->isVectorTy() &&
        end < Entries[index].End) {
      splitVectorEntry(index);
    }

    // Make the entry opaque.
    Entries[index].Type = nullptr;
  }
}

/// Replace the entry of vector type at offset 'index' with a sequence
/// of its component vectors.
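/// For example, a <4 x float> entry may become two <2 x float> entries, or
/// four float entries if no smaller vector type is legal for the target.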
void SwiftAggLowering::splitVectorEntry(unsigned index) {
  auto vecTy = cast<llvm::VectorType>(Entries[index].Type);
  auto split = splitLegalVectorType(CGM, Entries[index].getWidth(), vecTy);

  auto eltTy = split.first;
  CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
  auto numElts = split.second;
  Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());

  CharUnits begin = Entries[index].Begin;
  for (unsigned i = 0; i != numElts; ++i) {
    Entries[index].Type = eltTy;
    Entries[index].Begin = begin;
    Entries[index].End = begin + eltSize;
    begin += eltSize;
    // Advance to the next (newly inserted) slot; without this, only the
    // original entry would be written and the inserted entries would be
    // left default-constructed.
    index++;
  }
}

/// Given a power-of-two unit size, return the offset of the aligned unit
/// of that size which contains the given offset.
///
/// In other words, round down to the nearest multiple of the unit size.
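/// For example, offset 13 with a unit size of 8 rounds down to 8.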
static CharUnits getOffsetAtStartOfUnit(CharUnits offset, CharUnits unitSize) {
  assert(isPowerOf2(unitSize.getQuantity()));
  auto unitMask = ~(unitSize.getQuantity() - 1);
  return CharUnits::fromQuantity(offset.getQuantity() & unitMask);
}

static bool areBytesInSameUnit(CharUnits first, CharUnits second,
                               CharUnits chunkSize) {
  return getOffsetAtStartOfUnit(first, chunkSize)
      == getOffsetAtStartOfUnit(second, chunkSize);
}

static bool isMergeableEntryType(llvm::Type *type) {
  // Opaquely-typed memory is always mergeable.
  if (type == nullptr) return true;

  // Pointers and integers are always mergeable.  In theory we should not
  // merge pointers, but (1) it doesn't currently matter in practice because
  // the chunk size is never greater than the size of a pointer and (2)
  // Swift IRGen uses integer types for a lot of things that are "really"
  // just storing pointers (like Optional<SomePointer>).  If we ever have a
  // target that would otherwise combine pointers, we should put some effort
  // into fixing those cases in Swift IRGen and then call out pointer types
  // here.

  // Floating-point and vector types should never be merged.
  // Most such types are too large and highly-aligned to ever trigger merging
  // in practice, but it's important for the rule to cover at least 'half'
  // and 'float', as well as things like small vectors of 'i1' or 'i8'.
  return (!type->isFloatingPointTy() && !type->isVectorTy());
}

bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first,
                                          const StorageEntry &second,
                                          CharUnits chunkSize) {
  // Only merge entries that overlap the same chunk.  We test this first
  // despite being a bit more expensive because this is the condition that
  // tends to prevent merging.
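  // For example, with an 8-byte chunk, entries [0, 2) and [4, 8) overlap
  // the chunk [0, 8): the first entry's last byte (1) and the second
  // entry's first byte (4) round down to the same unit start.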
  if (!areBytesInSameUnit(first.End - CharUnits::One(), second.Begin,
                          chunkSize))
    return false;

  return (isMergeableEntryType(first.Type) &&
          isMergeableEntryType(second.Type));
}

void SwiftAggLowering::finish() {
  if (Entries.empty()) {
    Finished = true;
    return;
  }

  // We logically split the layout down into a series of chunks of this size,
  // which is generally the size of a pointer.
  const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);

  // First pass: if two entries should be merged, make them both opaque
  // and stretch one to meet the next.
  // Also, remember if there are any opaque entries.
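  // For example, two adjacent i8 entries that land in the same chunk are
  // merged into one opaque range here and rebuilt as a single i16 below.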
  bool hasOpaqueEntries = (Entries[0].Type == nullptr);
  for (size_t i = 1, e = Entries.size(); i != e; ++i) {
    if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) {
      Entries[i - 1].Type = nullptr;
      Entries[i].Type = nullptr;
      Entries[i - 1].End = Entries[i].Begin;
      hasOpaqueEntries = true;

    } else if (Entries[i].Type == nullptr) {
      hasOpaqueEntries = true;
    }
  }

  // The rest of the algorithm leaves non-opaque entries alone, so if we
  // have no opaque entries, we're done.
  if (!hasOpaqueEntries) {
    Finished = true;
    return;
  }

  // Okay, move the entries to a temporary and rebuild Entries.
  auto orig = std::move(Entries);
  assert(Entries.empty());

  for (size_t i = 0, e = orig.size(); i != e; ++i) {
    // Just copy over non-opaque entries.
    if (orig[i].Type != nullptr) {
      Entries.push_back(orig[i]);
      continue;
    }

    // Scan forward to determine the full extent of the next opaque range.
    // We know from the first pass that only contiguous ranges will overlap
    // the same aligned chunk.
    auto begin = orig[i].Begin;
    auto end = orig[i].End;
    while (i + 1 != e &&
           orig[i + 1].Type == nullptr &&
           end == orig[i + 1].Begin) {
      end = orig[i + 1].End;
      i++;
    }

    // Add an entry per intersected chunk.
    do {
      // Find the smallest aligned storage unit in the maximal aligned
      // storage unit containing 'begin' that contains all the bytes in
      // the intersection between the range and this chunk.
      CharUnits localBegin = begin;
      CharUnits chunkBegin = getOffsetAtStartOfUnit(localBegin, chunkSize);
      CharUnits chunkEnd = chunkBegin + chunkSize;
      CharUnits localEnd = std::min(end, chunkEnd);

      // Just do a simple loop over ever-increasing unit sizes.
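      // For example, an opaque range [2, 8) in an 8-byte chunk probes the
      // units [2, 3), [2, 4), [0, 4), and settles on [0, 8), which is
      // emitted below as an i64.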
      CharUnits unitSize = CharUnits::One();
      CharUnits unitBegin, unitEnd;
      for (; ; unitSize *= 2) {
        assert(unitSize <= chunkSize);
        unitBegin = getOffsetAtStartOfUnit(localBegin, unitSize);
        unitEnd = unitBegin + unitSize;
        if (unitEnd >= localEnd) break;
      }

      // Add an entry for this unit.
      auto entryTy =
        llvm::IntegerType::get(CGM.getLLVMContext(),
                               CGM.getContext().toBits(unitSize));
      Entries.push_back({unitBegin, unitEnd, entryTy});

      // The next chunk starts where this chunk left off.
      begin = localEnd;
    } while (begin != end);
  }

  // Okay, finally finished.
  Finished = true;
}

void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
  assert(Finished && "haven't yet finished lowering");

  for (auto &entry : Entries) {
    callback(entry.Begin, entry.End, entry.Type);
  }
}

std::pair<llvm::StructType*, llvm::Type*>
SwiftAggLowering::getCoerceAndExpandTypes() const {
  assert(Finished && "haven't yet finished lowering");

  auto &ctx = CGM.getLLVMContext();

  if (Entries.empty()) {
    auto type = llvm::StructType::get(ctx);
    return { type, type };
  }

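  // Walk the entries, inserting i8-array padding wherever there is a gap.
  // For example, entries i32 at [0, 4) and i64 at [8, 16) produce the
  // coercion type { i32, [4 x i8], i64 } and the unpadded type { i32, i64 }.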
  SmallVector<llvm::Type*, 8> elts;
  CharUnits lastEnd = CharUnits::Zero();
  bool hasPadding = false;
  bool packed = false;
  for (auto &entry : Entries) {
    if (entry.Begin != lastEnd) {
      auto paddingSize = entry.Begin - lastEnd;
      assert(!paddingSize.isNegative());

      auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
                                          paddingSize.getQuantity());
      elts.push_back(padding);
      hasPadding = true;
    }

    if (!packed && !entry.Begin.isMultipleOf(
          CharUnits::fromQuantity(
            CGM.getDataLayout().getABITypeAlignment(entry.Type))))
      packed = true;

    elts.push_back(entry.Type);

    lastEnd = entry.Begin + getTypeAllocSize(CGM, entry.Type);
    assert(entry.End <= lastEnd);
  }

  // We don't need to adjust 'packed' to deal with possible tail padding
  // because we never do that kind of access through the coercion type.
  auto coercionType = llvm::StructType::get(ctx, elts, packed);

  llvm::Type *unpaddedType = coercionType;
  if (hasPadding) {
    elts.clear();
    for (auto &entry : Entries) {
      elts.push_back(entry.Type);
    }
    if (elts.size() == 1) {
      unpaddedType = elts[0];
    } else {
      unpaddedType = llvm::StructType::get(ctx, elts, /*packed*/ false);
    }
  } else if (Entries.size() == 1) {
    unpaddedType = Entries[0].Type;
  }

  return { coercionType, unpaddedType };
}

bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
  assert(Finished && "haven't yet finished lowering");

  // Empty types don't need to be passed indirectly.
  if (Entries.empty()) return false;

  // Avoid copying the array of types when there's just a single element.
  if (Entries.size() == 1) {
    return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(
        Entries.back().Type, asReturnValue);
  }

  SmallVector<llvm::Type*, 8> componentTys;
  componentTys.reserve(Entries.size());
  for (auto &entry : Entries) {
    componentTys.push_back(entry.Type);
  }
  return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(componentTys,
                                                           asReturnValue);
}

bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM,
                                     ArrayRef<llvm::Type*> componentTys,
                                     bool asReturnValue) {
  return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(componentTys,
                                                           asReturnValue);
}

CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
  // Currently always the size of an ordinary pointer.
  return CGM.getContext().toCharUnitsFromBits(
           CGM.getContext().getTargetInfo().getPointerWidth(0));
}

CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) {
  // For Swift's purposes, this is always just the store size of the type
  // rounded up to a power of 2.
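  // For example, a type with a 12-byte store size gets natural alignment 16.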
  auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity();
  if (!isPowerOf2(size)) {
    size = 1ULL << (llvm::findLastSet(size, llvm::ZB_Undefined) + 1);
  }
  assert(size >= CGM.getDataLayout().getABITypeAlignment(type));
  return CharUnits::fromQuantity(size);
}

bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
                                   llvm::IntegerType *intTy) {
  auto size = intTy->getBitWidth();
  switch (size) {
  case 1:
  case 8:
  case 16:
  case 32:
  case 64:
    // Just assume that the above are always legal.
    return true;

  case 128:
    return CGM.getContext().getTargetInfo().hasInt128Type();

  default:
    return false;
  }
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::VectorType *vectorTy) {
  return isLegalVectorType(
      CGM, vectorSize, vectorTy->getElementType(),
      cast<llvm::FixedVectorType>(vectorTy)->getNumElements());
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::Type *eltTy, unsigned numElts) {
  assert(numElts > 1 && "illegal vector length");
  return getSwiftABIInfo(CGM)
           .isLegalVectorTypeForSwift(vectorSize, eltTy, numElts);
}

std::pair<llvm::Type*, unsigned>
swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                llvm::VectorType *vectorTy) {
  auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements();
  auto eltTy = vectorTy->getElementType();

  // Try to split the vector type in half.
  if (numElts >= 4 && isPowerOf2(numElts)) {
    if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
      return {llvm::FixedVectorType::get(eltTy, numElts / 2), 2};
  }

  return {eltTy, numElts};
}

void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
                                   llvm::VectorType *origVectorTy,
                             llvm::SmallVectorImpl<llvm::Type*> &components) {
  // If it's already a legal vector type, use it.
  if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) {
    components.push_back(origVectorTy);
    return;
  }

  // Try to split the vector into legal subvectors.
  auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements();
  auto eltTy = origVectorTy->getElementType();
  assert(numElts != 1);

  // The largest size that we're still considering making subvectors of.
  // Always a power of 2.
  unsigned logCandidateNumElts = llvm::findLastSet(numElts, llvm::ZB_Undefined);
  unsigned candidateNumElts = 1U << logCandidateNumElts;
  assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);
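  // For example, if 2- and 4-element vectors are the only legal sizes,
  // a <7 x float> decomposes into <4 x float>, <2 x float>, and a float.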

  // Minor optimization: don't check the legality of this exact size twice.
  if (candidateNumElts == numElts) {
    logCandidateNumElts--;
    candidateNumElts >>= 1;
  }

  CharUnits eltSize = (origVectorSize / numElts);
  CharUnits candidateSize = eltSize * candidateNumElts;

  // The sensibility of this algorithm relies on the fact that we never
  // have a legal non-power-of-2 vector size without having the power of 2
  // also be legal.
  while (logCandidateNumElts > 0) {
    assert(candidateNumElts == 1U << logCandidateNumElts);
    assert(candidateNumElts <= numElts);
    assert(candidateSize == eltSize * candidateNumElts);

    // Skip illegal vector sizes.
    if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
      continue;
    }

    // Add the right number of vectors of this size.
    auto numVecs = numElts >> logCandidateNumElts;
    components.append(numVecs,
                      llvm::FixedVectorType::get(eltTy, candidateNumElts));
    numElts -= (numVecs << logCandidateNumElts);

    if (numElts == 0) return;

    // It's possible that the number of elements remaining will be legal.
    // This can happen with e.g. <7 x float> when <3 x float> is legal.
    // This only needs to be separately checked if it's not a power of 2.
    if (numElts > 2 && !isPowerOf2(numElts) &&
        isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
      components.push_back(llvm::FixedVectorType::get(eltTy, numElts));
      return;
    }

    // Bring candidateNumElts down to something no larger than numElts.
    do {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
    } while (candidateNumElts > numElts);
  }

  // Otherwise, just append a bunch of individual elements.
  components.append(numElts, eltTy);
}

bool swiftcall::mustPassRecordIndirectly(CodeGenModule &CGM,
                                         const RecordDecl *record) {
  // FIXME: should we not rely on the standard computation in Sema, just in
  // case we want to diverge from the platform ABI (e.g. on targets where
  // that uses the MSVC rule)?
  return !record->canPassInRegisters();
}

static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering,
                                       bool forReturn,
                                       CharUnits alignmentForIndirect) {
  if (lowering.empty()) {
    return ABIArgInfo::getIgnore();
  } else if (lowering.shouldPassIndirectly(forReturn)) {
    return ABIArgInfo::getIndirect(alignmentForIndirect, /*byval*/ false);
  } else {
    auto types = lowering.getCoerceAndExpandTypes();
    return ABIArgInfo::getCoerceAndExpand(types.first, types.second);
  }
}

static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
                               bool forReturn) {
  if (auto recordType = dyn_cast<RecordType>(type)) {
    auto record = recordType->getDecl();
    auto &layout = CGM.getContext().getASTRecordLayout(record);

    if (mustPassRecordIndirectly(CGM, record))
      return ABIArgInfo::getIndirect(layout.getAlignment(), /*byval*/ false);

    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
    lowering.finish();

    return classifyExpandedType(lowering, forReturn, layout.getAlignment());
  }

  // Just assume that all of our target ABIs can support returning at least
  // two integer or floating-point values.
  if (isa<ComplexType>(type)) {
    return (forReturn ? ABIArgInfo::getDirect() : ABIArgInfo::getExpand());
  }

  // Vector types may need to be legalized.
  if (isa<VectorType>(type)) {
    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(type, CharUnits::Zero());
    lowering.finish();

    CharUnits alignment = CGM.getContext().getTypeAlignInChars(type);
    return classifyExpandedType(lowering, forReturn, alignment);
  }

  // Member pointer types need to be expanded, but it's a simple form of
  // expansion that 'Direct' can handle.  Note that CanBeFlattened should be
  // true for this to work.

  // 'void' needs to be ignored.
  if (type->isVoidType()) {
    return ABIArgInfo::getIgnore();
  }

  // Everything else can be passed directly.
  return ABIArgInfo::getDirect();
}

ABIArgInfo swiftcall::classifyReturnType(CodeGenModule &CGM, CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ true);
}

ABIArgInfo swiftcall::classifyArgumentType(CodeGenModule &CGM,
                                           CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ false);
}

void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  auto &retInfo = FI.getReturnInfo();
  retInfo = classifyReturnType(CGM, FI.getReturnType());

  for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) {
    auto &argInfo = FI.arg_begin()[i];
    argInfo.info = classifyArgumentType(CGM, argInfo.type);
  }
}

// Is swifterror lowered to a register by the target ABI?
bool swiftcall::isSwiftErrorLoweredInRegister(CodeGenModule &CGM) {
  return getSwiftABIInfo(CGM).isSwiftErrorInRegister();
}