//===--- SwiftCallingConv.cpp - Lowering for the Swift calling convention -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the abstract lowering for the Swift calling convention.
//
//===----------------------------------------------------------------------===//

#include "clang/CodeGen/SwiftCallingConv.h"
#include "ABIInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/Basic/TargetInfo.h"

using namespace clang;
using namespace CodeGen;
using namespace swiftcall;

static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) {
  return CGM.getTargetCodeGenInfo().getSwiftABIInfo();
}

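/// Returns true if \p n has at most one bit set: n & -n isolates the lowest
/// set bit, so the comparison succeeds exactly for powers of 2 (and for 0).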
static bool isPowerOf2(unsigned n) {
  return n == (n & -n);
}

/// Given two types with the same size, try to find a common type.
static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) {
  assert(first != second);

  // Allow pointers to merge with integers, but prefer the integer type.
  if (first->isIntegerTy()) {
    if (second->isPointerTy()) return first;
  } else if (first->isPointerTy()) {
    if (second->isIntegerTy()) return second;
    if (second->isPointerTy()) return first;

  // Allow two vectors to be merged (given that they have the same size).
  // This assumes that we never have two different vector register sets.
  } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) {
    if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) {
      if (auto commonTy = getCommonType(firstVecTy->getElementType(),
                                        secondVecTy->getElementType())) {
        return (commonTy == firstVecTy->getElementType() ? first : second);
      }
    }
  }

  return nullptr;
}

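/// Convenience wrappers mapping LLVM DataLayout sizes into CharUnits: the
/// store size excludes tail padding, while the alloc size includes it.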
static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeStoreSize(type));
}

static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(type));
}

void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
  // Deal with various aggregate types as special cases:

  // Record types.
  if (auto recType = type->getAs<RecordType>()) {
    addTypedData(recType->getDecl(), begin);

  // Array types.
  } else if (type->isArrayType()) {
    // Incomplete array types (flexible array members?) don't provide
    // data to lay out, and the other cases shouldn't be possible.
    auto arrayType = CGM.getContext().getAsConstantArrayType(type);
    if (!arrayType) return;

    QualType eltType = arrayType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    for (uint64_t i = 0, e = arrayType->getZExtSize(); i != e; ++i) {
      addTypedData(eltType, begin + i * eltSize);
    }

  // Complex types.
  } else if (auto complexType = type->getAs<ComplexType>()) {
    auto eltType = complexType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    auto eltLLVMType = CGM.getTypes().ConvertType(eltType);
    addTypedData(eltLLVMType, begin, begin + eltSize);
    addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);

  // Member pointer types.
  } else if (type->getAs<MemberPointerType>()) {
    // Just add it all as opaque.
    addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type));

  // Atomic types.
  } else if (const auto *atomicType = type->getAs<AtomicType>()) {
    auto valueType = atomicType->getValueType();
    auto atomicSize = CGM.getContext().getTypeSizeInChars(atomicType);
    auto valueSize = CGM.getContext().getTypeSizeInChars(valueType);

    addTypedData(atomicType->getValueType(), begin);

    // Add atomic padding.
    auto atomicPadding = atomicSize - valueSize;
    if (atomicPadding > CharUnits::Zero())
      addOpaqueData(begin + valueSize, begin + atomicSize);

  // Everything else is scalar and should not convert as an LLVM aggregate.
  } else {
    // We intentionally convert as !ForMem because we want to preserve
    // that a type was an i1.
    auto *llvmType = CGM.getTypes().ConvertType(type);
    addTypedData(llvmType, begin);
  }
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin) {
  addTypedData(record, begin, CGM.getContext().getASTRecordLayout(record));
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
                                    const ASTRecordLayout &layout) {
  // Unions are a special case.
  if (record->isUnion()) {
    for (auto *field : record->fields()) {
      if (field->isBitField()) {
        addBitFieldData(field, begin, 0);
      } else {
        addTypedData(field->getType(), begin);
      }
    }
    return;
  }

  // Note that correctness does not rely on us adding things in
  // their actual order of layout; it's just somewhat more efficient
  // for the builder.

  // With that in mind, add "early" C++ data.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  if (cxxRecord) {
    //   - a v-table pointer, if the class adds its own
    if (layout.hasOwnVFPtr()) {
      addTypedData(CGM.Int8PtrTy, begin);
    }

    //   - non-virtual bases
    for (auto &baseSpecifier : cxxRecord->bases()) {
      if (baseSpecifier.isVirtual()) continue;

      auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
    }

    //   - a vbptr if the class adds its own
    if (layout.hasOwnVBPtr()) {
      addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset());
    }
  }

  // Add fields.
  for (auto *field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    } else {
      addTypedData(field->getType(),
                   begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
    }
  }

  // Add "late" C++ data:
  if (cxxRecord) {
    //   - virtual bases
    for (auto &vbaseSpecifier : cxxRecord->vbases()) {
      auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));
    }
  }
}

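/// Add the storage occupied by a bit-field.  Bit-fields have no natural
/// LLVM type, so every byte they touch is recorded as opaque data.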
void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield,
                                       CharUnits recordBegin,
                                       uint64_t bitfieldBitBegin) {
  assert(bitfield->isBitField());
  auto &ctx = CGM.getContext();
  auto width = bitfield->getBitWidthValue();

  // We can ignore zero-width bit-fields.
  if (width == 0) return;

  // toCharUnitsFromBits rounds down.
  CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin);

  // Find the offset of the last byte that is partially occupied by the
  // bit-field; since we otherwise expect exclusive ends, the end is the
  // next byte.
  uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1;
  CharUnits bitfieldByteEnd =
      ctx.toCharUnitsFromBits(bitfieldBitLast) + CharUnits::One();
  addOpaqueData(recordBegin + bitfieldByteBegin,
                recordBegin + bitfieldByteEnd);
}

void SwiftAggLowering::addTypedData(llvm::Type *type, CharUnits begin) {
  assert(type && "didn't provide type for typed data");
  addTypedData(type, begin, begin + getTypeStoreSize(CGM, type));
}

void SwiftAggLowering::addTypedData(llvm::Type *type,
                                    CharUnits begin, CharUnits end) {
  assert(type && "didn't provide type for typed data");
  assert(getTypeStoreSize(CGM, type) == end - begin);

  // Legalize vector types.
  if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
    SmallVector<llvm::Type*, 4> componentTys;
    legalizeVectorType(CGM, end - begin, vecTy, componentTys);
    assert(componentTys.size() >= 1);

    // Walk the initial components.
    for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) {
      llvm::Type *componentTy = componentTys[i];
      auto componentSize = getTypeStoreSize(CGM, componentTy);
      assert(componentSize < end - begin);
      addLegalTypedData(componentTy, begin, begin + componentSize);
      begin += componentSize;
    }

    return addLegalTypedData(componentTys.back(), begin, end);
  }

  // Legalize integer types.
  if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
    if (!isLegalIntegerType(CGM, intTy))
      return addOpaqueData(begin, end);
  }

  // All other types should be legal.
  return addLegalTypedData(type, begin, end);
}

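/// Add a scalar type that is already ABI-legal.  If it is not naturally
/// aligned at this offset, vectors are split into their elements and
/// everything else degrades to opaque data.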
void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
                                         CharUnits begin, CharUnits end) {
  // Require the type to be naturally aligned.
  if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) {

    // Try splitting vector types.
    if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
      auto split = splitLegalVectorType(CGM, end - begin, vecTy);
      auto eltTy = split.first;
      auto numElts = split.second;

      auto eltSize = (end - begin) / numElts;
      assert(eltSize == getTypeStoreSize(CGM, eltTy));
      for (size_t i = 0, e = numElts; i != e; ++i) {
        addLegalTypedData(eltTy, begin, begin + eltSize);
        begin += eltSize;
      }
      assert(begin == end);
      return;
    }

    return addOpaqueData(begin, end);
  }

  addEntry(type, begin, end);
}

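/// Insert a (possibly opaque) entry into the list, which is kept sorted by
/// offset.  Overlaps with existing entries are resolved by splitting vector
/// types or by collapsing the conflicting ranges into opaque data.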
void SwiftAggLowering::addEntry(llvm::Type *type,
                                CharUnits begin, CharUnits end) {
  assert((!type ||
          (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) &&
         "cannot add aggregate-typed data");
  assert(!type || begin.isMultipleOf(getNaturalAlignment(CGM, type)));

  // Fast path: we can just add entries to the end.
  if (Entries.empty() || Entries.back().End <= begin) {
    Entries.push_back({begin, end, type});
    return;
  }

  // Find the first existing entry that ends after the start of the new data.
  // TODO: do a binary search if Entries is big enough for it to matter.
  size_t index = Entries.size() - 1;
  while (index != 0) {
    if (Entries[index - 1].End <= begin) break;
    --index;
  }

  // The entry ends after the start of the new data.
  // If the entry starts after the end of the new data, there's no conflict.
  if (Entries[index].Begin >= end) {
    // This insertion is potentially O(n), but the way we generally build
    // these layouts makes that unlikely to matter: we'd need a union of
    // several very large types.
    Entries.insert(Entries.begin() + index, {begin, end, type});
    return;
  }

  // Otherwise, the ranges overlap.  The new range might also overlap
  // with later ranges.
restartAfterSplit:

  // Simplest case: an exact overlap.
  if (Entries[index].Begin == begin && Entries[index].End == end) {
    // If the types match exactly, great.
    if (Entries[index].Type == type) return;

    // If either type is opaque, make the entry opaque and return.
    if (Entries[index].Type == nullptr) {
      return;
    } else if (type == nullptr) {
      Entries[index].Type = nullptr;
      return;
    }

    // If they disagree in an ABI-agnostic way, just resolve the conflict
    // arbitrarily.
    if (auto entryType = getCommonType(Entries[index].Type, type)) {
      Entries[index].Type = entryType;
      return;
    }

    // Otherwise, make the entry opaque.
    Entries[index].Type = nullptr;
    return;
  }

  // Okay, we have an overlapping conflict of some sort.

  // If we have a vector type, split it.
  if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
    auto eltTy = vecTy->getElementType();
    CharUnits eltSize =
        (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements();
    assert(eltSize == getTypeStoreSize(CGM, eltTy));
    for (unsigned i = 0,
                  e = cast<llvm::FixedVectorType>(vecTy)->getNumElements();
         i != e; ++i) {
      addEntry(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }

  // If the entry is a vector type, split it and try again.
  if (Entries[index].Type && Entries[index].Type->isVectorTy()) {
    splitVectorEntry(index);
    goto restartAfterSplit;
  }

  // Okay, we have no choice but to make the existing entry opaque.

  Entries[index].Type = nullptr;

  // Stretch the start of the entry to the beginning of the range.
  if (begin < Entries[index].Begin) {
    Entries[index].Begin = begin;
    assert(index == 0 || begin >= Entries[index - 1].End);
  }

  // Stretch the end of the entry to the end of the range; but if we run
  // into the start of the next entry, just leave the range there and repeat.
  while (end > Entries[index].End) {
    assert(Entries[index].Type == nullptr);

    // If the range doesn't overlap the next entry, we're done.
    if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) {
      Entries[index].End = end;
      break;
    }

    // Otherwise, stretch to the start of the next entry.
    Entries[index].End = Entries[index + 1].Begin;

    // Continue with the next entry.
    index++;

    // This entry needs to be made opaque if it is not already.
    if (Entries[index].Type == nullptr)
      continue;

    // Split vector entries unless we completely subsume them.
    if (Entries[index].Type->isVectorTy() &&
        end < Entries[index].End) {
      splitVectorEntry(index);
    }

    // Make the entry opaque.
    Entries[index].Type = nullptr;
  }
}

/// Replace the entry of vector type at offset 'index' with a sequence
/// of its component vectors.
void SwiftAggLowering::splitVectorEntry(unsigned index) {
  auto vecTy = cast<llvm::VectorType>(Entries[index].Type);
  auto split = splitLegalVectorType(CGM, Entries[index].getWidth(), vecTy);

  auto eltTy = split.first;
  CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
  auto numElts = split.second;
  Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());

  CharUnits begin = Entries[index].Begin;
  for (unsigned i = 0; i != numElts; ++i) {
    unsigned idx = index + i;
    Entries[idx].Type = eltTy;
    Entries[idx].Begin = begin;
    Entries[idx].End = begin + eltSize;
    begin += eltSize;
  }
}

/// Given a power-of-two unit size, return the offset of the aligned unit
/// of that size which contains the given offset.
///
/// In other words, round down to the nearest multiple of the unit size.
static CharUnits getOffsetAtStartOfUnit(CharUnits offset, CharUnits unitSize) {
  assert(isPowerOf2(unitSize.getQuantity()));
  auto unitMask = ~(unitSize.getQuantity() - 1);
  return CharUnits::fromQuantity(offset.getQuantity() & unitMask);
}

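/// Return true if the two offsets fall within the same chunkSize-aligned
/// unit of the layout.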
static bool areBytesInSameUnit(CharUnits first, CharUnits second,
                               CharUnits chunkSize) {
  return getOffsetAtStartOfUnit(first, chunkSize)
      == getOffsetAtStartOfUnit(second, chunkSize);
}

static bool isMergeableEntryType(llvm::Type *type) {
  // Opaquely-typed memory is always mergeable.
  if (type == nullptr) return true;

  // Pointers and integers are always mergeable.  In theory we should not
  // merge pointers, but (1) it doesn't currently matter in practice because
  // the chunk size is never greater than the size of a pointer and (2)
  // Swift IRGen uses integer types for a lot of things that are "really"
  // just storing pointers (like std::optional<SomePointer>).  If we ever
  // have a target that would otherwise combine pointers, we should put some
  // effort into fixing those cases in Swift IRGen and then call out pointer
  // types here.

  // Floating-point and vector types should never be merged.
  // Most such types are too large and highly-aligned to ever trigger merging
  // in practice, but it's important for the rule to cover at least 'half'
  // and 'float', as well as things like small vectors of 'i1' or 'i8'.
  return (!type->isFloatingPointTy() && !type->isVectorTy());
}

bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first,
                                          const StorageEntry &second,
                                          CharUnits chunkSize) {
  // Only merge entries that overlap the same chunk.  We test this first
  // despite being a bit more expensive because this is the condition that
  // tends to prevent merging.
  if (!areBytesInSameUnit(first.End - CharUnits::One(), second.Begin,
                          chunkSize))
    return false;

  return (isMergeableEntryType(first.Type) &&
          isMergeableEntryType(second.Type));
}

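/// Finish the lowering: merge mergeable entries that share a pointer-sized
/// chunk into opaque ranges, then re-cover each opaque range with the
/// smallest aligned integer units that contain it (for example, three
/// adjacent opaque bytes starting at a chunk boundary become a single i32).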
void SwiftAggLowering::finish() {
  if (Entries.empty()) {
    Finished = true;
    return;
  }

  // We logically split the layout down into a series of chunks of this size,
  // which is generally the size of a pointer.
  const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);

  // First pass: if two entries should be merged, make them both opaque
  // and stretch one to meet the next.
  // Also, remember if there are any opaque entries.
  bool hasOpaqueEntries = (Entries[0].Type == nullptr);
  for (size_t i = 1, e = Entries.size(); i != e; ++i) {
    if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) {
      Entries[i - 1].Type = nullptr;
      Entries[i].Type = nullptr;
      Entries[i - 1].End = Entries[i].Begin;
      hasOpaqueEntries = true;

    } else if (Entries[i].Type == nullptr) {
      hasOpaqueEntries = true;
    }
  }

  // The rest of the algorithm leaves non-opaque entries alone, so if we
  // have no opaque entries, we're done.
  if (!hasOpaqueEntries) {
    Finished = true;
    return;
  }

  // Okay, move the entries to a temporary and rebuild Entries.
  auto orig = std::move(Entries);
  assert(Entries.empty());

  for (size_t i = 0, e = orig.size(); i != e; ++i) {
    // Just copy over non-opaque entries.
    if (orig[i].Type != nullptr) {
      Entries.push_back(orig[i]);
      continue;
    }

    // Scan forward to determine the full extent of the next opaque range.
    // We know from the first pass that only contiguous ranges will overlap
    // the same aligned chunk.
    auto begin = orig[i].Begin;
    auto end = orig[i].End;
    while (i + 1 != e &&
           orig[i + 1].Type == nullptr &&
           end == orig[i + 1].Begin) {
      end = orig[i + 1].End;
      i++;
    }

    // Add an entry per intersected chunk.
    do {
      // Find the smallest aligned storage unit in the maximal aligned
      // storage unit containing 'begin' that contains all the bytes in
      // the intersection between the range and this chunk.
      CharUnits localBegin = begin;
      CharUnits chunkBegin = getOffsetAtStartOfUnit(localBegin, chunkSize);
      CharUnits chunkEnd = chunkBegin + chunkSize;
      CharUnits localEnd = std::min(end, chunkEnd);

      // Just do a simple loop over ever-increasing unit sizes.
      CharUnits unitSize = CharUnits::One();
      CharUnits unitBegin, unitEnd;
      for (; ; unitSize *= 2) {
        assert(unitSize <= chunkSize);
        unitBegin = getOffsetAtStartOfUnit(localBegin, unitSize);
        unitEnd = unitBegin + unitSize;
        if (unitEnd >= localEnd) break;
      }

      // Add an entry for this unit.
      auto entryTy =
          llvm::IntegerType::get(CGM.getLLVMContext(),
                                 CGM.getContext().toBits(unitSize));
      Entries.push_back({unitBegin, unitEnd, entryTy});

      // The next chunk starts where this chunk left off.
      begin = localEnd;
    } while (begin != end);
  }

  // Okay, finally finished.
  Finished = true;
}

void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
  assert(Finished && "haven't yet finished lowering");

  for (auto &entry : Entries) {
    callback(entry.Begin, entry.End, entry.Type);
  }
}

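/// Build the types used for a coerce-and-expand ABI lowering: a struct that
/// covers the full layout (with explicit i8-array padding between entries,
/// packed if an entry is under-aligned) and a second type that omits the
/// padding.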
std::pair<llvm::StructType*, llvm::Type*>
SwiftAggLowering::getCoerceAndExpandTypes() const {
  assert(Finished && "haven't yet finished lowering");

  auto &ctx = CGM.getLLVMContext();

  if (Entries.empty()) {
    auto type = llvm::StructType::get(ctx);
    return { type, type };
  }

  SmallVector<llvm::Type*, 8> elts;
  CharUnits lastEnd = CharUnits::Zero();
  bool hasPadding = false;
  bool packed = false;
  for (auto &entry : Entries) {
    if (entry.Begin != lastEnd) {
      auto paddingSize = entry.Begin - lastEnd;
      assert(!paddingSize.isNegative());

      auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
                                          paddingSize.getQuantity());
      elts.push_back(padding);
      hasPadding = true;
    }

    if (!packed && !entry.Begin.isMultipleOf(CharUnits::fromQuantity(
            CGM.getDataLayout().getABITypeAlign(entry.Type))))
      packed = true;

    elts.push_back(entry.Type);

    lastEnd = entry.Begin + getTypeAllocSize(CGM, entry.Type);
    assert(entry.End <= lastEnd);
  }

  // We don't need to adjust 'packed' to deal with possible tail padding
  // because we never do that kind of access through the coercion type.
  auto coercionType = llvm::StructType::get(ctx, elts, packed);

  llvm::Type *unpaddedType = coercionType;
  if (hasPadding) {
    elts.clear();
    for (auto &entry : Entries) {
      elts.push_back(entry.Type);
    }
    if (elts.size() == 1) {
      unpaddedType = elts[0];
    } else {
      unpaddedType = llvm::StructType::get(ctx, elts, /*packed*/ false);
    }
  } else if (Entries.size() == 1) {
    unpaddedType = Entries[0].Type;
  }

  return { coercionType, unpaddedType };
}

bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
  assert(Finished && "haven't yet finished lowering");

  // Empty types don't need to be passed indirectly.
  if (Entries.empty()) return false;

  // Avoid copying the array of types when there's just a single element.
  if (Entries.size() == 1) {
    return getSwiftABIInfo(CGM).shouldPassIndirectly(Entries.back().Type,
                                                     asReturnValue);
  }

  SmallVector<llvm::Type*, 8> componentTys;
  componentTys.reserve(Entries.size());
  for (auto &entry : Entries) {
    componentTys.push_back(entry.Type);
  }
  return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
}

bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM,
                                     ArrayRef<llvm::Type*> componentTys,
                                     bool asReturnValue) {
  return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
}

CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
  // Currently always the size of an ordinary pointer.
  return CGM.getContext().toCharUnitsFromBits(
      CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default));
}

CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) {
  // For Swift's purposes, this is always just the store size of the type
  // rounded up to a power of 2.
  auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity();
  size = llvm::bit_ceil(size);
  assert(CGM.getDataLayout().getABITypeAlign(type) <= size);
  return CharUnits::fromQuantity(size);
}

bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
                                   llvm::IntegerType *intTy) {
  auto size = intTy->getBitWidth();
  switch (size) {
  case 1:
  case 8:
  case 16:
  case 32:
  case 64:
    // Just assume that the above are always legal.
    return true;

  case 128:
    return CGM.getContext().getTargetInfo().hasInt128Type();

  default:
    return false;
  }
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::VectorType *vectorTy) {
  return isLegalVectorType(
      CGM, vectorSize, vectorTy->getElementType(),
      cast<llvm::FixedVectorType>(vectorTy)->getNumElements());
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::Type *eltTy, unsigned numElts) {
  assert(numElts > 1 && "illegal vector length");
  return getSwiftABIInfo(CGM).isLegalVectorType(vectorSize, eltTy, numElts);
}

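/// Split a legal vector type into smaller legal pieces: preferably two
/// half-size vectors, otherwise its individual elements.  Returns the
/// component type and the number of components.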
std::pair<llvm::Type*, unsigned>
swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                llvm::VectorType *vectorTy) {
  auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements();
  auto eltTy = vectorTy->getElementType();

  // Try to split the vector type in half.
  if (numElts >= 4 && isPowerOf2(numElts)) {
    if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
      return {llvm::FixedVectorType::get(eltTy, numElts / 2), 2};
  }

  return {eltTy, numElts};
}

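/// Decompose a possibly-illegal vector type into legal components, greedily
/// emitting the largest legal power-of-2 subvectors first and falling back
/// to scalar elements if no legal subvector size exists.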
void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
                                   llvm::VectorType *origVectorTy,
                                   llvm::SmallVectorImpl<llvm::Type*> &components) {
  // If it's already a legal vector type, use it.
  if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) {
    components.push_back(origVectorTy);
    return;
  }

  // Try to split the vector into legal subvectors.
  auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements();
  auto eltTy = origVectorTy->getElementType();
  assert(numElts != 1);

  // The largest size that we're still considering making subvectors of.
  // Always a power of 2.
  unsigned logCandidateNumElts = llvm::Log2_32(numElts);
  unsigned candidateNumElts = 1U << logCandidateNumElts;
  assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);

  // Minor optimization: don't check the legality of this exact size twice.
  if (candidateNumElts == numElts) {
    logCandidateNumElts--;
    candidateNumElts >>= 1;
  }

  CharUnits eltSize = (origVectorSize / numElts);
  CharUnits candidateSize = eltSize * candidateNumElts;

  // The sensibility of this algorithm relies on the fact that we never
  // have a legal non-power-of-2 vector size without having the power of 2
  // also be legal.
  while (logCandidateNumElts > 0) {
    assert(candidateNumElts == 1U << logCandidateNumElts);
    assert(candidateNumElts <= numElts);
    assert(candidateSize == eltSize * candidateNumElts);

    // Skip illegal vector sizes.
    if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
      continue;
    }

    // Add the right number of vectors of this size.
    auto numVecs = numElts >> logCandidateNumElts;
    components.append(numVecs,
                      llvm::FixedVectorType::get(eltTy, candidateNumElts));
    numElts -= (numVecs << logCandidateNumElts);

    if (numElts == 0) return;

    // It's possible that the number of elements remaining will be legal.
    // This can happen with e.g. <7 x float> when <3 x float> is legal.
    // This only needs to be separately checked if it's not a power of 2.
    if (numElts > 2 && !isPowerOf2(numElts) &&
        isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
      components.push_back(llvm::FixedVectorType::get(eltTy, numElts));
      return;
    }

    // Bring candidateNumElts down to something no larger than numElts.
    do {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
    } while (candidateNumElts > numElts);
  }

  // Otherwise, just append a bunch of individual elements.
  components.append(numElts, eltTy);
}

bool swiftcall::mustPassRecordIndirectly(CodeGenModule &CGM,
                                         const RecordDecl *record) {
  // FIXME: should we not rely on the standard computation in Sema, just in
  // case we want to diverge from the platform ABI (e.g. on targets where
  // that uses the MSVC rule)?
  return !record->canPassInRegisters();
}

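/// Turn a finished SwiftAggLowering into an ABIArgInfo: empty lowerings are
/// ignored, lowerings the target wants passed indirectly become indirect
/// arguments, and everything else uses coerce-and-expand.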
static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering,
                                       bool forReturn,
                                       CharUnits alignmentForIndirect,
                                       unsigned IndirectAS) {
  if (lowering.empty()) {
    return ABIArgInfo::getIgnore();
  } else if (lowering.shouldPassIndirectly(forReturn)) {
    return ABIArgInfo::getIndirect(alignmentForIndirect,
                                   /*AddrSpace=*/IndirectAS,
                                   /*byval=*/false);
  } else {
    auto types = lowering.getCoerceAndExpandTypes();
    return ABIArgInfo::getCoerceAndExpand(types.first, types.second);
  }
}

static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
                               bool forReturn) {
  unsigned IndirectAS = CGM.getDataLayout().getAllocaAddrSpace();
  if (auto recordType = dyn_cast<RecordType>(type)) {
    auto record = recordType->getDecl();
    auto &layout = CGM.getContext().getASTRecordLayout(record);

    if (mustPassRecordIndirectly(CGM, record))
      return ABIArgInfo::getIndirect(layout.getAlignment(),
                                     /*AddrSpace=*/IndirectAS, /*byval=*/false);

    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
    lowering.finish();

    return classifyExpandedType(lowering, forReturn, layout.getAlignment(),
                                IndirectAS);
  }

  // Just assume that all of our target ABIs can support returning at least
  // two integer or floating-point values.
  if (isa<ComplexType>(type)) {
    return (forReturn ? ABIArgInfo::getDirect() : ABIArgInfo::getExpand());
  }

  // Vector types may need to be legalized.
  if (isa<VectorType>(type)) {
    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(type, CharUnits::Zero());
    lowering.finish();

    CharUnits alignment = CGM.getContext().getTypeAlignInChars(type);
    return classifyExpandedType(lowering, forReturn, alignment, IndirectAS);
  }

  // Member pointer types need to be expanded, but it's a simple form of
  // expansion that 'Direct' can handle.  Note that CanBeFlattened should be
  // true for this to work.

  // 'void' needs to be ignored.
  if (type->isVoidType()) {
    return ABIArgInfo::getIgnore();
  }

  // Everything else can be passed directly.
  return ABIArgInfo::getDirect();
}

ABIArgInfo swiftcall::classifyReturnType(CodeGenModule &CGM, CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ true);
}

ABIArgInfo swiftcall::classifyArgumentType(CodeGenModule &CGM,
                                           CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ false);
}

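/// Compute the ABI information for a function using the Swift calling
/// convention: classify the return type, then each argument in turn.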
void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  auto &retInfo = FI.getReturnInfo();
  retInfo = classifyReturnType(CGM, FI.getReturnType());

  for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) {
    auto &argInfo = FI.arg_begin()[i];
    argInfo.info = classifyArgumentType(CGM, argInfo.type);
  }
}

// Is swifterror lowered to a register by the target ABI?
bool swiftcall::isSwiftErrorLoweredInRegister(CodeGenModule &CGM) {
  return getSwiftABIInfo(CGM).isSwiftErrorInRegister();
}
884 | |