1//===- PPC.cpp ------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
#include "ABIInfoImpl.h"
#include "TargetInfo.h"
#include "clang/Basic/DiagnosticFrontend.h"
#include "llvm/Support/ErrorHandling.h"
12
13using namespace clang;
14using namespace clang::CodeGen;
15
16static RValue complexTempStructure(CodeGenFunction &CGF, Address VAListAddr,
17 QualType Ty, CharUnits SlotSize,
18 CharUnits EltSize, const ComplexType *CTy) {
19 Address Addr =
20 emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy: CGF.Int8Ty, DirectSize: SlotSize * 2,
21 DirectAlign: SlotSize, SlotSize, /*AllowHigher*/ AllowHigherAlign: true);
22
23 Address RealAddr = Addr;
24 Address ImagAddr = RealAddr;
25 if (CGF.CGM.getDataLayout().isBigEndian()) {
26 RealAddr =
27 CGF.Builder.CreateConstInBoundsByteGEP(Addr: RealAddr, Offset: SlotSize - EltSize);
28 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(Addr: ImagAddr,
29 Offset: 2 * SlotSize - EltSize);
30 } else {
31 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(Addr: RealAddr, Offset: SlotSize);
32 }
33
34 llvm::Type *EltTy = CGF.ConvertTypeForMem(T: CTy->getElementType());
35 RealAddr = RealAddr.withElementType(ElemTy: EltTy);
36 ImagAddr = ImagAddr.withElementType(ElemTy: EltTy);
37 llvm::Value *Real = CGF.Builder.CreateLoad(Addr: RealAddr, Name: ".vareal");
38 llvm::Value *Imag = CGF.Builder.CreateLoad(Addr: ImagAddr, Name: ".vaimag");
39
40 return RValue::getComplex(V1: Real, V2: Imag);
41}
42
43static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
44 llvm::Value *Address, bool Is64Bit,
45 bool IsAIX) {
46 // This is calculated from the LLVM and GCC tables and verified
47 // against gcc output. AFAIK all PPC ABIs use the same encoding.
48
49 CodeGen::CGBuilderTy &Builder = CGF.Builder;
50
51 llvm::IntegerType *i8 = CGF.Int8Ty;
52 llvm::Value *Four8 = llvm::ConstantInt::get(Ty: i8, V: 4);
53 llvm::Value *Eight8 = llvm::ConstantInt::get(Ty: i8, V: 8);
54 llvm::Value *Sixteen8 = llvm::ConstantInt::get(Ty: i8, V: 16);
55
56 // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
57 AssignToArrayRange(Builder, Array: Address, Value: Is64Bit ? Eight8 : Four8, FirstIndex: 0, LastIndex: 31);
58
59 // 32-63: fp0-31, the 8-byte floating-point registers
60 AssignToArrayRange(Builder, Array: Address, Value: Eight8, FirstIndex: 32, LastIndex: 63);
61
62 // 64-67 are various 4-byte or 8-byte special-purpose registers:
63 // 64: mq
64 // 65: lr
65 // 66: ctr
66 // 67: ap
67 AssignToArrayRange(Builder, Array: Address, Value: Is64Bit ? Eight8 : Four8, FirstIndex: 64, LastIndex: 67);
68
69 // 68-76 are various 4-byte special-purpose registers:
70 // 68-75 cr0-7
71 // 76: xer
72 AssignToArrayRange(Builder, Array: Address, Value: Four8, FirstIndex: 68, LastIndex: 76);
73
74 // 77-108: v0-31, the 16-byte vector registers
75 AssignToArrayRange(Builder, Array: Address, Value: Sixteen8, FirstIndex: 77, LastIndex: 108);
76
77 // 109: vrsave
78 // 110: vscr
79 AssignToArrayRange(Builder, Array: Address, Value: Is64Bit ? Eight8 : Four8, FirstIndex: 109, LastIndex: 110);
80
81 // AIX does not utilize the rest of the registers.
82 if (IsAIX)
83 return false;
84
85 // 111: spe_acc
86 // 112: spefscr
87 // 113: sfp
88 AssignToArrayRange(Builder, Array: Address, Value: Is64Bit ? Eight8 : Four8, FirstIndex: 111, LastIndex: 113);
89
90 if (!Is64Bit)
91 return false;
92
93 // TODO: Need to verify if these registers are used on 64 bit AIX with Power8
94 // or above CPU.
95 // 64-bit only registers:
96 // 114: tfhar
97 // 115: tfiar
98 // 116: texasr
99 AssignToArrayRange(Builder, Array: Address, Value: Eight8, FirstIndex: 114, LastIndex: 116);
100
101 return false;
102}
103
104// AIX
105namespace {
106/// AIXABIInfo - The AIX XCOFF ABI information.
/// AIXABIInfo - The AIX XCOFF ABI information.
class AIXABIInfo : public ABIInfo {
  const bool Is64Bit;
  // Size of a pointer/register slot: 8 bytes in 64-bit mode, otherwise 4.
  const unsigned PtrByteSize;
  /// Alignment the calling convention assigns to a parameter of type \p Ty.
  CharUnits getParamTypeAlignment(QualType Ty) const;

public:
  AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
      : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}

  /// Return true if the ABI requires \p Ty to be passed sign- or
  /// zero-extended to the register width.
  bool isPromotableTypeForABI(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    // Let the C++ ABI claim the return classification first (e.g. indirect
    // returns for non-trivial records); otherwise use the target's rules.
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(RetTy: FI.getReturnType());

    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(Ty: I.type);
  }

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;

  // Mangling suffixes for function multi-versioning (target_clones) on AIX.
  using ABIInfo::appendAttributeMangling;
  void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
                               raw_ostream &Out) const override;
  void appendAttributeMangling(StringRef AttrStr,
                               raw_ostream &Out) const override;
};
138
139void AIXABIInfo::appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
140 raw_ostream &Out) const {
141 appendAttributeMangling(AttrStr: Attr->getFeatureStr(Index), Out);
142}
143
144void AIXABIInfo::appendAttributeMangling(StringRef AttrStr,
145 raw_ostream &Out) const {
146 if (AttrStr == "default") {
147 Out << ".default";
148 return;
149 }
150
151 const TargetInfo &TI = CGT.getTarget();
152 ParsedTargetAttr Info = TI.parseTargetAttr(Str: AttrStr);
153
154 if (!Info.CPU.empty()) {
155 assert(Info.Features.empty() && "cannot have both a CPU and a feature");
156 Out << ".cpu_" << Info.CPU;
157 return;
158 }
159
160 assert(0 && "specifying target features on an FMV is unsupported on AIX");
161}
162
/// AIXTargetCodeGenInfo - Target hooks for AIX (32- and 64-bit XCOFF).
class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
  const bool Is64Bit;

public:
  AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
      : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(args&: CGT, args&: Is64Bit)),
        Is64Bit(Is64Bit) {}
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  // Adds the "toc-data" IR attribute to eligible global variables.
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};
180} // namespace
181
182// Return true if the ABI requires Ty to be passed sign- or zero-
183// extended to 32/64 bits.
184bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
185 // Treat an enum type as its underlying type.
186 if (const auto *ED = Ty->getAsEnumDecl())
187 Ty = ED->getIntegerType();
188
189 // Promotable integer types are required to be promoted by the ABI.
190 if (getContext().isPromotableIntegerType(T: Ty))
191 return true;
192
193 if (!Is64Bit)
194 return false;
195
196 // For 64 bit mode, in addition to the usual promotable integer types, we also
197 // need to extend all 32-bit types, since the ABI requires promotion to 64
198 // bits.
199 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
200 switch (BT->getKind()) {
201 case BuiltinType::Int:
202 case BuiltinType::UInt:
203 return true;
204 default:
205 break;
206 }
207
208 return false;
209}
210
211ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
212 if (RetTy->isAnyComplexType())
213 return ABIArgInfo::getDirect();
214
215 if (RetTy->isVectorType())
216 return ABIArgInfo::getDirect();
217
218 if (RetTy->isVoidType())
219 return ABIArgInfo::getIgnore();
220
221 if (isAggregateTypeForABI(T: RetTy))
222 return getNaturalAlignIndirect(Ty: RetTy, AddrSpace: getDataLayout().getAllocaAddrSpace());
223
224 return (isPromotableTypeForABI(Ty: RetTy) ? ABIArgInfo::getExtend(Ty: RetTy)
225 : ABIArgInfo::getDirect());
226}
227
228ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
229 Ty = useFirstFieldIfTransparentUnion(Ty);
230
231 if (Ty->isAnyComplexType())
232 return ABIArgInfo::getDirect();
233
234 if (Ty->isVectorType())
235 return ABIArgInfo::getDirect();
236
237 if (isAggregateTypeForABI(T: Ty)) {
238 // Records with non-trivial destructors/copy-constructors should not be
239 // passed by value.
240 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(T: Ty, CXXABI&: getCXXABI()))
241 return getNaturalAlignIndirect(Ty, AddrSpace: getDataLayout().getAllocaAddrSpace(),
242 ByVal: RAA == CGCXXABI::RAA_DirectInMemory);
243
244 CharUnits CCAlign = getParamTypeAlignment(Ty);
245 CharUnits TyAlign = getContext().getTypeAlignInChars(T: Ty);
246
247 return ABIArgInfo::getIndirect(
248 Alignment: CCAlign, /*AddrSpace=*/getDataLayout().getAllocaAddrSpace(),
249 /*ByVal=*/true,
250 /*Realign=*/TyAlign > CCAlign);
251 }
252
253 return (isPromotableTypeForABI(Ty)
254 ? ABIArgInfo::getExtend(Ty, T: CGT.ConvertType(T: Ty))
255 : ABIArgInfo::getDirect());
256}
257
258CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
259 // Complex types are passed just like their elements.
260 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
261 Ty = CTy->getElementType();
262
263 if (Ty->isVectorType())
264 return CharUnits::fromQuantity(Quantity: 16);
265
266 // If the structure contains a vector type, the alignment is 16.
267 if (isRecordWithSIMDVectorType(Context&: getContext(), Ty))
268 return CharUnits::fromQuantity(Quantity: 16);
269
270 return CharUnits::fromQuantity(Quantity: PtrByteSize);
271}
272
273RValue AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
274 QualType Ty, AggValueSlot Slot) const {
275
276 auto TypeInfo = getContext().getTypeInfoInChars(T: Ty);
277 TypeInfo.Align = getParamTypeAlignment(Ty);
278
279 CharUnits SlotSize = CharUnits::fromQuantity(Quantity: PtrByteSize);
280
281 // If we have a complex type and the base type is smaller than the register
282 // size, the ABI calls for the real and imaginary parts to be right-adjusted
283 // in separate words in 32bit mode or doublewords in 64bit mode. However,
284 // Clang expects us to produce a pointer to a structure with the two parts
285 // packed tightly. So generate loads of the real and imaginary parts relative
286 // to the va_list pointer, and store them to a temporary structure. We do the
287 // same as the PPC64ABI here.
288 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
289 CharUnits EltSize = TypeInfo.Width / 2;
290 if (EltSize < SlotSize)
291 return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
292 }
293
294 return emitVoidPtrVAArg(CGF, VAListAddr, ValueTy: Ty, /*Indirect*/ IsIndirect: false, ValueInfo: TypeInfo,
295 SlotSizeAndAlign: SlotSize, /*AllowHigher*/ AllowHigherAlign: true, Slot);
296}
297
// Fill the EH register-size table using the shared PPC helper; AIX skips the
// SPE/TM registers past vrsave/vscr.
bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
}
302
// Mark eligible global variables with the "toc-data" attribute, honoring
// -mtocdata/-mno-tocdata options, and warn when an explicitly requested
// variable cannot be placed in the TOC.
void AIXTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  // Only global variables can be TOC data.
  if (!isa<llvm::GlobalVariable>(Val: GV))
    return;

  auto *GVar = cast<llvm::GlobalVariable>(Val: GV);
  auto GVId = GV->getName();

  // Is this a global variable specified by the user as toc-data?
  bool UserSpecifiedTOC =
      llvm::binary_search(Range: M.getCodeGenOpts().TocDataVarsUserSpecified, Value&: GVId);
  // Assumes the same variable cannot be in both TocVarsUserSpecified and
  // NoTocVars.
  if (UserSpecifiedTOC ||
      ((M.getCodeGenOpts().AllTocData) &&
       !llvm::binary_search(Range: M.getCodeGenOpts().NoTocDataVars, Value&: GVId))) {
    const unsigned long PointerSize =
        GV->getParent()->getDataLayout().getPointerSizeInBits() / 8;
    auto *VarD = dyn_cast<VarDecl>(Val: D);
    assert(VarD && "Invalid declaration of global variable.");

    ASTContext &Context = D->getASTContext();
    unsigned Alignment = Context.toBits(CharSize: Context.getDeclAlign(D)) / 8;
    const auto *Ty = VarD->getType().getTypePtr();
    const RecordDecl *RDecl = Ty->getAsRecordDecl();

    // Only warn for variables the user explicitly named with external
    // linkage; the blanket AllTocData mode stays silent on unsupported ones.
    bool EmitDiagnostic = UserSpecifiedTOC && GV->hasExternalLinkage();
    auto reportUnsupportedWarning = [&](bool ShouldEmitWarning, StringRef Msg) {
      if (ShouldEmitWarning)
        M.getDiags().Report(Loc: D->getLocation(), DiagID: diag::warn_toc_unsupported_type)
            << GVId << Msg;
    };
    // Reject variables that cannot live in a (pointer-sized) TOC entry.
    if (!Ty || Ty->isIncompleteType())
      reportUnsupportedWarning(EmitDiagnostic, "of incomplete type");
    else if (RDecl && RDecl->hasFlexibleArrayMember())
      reportUnsupportedWarning(EmitDiagnostic,
                               "it contains a flexible array member");
    else if (VarD->getTLSKind() != VarDecl::TLS_None)
      reportUnsupportedWarning(EmitDiagnostic, "of thread local storage");
    else if (PointerSize < Context.getTypeInfo(T: VarD->getType()).Width / 8)
      reportUnsupportedWarning(EmitDiagnostic,
                               "variable is larger than a pointer");
    else if (PointerSize < Alignment)
      reportUnsupportedWarning(EmitDiagnostic,
                               "variable is aligned wider than a pointer");
    else if (D->hasAttr<SectionAttr>())
      reportUnsupportedWarning(EmitDiagnostic,
                               "variable has a section attribute");
    else if (GV->hasExternalLinkage() ||
             (M.getCodeGenOpts().AllTocData && !GV->hasLocalLinkage()))
      GVar->addAttribute(Kind: "toc-data");
  }
}
356
357// PowerPC-32
358namespace {
359/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
  // When set, variadic FP values are counted against the GPR save area
  // rather than the FPRs (see EmitVAArg below).
  bool IsSoftFloatABI;
  // When set, small aggregates are returned in r3/r4 (-msvr4-struct-return).
  bool IsRetSmallStructInRegABI;

  /// Alignment the calling convention assigns to a parameter of type \p Ty.
  CharUnits getParamTypeAlignment(QualType Ty) const;

public:
  PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI,
                     bool RetSmallStructInRegABI)
      : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    // Let the C++ ABI claim the return classification first; otherwise use
    // this target's rules. Arguments use the default classification.
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(RetTy: FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(RetTy: I.type);
  }

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;
};
384
/// PPC32TargetCodeGenInfo - Target hooks for 32-bit PowerPC ELF.
class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
                         bool RetSmallStructInRegABI)
      : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
            args&: CGT, args&: SoftFloatABI, args&: RetSmallStructInRegABI)) {}

  /// Decide (from the triple and the -maix-/-msvr4-struct-return options)
  /// whether small struct returns use registers or an sret pointer.
  static bool isStructReturnInRegABI(const llvm::Triple &Triple,
                                     const CodeGenOptions &Opts);

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
403}
404
405CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
406 // Complex types are passed just like their elements.
407 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
408 Ty = CTy->getElementType();
409
410 if (Ty->isVectorType())
411 return CharUnits::fromQuantity(Quantity: getContext().getTypeSize(T: Ty) == 128 ? 16
412 : 4);
413
414 // For single-element float/vector structs, we consider the whole type
415 // to have the same alignment requirements as its single element.
416 const Type *AlignTy = nullptr;
417 if (const Type *EltType = isSingleElementStruct(T: Ty, Context&: getContext())) {
418 const BuiltinType *BT = EltType->getAs<BuiltinType>();
419 if ((EltType->isVectorType() && getContext().getTypeSize(T: EltType) == 128) ||
420 (BT && BT->isFloatingPoint()))
421 AlignTy = EltType;
422 }
423
424 if (AlignTy)
425 return CharUnits::fromQuantity(Quantity: AlignTy->isVectorType() ? 16 : 4);
426 return CharUnits::fromQuantity(Quantity: 4);
427}
428
429ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
430 uint64_t Size;
431
432 // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
433 if (isAggregateTypeForABI(T: RetTy) && IsRetSmallStructInRegABI &&
434 (Size = getContext().getTypeSize(T: RetTy)) <= 64) {
435 // System V ABI (1995), page 3-22, specified:
436 // > A structure or union whose size is less than or equal to 8 bytes
437 // > shall be returned in r3 and r4, as if it were first stored in the
438 // > 8-byte aligned memory area and then the low addressed word were
439 // > loaded into r3 and the high-addressed word into r4. Bits beyond
440 // > the last member of the structure or union are not defined.
441 //
442 // GCC for big-endian PPC32 inserts the pad before the first member,
443 // not "beyond the last member" of the struct. To stay compatible
444 // with GCC, we coerce the struct to an integer of the same size.
445 // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
446 if (Size == 0)
447 return ABIArgInfo::getIgnore();
448 else {
449 llvm::Type *CoerceTy = llvm::Type::getIntNTy(C&: getVMContext(), N: Size);
450 return ABIArgInfo::getDirect(T: CoerceTy);
451 }
452 }
453
454 return DefaultABIInfo::classifyReturnType(RetTy);
455}
456
// TODO: this implementation is now likely redundant with
// DefaultABIInfo::EmitVAArg.
RValue PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
                                     QualType Ty, AggValueSlot Slot) const {
  // Darwin uses a simple pointer-bump va_list; hand it to the generic
  // emitter with the CC-mandated alignment.
  if (getTarget().getTriple().isOSDarwin()) {
    auto TI = getContext().getTypeInfoInChars(T: Ty);
    TI.Align = getParamTypeAlignment(Ty);

    CharUnits SlotSize = CharUnits::fromQuantity(Quantity: 4);
    return emitVoidPtrVAArg(CGF, VAListAddr: VAList, ValueTy: Ty,
                            IsIndirect: classifyArgumentType(RetTy: Ty).isIndirect(), ValueInfo: TI, SlotSizeAndAlign: SlotSize,
                            /*AllowHigherAlign=*/true, Slot);
  }

  // Register-counter value at which the register save area is exhausted and
  // arguments must come from the overflow area instead.
  const unsigned OverflowLimit = 8;
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    // TODO: Implement this. For now ignore.
    (void)CTy;
    return RValue::getAggregate(addr: Address::invalid()); // FIXME?
  }

  // struct __va_list_tag {
  //   unsigned char gpr;
  //   unsigned char fpr;
  //   unsigned short reserved;
  //   void *overflow_arg_area;
  //   void *reg_save_area;
  // };

  bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(T: Ty) == 64;
  bool isInt = !Ty->isFloatingType();
  bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(T: Ty) == 64;

  // All aggregates are passed indirectly? That doesn't seem consistent
  // with the argument-lowering code.
  bool isIndirect = isAggregateTypeForABI(T: Ty);

  CGBuilderTy &Builder = CGF.Builder;

  // The calling convention either uses 1-2 GPRs or 1 FPR.
  Address NumRegsAddr = Address::invalid();
  if (isInt || IsSoftFloatABI) {
    NumRegsAddr = Builder.CreateStructGEP(Addr: VAList, Index: 0, Name: "gpr");
  } else {
    NumRegsAddr = Builder.CreateStructGEP(Addr: VAList, Index: 1, Name: "fpr");
  }

  llvm::Value *NumRegs = Builder.CreateLoad(Addr: NumRegsAddr, Name: "numUsedRegs");

  // "Align" the register count when TY is i64.
  // (Round up to an even count: a 64-bit value occupies a register pair, as
  // reflected by the +2 increment below.)
  if (isI64 || (isF64 && IsSoftFloatABI)) {
    NumRegs = Builder.CreateAdd(LHS: NumRegs, RHS: Builder.getInt8(C: 1));
    NumRegs = Builder.CreateAnd(LHS: NumRegs, RHS: Builder.getInt8(C: (uint8_t) ~1U));
  }

  llvm::Value *CC =
      Builder.CreateICmpULT(LHS: NumRegs, RHS: Builder.getInt8(C: OverflowLimit), Name: "cond");

  llvm::BasicBlock *UsingRegs = CGF.createBasicBlock(name: "using_regs");
  llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock(name: "using_overflow");
  llvm::BasicBlock *Cont = CGF.createBasicBlock(name: "cont");

  Builder.CreateCondBr(Cond: CC, True: UsingRegs, False: UsingOverflow);

  // Indirect arguments hold a pointer in the slot; the pointee is loaded
  // after the merge at the end.
  llvm::Type *DirectTy = CGF.ConvertType(T: Ty), *ElementTy = DirectTy;
  if (isIndirect)
    DirectTy = CGF.DefaultPtrTy;

  // Case 1: consume registers.
  Address RegAddr = Address::invalid();
  {
    CGF.EmitBlock(BB: UsingRegs);

    // reg_save_area (field 4 of __va_list_tag).
    Address RegSaveAreaPtr = Builder.CreateStructGEP(Addr: VAList, Index: 4);
    RegAddr = Address(Builder.CreateLoad(Addr: RegSaveAreaPtr), CGF.Int8Ty,
                      CharUnits::fromQuantity(Quantity: 8));
    assert(RegAddr.getElementType() == CGF.Int8Ty);

    // Floating-point registers start after the general-purpose registers.
    if (!(isInt || IsSoftFloatABI)) {
      RegAddr = Builder.CreateConstInBoundsByteGEP(Addr: RegAddr,
                                                   Offset: CharUnits::fromQuantity(Quantity: 32));
    }

    // Get the address of the saved value by scaling the number of
    // registers we've used by the number of
    CharUnits RegSize = CharUnits::fromQuantity(Quantity: (isInt || IsSoftFloatABI) ? 4 : 8);
    llvm::Value *RegOffset =
        Builder.CreateMul(LHS: NumRegs, RHS: Builder.getInt8(C: RegSize.getQuantity()));
    RegAddr = Address(Builder.CreateInBoundsGEP(
                          Ty: CGF.Int8Ty, Ptr: RegAddr.emitRawPointer(CGF), IdxList: RegOffset),
                      DirectTy,
                      RegAddr.getAlignment().alignmentOfArrayElement(elementSize: RegSize));

    // Increase the used-register count.
    NumRegs =
        Builder.CreateAdd(LHS: NumRegs,
                          RHS: Builder.getInt8(C: (isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
    Builder.CreateStore(Val: NumRegs, Addr: NumRegsAddr);

    CGF.EmitBranch(Block: Cont);
  }

  // Case 2: consume space in the overflow area.
  Address MemAddr = Address::invalid();
  {
    CGF.EmitBlock(BB: UsingOverflow);

    // Saturate the counter so subsequent va_args skip straight to overflow.
    Builder.CreateStore(Val: Builder.getInt8(C: OverflowLimit), Addr: NumRegsAddr);

    // Everything in the overflow area is rounded up to a size of at least 4.
    CharUnits OverflowAreaAlign = CharUnits::fromQuantity(Quantity: 4);

    CharUnits Size;
    if (!isIndirect) {
      auto TypeInfo = CGF.getContext().getTypeInfoInChars(T: Ty);
      Size = TypeInfo.Width.alignTo(Align: OverflowAreaAlign);
    } else {
      Size = CGF.getPointerSize();
    }

    // overflow_arg_area (field 3 of __va_list_tag).
    Address OverflowAreaAddr = Builder.CreateStructGEP(Addr: VAList, Index: 3);
    Address OverflowArea =
        Address(Builder.CreateLoad(Addr: OverflowAreaAddr, Name: "argp.cur"), CGF.Int8Ty,
                OverflowAreaAlign);
    // Round up address of argument to alignment
    CharUnits Align = CGF.getContext().getTypeAlignInChars(T: Ty);
    if (Align > OverflowAreaAlign) {
      llvm::Value *Ptr = OverflowArea.emitRawPointer(CGF);
      OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
                             OverflowArea.getElementType(), Align);
    }

    MemAddr = OverflowArea.withElementType(ElemTy: DirectTy);

    // Increase the overflow area.
    OverflowArea = Builder.CreateConstInBoundsByteGEP(Addr: OverflowArea, Offset: Size);
    Builder.CreateStore(Val: OverflowArea.emitRawPointer(CGF), Addr: OverflowAreaAddr);
    CGF.EmitBranch(Block: Cont);
  }

  CGF.EmitBlock(BB: Cont);

  // Merge the cases with a phi.
  Address Result = emitMergePHI(CGF, Addr1: RegAddr, Block1: UsingRegs, Addr2: MemAddr, Block2: UsingOverflow,
                                Name: "vaarg.addr");

  // Load the pointer if the argument was passed indirectly.
  if (isIndirect) {
    Result = Address(Builder.CreateLoad(Addr: Result, Name: "aggr"), ElementTy,
                     getContext().getTypeAlignInChars(T: Ty));
  }

  return CGF.EmitLoadOfAnyValue(V: CGF.MakeAddrLValue(Addr: Result, T: Ty), Slot);
}
612
613bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
614 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
615 assert(Triple.isPPC32());
616
617 switch (Opts.getStructReturnConvention()) {
618 case CodeGenOptions::SRCK_Default:
619 break;
620 case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
621 return false;
622 case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
623 return true;
624 }
625
626 if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
627 return true;
628
629 return false;
630}
631
// Fill the EH register-size table using the shared PPC helper, 32-bit
// non-AIX flavor (includes the SPE registers).
bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
                                     /*IsAIX*/ false);
}
638
639// PowerPC-64
640
641namespace {
642
643/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public ABIInfo {
  // Width in bits of a general-purpose register.
  static const unsigned GPRBits = 64;
  // ABI flavor; e.g. ELFv2 enables homogeneous-aggregate handling (see
  // getParamTypeAlignment).
  PPC64_SVR4_ABIKind Kind;
  bool IsSoftFloatABI;

public:
  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, PPC64_SVR4_ABIKind Kind,
                     bool SoftFloatABI)
      : ABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {}

  /// Return true if the ABI requires \p Ty to be sign- or zero-extended
  /// to 64 bits.
  bool isPromotableTypeForABI(QualType Ty) const;
  /// Alignment required for \p Ty in the parameter save area (at least 8).
  CharUnits getParamTypeAlignment(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  // TODO: We can add more logic to computeInfo to improve performance.
  // Example: For aggregate arguments that fit in a register, we could
  // use getDirectInReg (as is done below for structs containing a single
  // floating-point value) to avoid pushing them to memory on function
  // entry. This would require changing the logic in PPCISelLowering
  // when lowering the parameters in the caller and args in the callee.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(RetTy: FI.getReturnType());
    for (auto &I : FI.arguments()) {
      // We rely on the default argument classification for the most part.
      // One exception: An aggregate containing a single floating-point
      // or vector item must be passed in a register if one is available.
      const Type *T = isSingleElementStruct(T: I.type, Context&: getContext());
      if (T) {
        const BuiltinType *BT = T->getAs<BuiltinType>();
        if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
            (BT && BT->isFloatingPoint())) {
          QualType QT(T, 0);
          I.info = ABIArgInfo::getDirectInReg(T: CGT.ConvertType(T: QT));
          continue;
        }
      }
      I.info = classifyArgumentType(Ty: I.type);
    }
  }

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;
};
694
/// PPC64_SVR4_TargetCodeGenInfo - Target hooks for 64-bit PowerPC ELF.
class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {

public:
  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, PPC64_SVR4_ABIKind Kind,
                               bool SoftFloatABI)
      : TargetCodeGenInfo(
            std::make_unique<PPC64_SVR4_ABIInfo>(args&: CGT, args&: Kind, args&: SoftFloatABI)) {
    // Swift's error value is not kept in a dedicated register on PPC64.
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(args&: CGT, /*SwiftErrorInRegister=*/args: false);
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
  void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
                          const llvm::MapVector<GlobalDecl, StringRef>
                              &MangledDeclNames) const override;
};
717
/// PPC64TargetCodeGenInfo - PPC64 target using the default (non-SVR4)
/// argument classification.
class PPC64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(args&: CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
731}
732
733// Return true if the ABI requires Ty to be passed sign- or zero-
734// extended to 64 bits.
735bool
736PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
737 // Treat an enum type as its underlying type.
738 if (const auto *ED = Ty->getAsEnumDecl())
739 Ty = ED->getIntegerType();
740
741 // Promotable integer types are required to be promoted by the ABI.
742 if (isPromotableIntegerTypeForABI(Ty))
743 return true;
744
745 // In addition to the usual promotable integer types, we also need to
746 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
747 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
748 switch (BT->getKind()) {
749 case BuiltinType::Int:
750 case BuiltinType::UInt:
751 return true;
752 default:
753 break;
754 }
755
756 if (const auto *EIT = Ty->getAs<BitIntType>())
757 if (EIT->getNumBits() < 64)
758 return true;
759
760 return false;
761}
762
/// isAlignedParamType - Determine whether a type requires 16-byte or
/// higher alignment in the parameter area. Always returns at least 8.
CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
  // Complex types are passed just like their elements.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // True for floating-point types using IEEE binary128 semantics, which the
  // ABI maps to a (quadword-aligned) vector register; see below.
  auto FloatUsesVector = [this](QualType Ty){
    return Ty->isRealFloatingType() && &getContext().getFloatTypeSemantics(
                                           T: Ty) == &llvm::APFloat::IEEEquad();
  };

  // Only vector types of size 16 bytes need alignment (larger types are
  // passed via reference, smaller types are not aligned).
  if (Ty->isVectorType()) {
    return CharUnits::fromQuantity(Quantity: getContext().getTypeSize(T: Ty) == 128 ? 16 : 8);
  } else if (FloatUsesVector(Ty)) {
    // According to ABI document section 'Optional Save Areas': If extended
    // precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION
    // format are supported, map them to a single quadword, quadword aligned.
    return CharUnits::fromQuantity(Quantity: 16);
  }

  // For single-element float/vector structs, we consider the whole type
  // to have the same alignment requirements as its single element.
  const Type *AlignAsType = nullptr;
  const Type *EltType = isSingleElementStruct(T: Ty, Context&: getContext());
  if (EltType) {
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if ((EltType->isVectorType() && getContext().getTypeSize(T: EltType) == 128) ||
        (BT && BT->isFloatingPoint()))
      AlignAsType = EltType;
  }

  // Likewise for ELFv2 homogeneous aggregates.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (!AlignAsType && Kind == PPC64_SVR4_ABIKind::ELFv2 &&
      isAggregateTypeForABI(T: Ty) && isHomogeneousAggregate(Ty, Base, Members))
    AlignAsType = Base;

  // With special case aggregates, only vector base types need alignment.
  if (AlignAsType) {
    bool UsesVector = AlignAsType->isVectorType() ||
                      FloatUsesVector(QualType(AlignAsType, 0));
    return CharUnits::fromQuantity(Quantity: UsesVector ? 16 : 8);
  }

  // Otherwise, we only need alignment for any aggregate type that
  // has an alignment requirement of >= 16 bytes.
  if (isAggregateTypeForABI(T: Ty) && getContext().getTypeAlign(T: Ty) >= 128) {
    return CharUnits::fromQuantity(Quantity: 16);
  }

  return CharUnits::fromQuantity(Quantity: 8);
}
819
820bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
821 // Homogeneous aggregates for ELFv2 must have base types of float,
822 // double, long double, or 128-bit vectors.
823 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
824 if (BT->getKind() == BuiltinType::Float ||
825 BT->getKind() == BuiltinType::Double ||
826 BT->getKind() == BuiltinType::LongDouble ||
827 BT->getKind() == BuiltinType::Ibm128 ||
828 (getContext().getTargetInfo().hasFloat128Type() &&
829 (BT->getKind() == BuiltinType::Float128))) {
830 if (IsSoftFloatABI)
831 return false;
832 return true;
833 }
834 }
835 if (const VectorType *VT = Ty->getAs<VectorType>()) {
836 if (getContext().getTypeSize(T: VT) == 128)
837 return true;
838 }
839 return false;
840}
841
842bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
843 const Type *Base, uint64_t Members) const {
844 // Vector and fp128 types require one register, other floating point types
845 // require one or two registers depending on their size.
846 uint32_t NumRegs =
847 ((getContext().getTargetInfo().hasFloat128Type() &&
848 Base->isFloat128Type()) ||
849 Base->isVectorType()) ? 1
850 : (getContext().getTypeSize(T: Base) + 63) / 64;
851
852 // Homogeneous Aggregates may occupy at most 8 registers.
853 return Members * NumRegs <= 8;
854}
855
// Classify how an argument of type \p Ty is passed under the 64-bit
// SVR4 / ELFv2 PowerPC calling conventions: directly (possibly coerced
// to an integer or array type), indirectly (byval or by pointer), or
// sign/zero-extended.  The order of checks below mirrors the ABI rules
// and must not be rearranged.
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
  // Transparent unions are passed as their first member.
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Complex values are passed directly as their two scalar parts.
  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (Ty->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(T: Ty);
    if (Size > 128)
      return getNaturalAlignIndirect(Ty, AddrSpace: getDataLayout().getAllocaAddrSpace(),
                                     /*ByVal=*/false);
    else if (Size < 128) {
      // Coerce to a single integer of the vector's exact bit width.
      llvm::Type *CoerceTy = llvm::IntegerType::get(C&: getVMContext(), NumBits: Size);
      return ABIArgInfo::getDirect(T: CoerceTy);
    }
    // Exactly 128 bits: fall through and pass as a normal Altivec vector.
  }

  // _BitInt wider than 128 bits does not fit the GPR scheme; pass byval.
  if (const auto *EIT = Ty->getAs<BitIntType>())
    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(Ty, AddrSpace: getDataLayout().getAllocaAddrSpace(),
                                     /*ByVal=*/true);

  if (isAggregateTypeForABI(T: Ty)) {
    // C++ records with non-trivial copy/move/destroy semantics must be
    // passed indirectly, as dictated by the C++ ABI.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(T: Ty, CXXABI&: getCXXABI()))
      return getNaturalAlignIndirect(Ty, AddrSpace: getDataLayout().getAllocaAddrSpace(),
                                     ByVal: RAA == CGCXXABI::RAA_DirectInMemory);

    // ABI-mandated slot alignment vs. the type's own alignment; the latter
    // may be larger and then requires realignment in the callee.
    uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
    uint64_t TyAlign = getContext().getTypeAlignInChars(T: Ty).getQuantity();

    // ELFv2 homogeneous aggregates are passed as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == PPC64_SVR4_ABIKind::ELFv2 &&
        isHomogeneousAggregate(Ty, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(T: QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(ElementType: BaseTy, NumElements: Members);
      return ABIArgInfo::getDirect(T: CoerceTy);
    }

    // If an aggregate may end up fully in registers, we do not
    // use the ByVal method, but pass the aggregate as array.
    // This is usually beneficial since we avoid forcing the
    // back-end to store the argument to memory.
    uint64_t Bits = getContext().getTypeSize(T: Ty);
    if (Bits > 0 && Bits <= 8 * GPRBits) {
      llvm::Type *CoerceTy;

      // Types up to 8 bytes are passed as integer type (which will be
      // properly aligned in the argument save area doubleword).
      if (Bits <= GPRBits)
        CoerceTy =
            llvm::IntegerType::get(C&: getVMContext(), NumBits: llvm::alignTo(Value: Bits, Align: 8));
      // Larger types are passed as arrays, with the base type selected
      // according to the required alignment in the save area.
      else {
        uint64_t RegBits = ABIAlign * 8;
        uint64_t NumRegs = llvm::alignTo(Value: Bits, Align: RegBits) / RegBits;
        llvm::Type *RegTy = llvm::IntegerType::get(C&: getVMContext(), NumBits: RegBits);
        CoerceTy = llvm::ArrayType::get(ElementType: RegTy, NumElements: NumRegs);
      }

      return ABIArgInfo::getDirect(T: CoerceTy);
    }

    // All other aggregates are passed ByVal; request realignment when the
    // type is more strictly aligned than its ABI parameter slot.
    return ABIArgInfo::getIndirect(
        Alignment: CharUnits::fromQuantity(Quantity: ABIAlign),
        /*AddrSpace=*/getDataLayout().getAllocaAddrSpace(),
        /*ByVal=*/true, /*Realign=*/TyAlign > ABIAlign);
  }

  // Scalars: small integer types are promoted (extended) per the ABI;
  // everything else is passed directly.
  return (isPromotableTypeForABI(Ty)
              ? ABIArgInfo::getExtend(Ty, T: CGT.ConvertType(T: Ty))
              : ABIArgInfo::getDirect());
}
935
// Classify how a value of type \p RetTy is returned under the 64-bit
// SVR4 / ELFv2 PowerPC calling conventions.  The ordering of the checks
// mirrors classifyArgumentType and encodes the ABI; do not rearrange.
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Complex values are returned directly in registers.
  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (RetTy->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(T: RetTy);
    if (Size > 128)
      return getNaturalAlignIndirect(Ty: RetTy,
                                     AddrSpace: getDataLayout().getAllocaAddrSpace());
    else if (Size < 128) {
      // Coerce to one integer covering the whole vector.
      llvm::Type *CoerceTy = llvm::IntegerType::get(C&: getVMContext(), NumBits: Size);
      return ABIArgInfo::getDirect(T: CoerceTy);
    }
    // Exactly 128 bits: returned as a normal Altivec vector.
  }

  // _BitInt wider than 128 bits is returned via an sret pointer.
  if (const auto *EIT = RetTy->getAs<BitIntType>())
    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(
          Ty: RetTy, AddrSpace: getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);

  if (isAggregateTypeForABI(T: RetTy)) {
    // ELFv2 homogeneous aggregates are returned as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == PPC64_SVR4_ABIKind::ELFv2 &&
        isHomogeneousAggregate(Ty: RetTy, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(T: QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(ElementType: BaseTy, NumElements: Members);
      return ABIArgInfo::getDirect(T: CoerceTy);
    }

    // ELFv2 small aggregates are returned in up to two registers.
    uint64_t Bits = getContext().getTypeSize(T: RetTy);
    if (Kind == PPC64_SVR4_ABIKind::ELFv2 && Bits <= 2 * GPRBits) {
      // Zero-sized aggregates need no return value at all.
      if (Bits == 0)
        return ABIArgInfo::getIgnore();

      llvm::Type *CoerceTy;
      if (Bits > GPRBits) {
        // Two GPRs: model the result as a pair of GPR-sized integers.
        CoerceTy = llvm::IntegerType::get(C&: getVMContext(), NumBits: GPRBits);
        CoerceTy = llvm::StructType::get(elt1: CoerceTy, elts: CoerceTy);
      } else
        // One GPR: an integer rounded up to a whole number of bytes.
        CoerceTy =
            llvm::IntegerType::get(C&: getVMContext(), NumBits: llvm::alignTo(Value: Bits, Align: 8));
      return ABIArgInfo::getDirect(T: CoerceTy);
    }

    // All other aggregates are returned indirectly.
    return getNaturalAlignIndirect(Ty: RetTy, AddrSpace: getDataLayout().getAllocaAddrSpace());
  }

  // Scalars: small integer types are promoted (extended) per the ABI.
  return (isPromotableTypeForABI(Ty: RetTy) ? ABIArgInfo::getExtend(Ty: RetTy)
                                          : ABIArgInfo::getDirect());
}
996
// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
//
// Emits the IR for reading the next variadic argument of type \p Ty from
// the va_list at \p VAListAddr, returning the loaded value as an RValue.
RValue PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                     QualType Ty, AggValueSlot Slot) const {
  // Start from the type's natural size, but force the ABI-mandated
  // parameter slot alignment.
  auto TypeInfo = getContext().getTypeInfoInChars(T: Ty);
  TypeInfo.Align = getParamTypeAlignment(Ty);

  // Every argument occupies at least one 8-byte doubleword slot.
  CharUnits SlotSize = CharUnits::fromQuantity(Quantity: 8);

  // If we have a complex type and the base type is smaller than 8 bytes,
  // the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate doublewords. However, Clang expects us to produce a
  // pointer to a structure with the two parts packed tightly. So generate
  // loads of the real and imaginary parts relative to the va_list pointer,
  // and store them to a temporary structure.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    CharUnits EltSize = TypeInfo.Width / 2;
    if (EltSize < SlotSize)
      return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
  }

  // Otherwise, just use the general rule.
  //
  // The PPC64 ABI passes some arguments in integer registers, even to variadic
  // functions. To allow va_list to use the simple "void*" representation,
  // variadic calls allocate space in the argument area for the integer argument
  // registers, and variadic functions spill their integer argument registers to
  // this area in their prologues. When aggregates smaller than a register are
  // passed this way, they are passed in the least significant bits of the
  // register, which means that after spilling on big-endian targets they will
  // be right-aligned in their argument slot. This is uncommon; for a variety of
  // reasons, other big-endian targets don't end up right-aligning aggregate
  // types this way, and so right-alignment only applies to fundamental types.
  // So on PPC64, we must force the use of right-alignment even for aggregates.
  return emitVoidPtrVAArg(CGF, VAListAddr, ValueTy: Ty, /*Indirect*/ IsIndirect: false, ValueInfo: TypeInfo,
                          SlotSizeAndAlign: SlotSize, /*AllowHigher*/ AllowHigherAlign: true, Slot,
                          /*ForceRightAdjust*/ true);
}
1034
1035bool
1036PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
1037 CodeGen::CodeGenFunction &CGF,
1038 llvm::Value *Address) const {
1039 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
1040 /*IsAIX*/ false);
1041}
1042
1043void PPC64_SVR4_TargetCodeGenInfo::emitTargetMetadata(
1044 CodeGen::CodeGenModule &CGM,
1045 const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {
1046 if (CGM.getTypes().isLongDoubleReferenced()) {
1047 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
1048 const auto *flt = &CGM.getTarget().getLongDoubleFormat();
1049 if (flt == &llvm::APFloat::PPCDoubleDouble())
1050 CGM.getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "float-abi",
1051 Val: llvm::MDString::get(Context&: Ctx, Str: "doubledouble"));
1052 else if (flt == &llvm::APFloat::IEEEquad())
1053 CGM.getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "float-abi",
1054 Val: llvm::MDString::get(Context&: Ctx, Str: "ieeequad"));
1055 else if (flt == &llvm::APFloat::IEEEdouble())
1056 CGM.getModule().addModuleFlag(Behavior: llvm::Module::Error, Key: "float-abi",
1057 Val: llvm::MDString::get(Context&: Ctx, Str: "ieeedouble"));
1058 }
1059}
1060
1061bool
1062PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1063 llvm::Value *Address) const {
1064 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
1065 /*IsAIX*/ false);
1066}
1067
1068std::unique_ptr<TargetCodeGenInfo>
1069CodeGen::createAIXTargetCodeGenInfo(CodeGenModule &CGM, bool Is64Bit) {
1070 return std::make_unique<AIXTargetCodeGenInfo>(args&: CGM.getTypes(), args&: Is64Bit);
1071}
1072
1073std::unique_ptr<TargetCodeGenInfo>
1074CodeGen::createPPC32TargetCodeGenInfo(CodeGenModule &CGM, bool SoftFloatABI) {
1075 bool RetSmallStructInRegABI = PPC32TargetCodeGenInfo::isStructReturnInRegABI(
1076 Triple: CGM.getTriple(), Opts: CGM.getCodeGenOpts());
1077 return std::make_unique<PPC32TargetCodeGenInfo>(args&: CGM.getTypes(), args&: SoftFloatABI,
1078 args&: RetSmallStructInRegABI);
1079}
1080
1081std::unique_ptr<TargetCodeGenInfo>
1082CodeGen::createPPC64TargetCodeGenInfo(CodeGenModule &CGM) {
1083 return std::make_unique<PPC64TargetCodeGenInfo>(args&: CGM.getTypes());
1084}
1085
1086std::unique_ptr<TargetCodeGenInfo> CodeGen::createPPC64_SVR4_TargetCodeGenInfo(
1087 CodeGenModule &CGM, PPC64_SVR4_ABIKind Kind, bool SoftFloatABI) {
1088 return std::make_unique<PPC64_SVR4_TargetCodeGenInfo>(args&: CGM.getTypes(), args&: Kind,
1089 args&: SoftFloatABI);
1090}
1091