1//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This provides C++ code generation targeting the Itanium C++ ABI. The class
10// in this file generates structures that follow the Itanium C++ ABI, which is
11// documented at:
12// https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13// https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14//
15// It also supports the closely-related ARM ABI, documented at:
16// https://developer.arm.com/documentation/ihi0041/g/
17//
18//===----------------------------------------------------------------------===//
19
20#include "CGCXXABI.h"
21#include "CGCleanup.h"
22#include "CGDebugInfo.h"
23#include "CGRecordLayout.h"
24#include "CGVTables.h"
25#include "CodeGenFunction.h"
26#include "CodeGenModule.h"
27#include "TargetInfo.h"
28#include "clang/AST/Attr.h"
29#include "clang/AST/Mangle.h"
30#include "clang/AST/StmtCXX.h"
31#include "clang/AST/Type.h"
32#include "clang/CodeGen/ConstantInitBuilder.h"
33#include "llvm/IR/DataLayout.h"
34#include "llvm/IR/GlobalValue.h"
35#include "llvm/IR/Instructions.h"
36#include "llvm/IR/Intrinsics.h"
37#include "llvm/IR/Value.h"
38#include "llvm/Support/ConvertEBCDIC.h"
39#include "llvm/Support/ScopedPrinter.h"
40
41#include <optional>
42
43using namespace clang;
44using namespace CodeGen;
45
46namespace {
47class ItaniumCXXABI : public CodeGen::CGCXXABI {
48 /// VTables - All the vtables which have been defined.
49 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
50
51 /// All the thread wrapper functions that have been used.
52 llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
53 ThreadWrappers;
54
55protected:
56 bool UseARMMethodPtrABI;
57 bool UseARMGuardVarABI;
58 bool Use32BitVTableOffsetABI;
59
60 ItaniumMangleContext &getMangleContext() {
61 return cast<ItaniumMangleContext>(Val&: CodeGen::CGCXXABI::getMangleContext());
62 }
63
64public:
65 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
66 bool UseARMMethodPtrABI = false,
67 bool UseARMGuardVarABI = false) :
68 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
69 UseARMGuardVarABI(UseARMGuardVarABI),
70 Use32BitVTableOffsetABI(false) { }
71
72 bool classifyReturnType(CGFunctionInfo &FI) const override;
73
74 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
75 // If C++ prohibits us from making a copy, pass by address.
76 if (!RD->canPassInRegisters())
77 return RAA_Indirect;
78 return RAA_Default;
79 }
80
81 bool isThisCompleteObject(GlobalDecl GD) const override {
82 // The Itanium ABI has separate complete-object vs. base-object
83 // variants of both constructors and destructors.
84 if (isa<CXXDestructorDecl>(Val: GD.getDecl())) {
85 switch (GD.getDtorType()) {
86 case Dtor_Complete:
87 case Dtor_Deleting:
88 return true;
89
90 case Dtor_Base:
91 return false;
92
93 case Dtor_Comdat:
94 llvm_unreachable("emitting dtor comdat as function?");
95 case Dtor_Unified:
96 llvm_unreachable("emitting unified dtor as function?");
97 case Dtor_VectorDeleting:
98 llvm_unreachable("unexpected dtor kind for this ABI");
99 }
100 llvm_unreachable("bad dtor kind");
101 }
102 if (isa<CXXConstructorDecl>(Val: GD.getDecl())) {
103 switch (GD.getCtorType()) {
104 case Ctor_Complete:
105 return true;
106
107 case Ctor_Base:
108 return false;
109
110 case Ctor_CopyingClosure:
111 case Ctor_DefaultClosure:
112 llvm_unreachable("closure ctors in Itanium ABI?");
113
114 case Ctor_Comdat:
115 llvm_unreachable("emitting ctor comdat as function?");
116
117 case Ctor_Unified:
118 llvm_unreachable("emitting unified ctor as function?");
119 }
120 llvm_unreachable("bad dtor kind");
121 }
122
123 // No other kinds.
124 return false;
125 }
126
127 bool isZeroInitializable(const MemberPointerType *MPT) override;
128
129 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
130
131 CGCallee
132 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
133 const Expr *E,
134 Address This,
135 llvm::Value *&ThisPtrForCall,
136 llvm::Value *MemFnPtr,
137 const MemberPointerType *MPT) override;
138
139 llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
140 Address Base, llvm::Value *MemPtr,
141 const MemberPointerType *MPT,
142 bool IsInBounds) override;
143
144 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
145 const CastExpr *E,
146 llvm::Value *Src) override;
147 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
148 llvm::Constant *Src) override;
149
150 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
151
152 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
153 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
154 CharUnits offset) override;
155 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
156 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
157 CharUnits ThisAdjustment);
158
159 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
160 llvm::Value *L, llvm::Value *R,
161 const MemberPointerType *MPT,
162 bool Inequality) override;
163
164 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
165 llvm::Value *Addr,
166 const MemberPointerType *MPT) override;
167
168 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
169 Address Ptr, QualType ElementType,
170 const CXXDestructorDecl *Dtor) override;
171
172 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
173 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
174
175 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
176
177 llvm::CallInst *
178 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
179 llvm::Value *Exn) override;
180
181 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
182 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
183 CatchTypeInfo
184 getAddrOfCXXCatchHandlerType(QualType Ty,
185 QualType CatchHandlerType) override {
186 return CatchTypeInfo{.RTTI: getAddrOfRTTIDescriptor(Ty), .Flags: 0};
187 }
188
189 bool shouldTypeidBeNullChecked(QualType SrcRecordTy) override;
190 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
191 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
192 Address ThisPtr,
193 llvm::Type *StdTypeInfoPtrTy) override;
194
195 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
196 QualType SrcRecordTy) override;
197
198 /// Determine whether we know that all instances of type RecordTy will have
199 /// the same vtable pointer values, that is distinct from all other vtable
200 /// pointers. While this is required by the Itanium ABI, it doesn't happen in
201 /// practice in some cases due to language extensions.
202 bool hasUniqueVTablePointer(QualType RecordTy) {
203 const CXXRecordDecl *RD = RecordTy->getAsCXXRecordDecl();
204
205 // Under -fapple-kext, multiple definitions of the same vtable may be
206 // emitted.
207 if (!CGM.getCodeGenOpts().AssumeUniqueVTables ||
208 getContext().getLangOpts().AppleKext)
209 return false;
210
211 // If the type_info* would be null, the vtable might be merged with that of
212 // another type.
213 if (!CGM.shouldEmitRTTI())
214 return false;
215
216 // If there's only one definition of the vtable in the program, it has a
217 // unique address.
218 if (!llvm::GlobalValue::isWeakForLinker(Linkage: CGM.getVTableLinkage(RD)))
219 return true;
220
221 // Even if there are multiple definitions of the vtable, they are required
222 // by the ABI to use the same symbol name, so should be merged at load
223 // time. However, if the class has hidden visibility, there can be
224 // different versions of the class in different modules, and the ABI
225 // library might treat them as being the same.
226 if (CGM.GetLLVMVisibility(V: RD->getVisibility()) !=
227 llvm::GlobalValue::DefaultVisibility)
228 return false;
229
230 return true;
231 }
232
233 bool shouldEmitExactDynamicCast(QualType DestRecordTy) override {
234 return hasUniqueVTablePointer(RecordTy: DestRecordTy);
235 }
236
237 std::optional<ExactDynamicCastInfo>
238 getExactDynamicCastInfo(QualType SrcRecordTy, QualType DestTy,
239 QualType DestRecordTy) override;
240
241 llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address Value,
242 QualType SrcRecordTy, QualType DestTy,
243 QualType DestRecordTy,
244 llvm::BasicBlock *CastEnd) override;
245
246 llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address ThisAddr,
247 QualType SrcRecordTy, QualType DestTy,
248 QualType DestRecordTy,
249 const ExactDynamicCastInfo &CastInfo,
250 llvm::BasicBlock *CastSuccess,
251 llvm::BasicBlock *CastFail) override;
252
253 llvm::Value *emitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
254 QualType SrcRecordTy) override;
255
256 bool EmitBadCastCall(CodeGenFunction &CGF) override;
257
258 llvm::Value *
259 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
260 const CXXRecordDecl *ClassDecl,
261 const CXXRecordDecl *BaseClassDecl) override;
262
263 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
264
265 AddedStructorArgCounts
266 buildStructorSignature(GlobalDecl GD,
267 SmallVectorImpl<CanQualType> &ArgTys) override;
268
269 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
270 CXXDtorType DT) const override {
271 // Itanium does not emit any destructor variant as an inline thunk.
272 // Delegating may occur as an optimization, but all variants are either
273 // emitted with external linkage or as linkonce if they are inline and used.
274 return false;
275 }
276
277 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
278
279 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
280 FunctionArgList &Params) override;
281
282 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
283
284 AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
285 const CXXConstructorDecl *D,
286 CXXCtorType Type,
287 bool ForVirtualBase,
288 bool Delegating) override;
289
290 llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
291 const CXXDestructorDecl *DD,
292 CXXDtorType Type,
293 bool ForVirtualBase,
294 bool Delegating) override;
295
296 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
297 CXXDtorType Type, bool ForVirtualBase,
298 bool Delegating, Address This,
299 QualType ThisTy) override;
300
301 void emitVTableDefinitions(CodeGenVTables &CGVT,
302 const CXXRecordDecl *RD) override;
303
304 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
305 CodeGenFunction::VPtr Vptr) override;
306
307 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
308 return true;
309 }
310
311 llvm::Constant *
312 getVTableAddressPoint(BaseSubobject Base,
313 const CXXRecordDecl *VTableClass) override;
314
315 llvm::Value *getVTableAddressPointInStructor(
316 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
317 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
318
319 llvm::Value *getVTableAddressPointInStructorWithVTT(
320 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
321 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
322
323 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
324 CharUnits VPtrOffset) override;
325
326 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
327 Address This, llvm::Type *Ty,
328 SourceLocation Loc) override;
329
330 llvm::Value *
331 EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor,
332 CXXDtorType DtorType, Address This,
333 DeleteOrMemberCallExpr E,
334 llvm::CallBase **CallOrInvoke) override;
335
336 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
337
338 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
339 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
340
341 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
342 bool ReturnAdjustment) override {
343 // Allow inlining of thunks by emitting them with available_externally
344 // linkage together with vtables when needed.
345 if (ForVTable && !Thunk->hasLocalLinkage())
346 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
347 CGM.setGVProperties(GV: Thunk, GD);
348 }
349
350 bool exportThunk() override { return true; }
351
352 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
353 const CXXRecordDecl *UnadjustedThisClass,
354 const ThunkInfo &TI) override;
355
356 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
357 const CXXRecordDecl *UnadjustedRetClass,
358 const ReturnAdjustment &RA) override;
359
360 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
361 FunctionArgList &Args) const override {
362 assert(!Args.empty() && "expected the arglist to not be empty!");
363 return Args.size() - 1;
364 }
365
366 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
367 StringRef GetDeletedVirtualCallName() override
368 { return "__cxa_deleted_virtual"; }
369
370 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
371 Address InitializeArrayCookie(CodeGenFunction &CGF,
372 Address NewPtr,
373 llvm::Value *NumElements,
374 const CXXNewExpr *expr,
375 QualType ElementType) override;
376 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
377 Address allocPtr,
378 CharUnits cookieSize) override;
379
380 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
381 llvm::GlobalVariable *DeclPtr,
382 bool PerformInit) override;
383 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
384 llvm::FunctionCallee dtor,
385 llvm::Constant *addr) override;
386
387 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
388 llvm::Value *Val);
389 void EmitThreadLocalInitFuncs(
390 CodeGenModule &CGM,
391 ArrayRef<const VarDecl *> CXXThreadLocals,
392 ArrayRef<llvm::Function *> CXXThreadLocalInits,
393 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
394
395 bool usesThreadWrapperFunction(const VarDecl *VD) const override {
396 return !isEmittedWithConstantInitializer(VD) ||
397 mayNeedDestruction(VD);
398 }
399 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
400 QualType LValType) override;
401
402 bool NeedsVTTParameter(GlobalDecl GD) override;
403
404 llvm::Constant *
405 getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD);
406
407 /**************************** RTTI Uniqueness ******************************/
408
409protected:
410 /// Returns true if the ABI requires RTTI type_info objects to be unique
411 /// across a program.
412 virtual bool shouldRTTIBeUnique() const { return true; }
413
414public:
415 /// What sort of unique-RTTI behavior should we use?
416 enum RTTIUniquenessKind {
417 /// We are guaranteeing, or need to guarantee, that the RTTI string
418 /// is unique.
419 RUK_Unique,
420
421 /// We are not guaranteeing uniqueness for the RTTI string, so we
422 /// can demote to hidden visibility but must use string comparisons.
423 RUK_NonUniqueHidden,
424
425 /// We are not guaranteeing uniqueness for the RTTI string, so we
426 /// have to use string comparisons, but we also have to emit it with
427 /// non-hidden visibility.
428 RUK_NonUniqueVisible
429 };
430
431 /// Return the required visibility status for the given type and linkage in
432 /// the current ABI.
433 RTTIUniquenessKind
434 classifyRTTIUniqueness(QualType CanTy,
435 llvm::GlobalValue::LinkageTypes Linkage) const;
436 friend class ItaniumRTTIBuilder;
437
438 void emitCXXStructor(GlobalDecl GD) override;
439
440 std::pair<llvm::Value *, const CXXRecordDecl *>
441 LoadVTablePtr(CodeGenFunction &CGF, Address This,
442 const CXXRecordDecl *RD) override;
443
444 private:
445 llvm::Constant *
446 getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD);
447
448 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
449 const auto &VtableLayout =
450 CGM.getItaniumVTableContext().getVTableLayout(RD);
451
452 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
453 // Skip empty slot.
454 if (!VtableComponent.isUsedFunctionPointerKind())
455 continue;
456
457 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
458 const FunctionDecl *FD = Method->getDefinition();
459 const bool IsInlined =
460 Method->getCanonicalDecl()->isInlined() || (FD && FD->isInlined());
461 if (!IsInlined)
462 continue;
463
464 StringRef Name = CGM.getMangledName(
465 GD: VtableComponent.getGlobalDecl(/*HasVectorDeletingDtors=*/false));
466 auto *Entry = CGM.GetGlobalValue(Ref: Name);
467 // This checks if virtual inline function has already been emitted.
468 // Note that it is possible that this inline function would be emitted
469 // after trying to emit vtable speculatively. Because of this we do
470 // an extra pass after emitting all deferred vtables to find and emit
471 // these vtables opportunistically.
472 if (!Entry || Entry->isDeclaration())
473 return true;
474 }
475 return false;
476 }
477
478 bool isVTableHidden(const CXXRecordDecl *RD) const {
479 const auto &VtableLayout =
480 CGM.getItaniumVTableContext().getVTableLayout(RD);
481
482 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
483 if (VtableComponent.isRTTIKind()) {
484 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
485 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
486 return true;
487 } else if (VtableComponent.isUsedFunctionPointerKind()) {
488 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
489 if (Method->getVisibility() == Visibility::HiddenVisibility &&
490 !Method->isDefined())
491 return true;
492 }
493 }
494 return false;
495 }
496};
497
/// ARM ABI variant of the Itanium ABI: enables the ARM member-pointer and
/// guard-variable encodings, makes constructors/destructors return 'this',
/// and overrides array-cookie handling and thunk returns.
class ARMCXXABI : public ItaniumCXXABI {
public:
  ARMCXXABI(CodeGen::CodeGenModule &CGM) :
    ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                  /*UseARMGuardVarABI=*/true) {}

  bool constructorsAndDestructorsReturnThis() const override { return true; }

  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
                           QualType ResTy) override;

  // ARM-specific array cookie layout (see the out-of-line definitions).
  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
                                   CharUnits cookieSize) override;
};
518
/// ABI variant for Apple ARM64 targets, layered on the ARM ABI.
class AppleARM64CXXABI : public ARMCXXABI {
public:
  AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
    // Only the low 32 bits of a virtual member function pointer's vtable
    // offset are significant on this target.
    Use32BitVTableOffsetABI = true;
  }

  // ARM64 libraries are prepared for non-unique RTTI.
  bool shouldRTTIBeUnique() const override { return false; }
};
528
/// Fuchsia's variant of the Itanium ABI: identical to the generic ABI
/// except that constructors and destructors return 'this'.
class FuchsiaCXXABI final : public ItaniumCXXABI {
public:
  explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

private:
  bool constructorsAndDestructorsReturnThis() const override { return true; }
};
537
/// WebAssembly's variant of the Itanium ABI: uses the ARM member-pointer and
/// guard-variable encodings, makes constructors/destructors return 'this',
/// disallows calls through a mismatched function type, and customizes the
/// exception-handling entry points.
class WebAssemblyCXXABI final : public ItaniumCXXABI {
public:
  explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                      /*UseARMGuardVarABI=*/true) {}
  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
  llvm::CallInst *
  emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                      llvm::Value *Exn) override;

private:
  bool constructorsAndDestructorsReturnThis() const override { return true; }
  bool canCallMismatchedFunctionType() const override { return false; }
};
552
/// XL (AIX) variant of the Itanium ABI: global destructors are registered
/// through the sinit/sterm mechanism rather than the default scheme.
class XLCXXABI final : public ItaniumCXXABI {
public:
  explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::FunctionCallee dtor,
                          llvm::Constant *addr) override;

  bool useSinitAndSterm() const override { return true; }

private:
  /// Emit the sterm finalizer that invokes 'dtorStub' for the global at
  /// 'addr' during program termination.
  void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                             llvm::Constant *addr);
};
568}
569
570CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
571 switch (CGM.getContext().getCXXABIKind()) {
572 // For IR-generation purposes, there's no significant difference
573 // between the ARM and iOS ABIs.
574 case TargetCXXABI::GenericARM:
575 case TargetCXXABI::iOS:
576 case TargetCXXABI::WatchOS:
577 return new ARMCXXABI(CGM);
578
579 case TargetCXXABI::AppleARM64:
580 return new AppleARM64CXXABI(CGM);
581
582 case TargetCXXABI::Fuchsia:
583 return new FuchsiaCXXABI(CGM);
584
585 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
586 // include the other 32-bit ARM oddities: constructor/destructor return values
587 // and array cookies.
588 case TargetCXXABI::GenericAArch64:
589 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
590 /*UseARMGuardVarABI=*/true);
591
592 case TargetCXXABI::GenericMIPS:
593 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
594
595 case TargetCXXABI::WebAssembly:
596 return new WebAssemblyCXXABI(CGM);
597
598 case TargetCXXABI::XL:
599 return new XLCXXABI(CGM);
600
601 case TargetCXXABI::GenericItanium:
602 return new ItaniumCXXABI(CGM);
603
604 case TargetCXXABI::Microsoft:
605 llvm_unreachable("Microsoft ABI is not Itanium-based");
606 }
607 llvm_unreachable("bad ABI kind");
608}
609
610llvm::Type *
611ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
612 if (MPT->isMemberDataPointer())
613 return CGM.PtrDiffTy;
614 return llvm::StructType::get(elt1: CGM.PtrDiffTy, elts: CGM.PtrDiffTy);
615}
616
617/// In the Itanium and ARM ABIs, method pointers have the form:
618/// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
619///
620/// In the Itanium ABI:
621/// - method pointers are virtual if (memptr.ptr & 1) is nonzero
622/// - the this-adjustment is (memptr.adj)
623/// - the virtual offset is (memptr.ptr - 1)
624///
625/// In the ARM ABI:
626/// - method pointers are virtual if (memptr.adj & 1) is nonzero
627/// - the this-adjustment is (memptr.adj >> 1)
628/// - the virtual offset is (memptr.ptr)
629/// ARM uses 'adj' for the virtual flag because Thumb functions
630/// may be only single-byte aligned.
631///
632/// If the member is virtual, the adjusted 'this' pointer points
633/// to a vtable pointer from which the virtual offset is applied.
634///
635/// If the member is non-virtual, memptr.ptr is the address of
636/// the function to call.
637CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
638 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
639 llvm::Value *&ThisPtrForCall,
640 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
641 CGBuilderTy &Builder = CGF.Builder;
642
643 const FunctionProtoType *FPT =
644 MPT->getPointeeType()->castAs<FunctionProtoType>();
645 auto *RD = MPT->getMostRecentCXXRecordDecl();
646
647 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(Ty: CGM.PtrDiffTy, V: 1);
648
649 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock(name: "memptr.virtual");
650 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock(name: "memptr.nonvirtual");
651 llvm::BasicBlock *FnEnd = CGF.createBasicBlock(name: "memptr.end");
652
653 // Extract memptr.adj, which is in the second field.
654 llvm::Value *RawAdj = Builder.CreateExtractValue(Agg: MemFnPtr, Idxs: 1, Name: "memptr.adj");
655
656 // Compute the true adjustment.
657 llvm::Value *Adj = RawAdj;
658 if (UseARMMethodPtrABI)
659 Adj = Builder.CreateAShr(LHS: Adj, RHS: ptrdiff_1, Name: "memptr.adj.shifted");
660
661 // Apply the adjustment and cast back to the original struct type
662 // for consistency.
663 llvm::Value *This = ThisAddr.emitRawPointer(CGF);
664 This = Builder.CreateInBoundsGEP(Ty: Builder.getInt8Ty(), Ptr: This, IdxList: Adj);
665 ThisPtrForCall = This;
666
667 // Load the function pointer.
668 llvm::Value *FnAsInt = Builder.CreateExtractValue(Agg: MemFnPtr, Idxs: 0, Name: "memptr.ptr");
669
670 // If the LSB in the function pointer is 1, the function pointer points to
671 // a virtual function.
672 llvm::Value *IsVirtual;
673 if (UseARMMethodPtrABI)
674 IsVirtual = Builder.CreateAnd(LHS: RawAdj, RHS: ptrdiff_1);
675 else
676 IsVirtual = Builder.CreateAnd(LHS: FnAsInt, RHS: ptrdiff_1);
677 IsVirtual = Builder.CreateIsNotNull(Arg: IsVirtual, Name: "memptr.isvirtual");
678 Builder.CreateCondBr(Cond: IsVirtual, True: FnVirtual, False: FnNonVirtual);
679
680 // In the virtual path, the adjustment left 'This' pointing to the
681 // vtable of the correct base subobject. The "function pointer" is an
682 // offset within the vtable (+1 for the virtual flag on non-ARM).
683 CGF.EmitBlock(BB: FnVirtual);
684
685 // Cast the adjusted this to a pointer to vtable pointer and load.
686 llvm::Type *VTableTy = CGF.CGM.GlobalsInt8PtrTy;
687 CharUnits VTablePtrAlign =
688 CGF.CGM.getDynamicOffsetAlignment(ActualAlign: ThisAddr.getAlignment(), Class: RD,
689 ExpectedTargetAlign: CGF.getPointerAlign());
690 llvm::Value *VTable = CGF.GetVTablePtr(
691 This: Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, VTableClass: RD);
692
693 // Apply the offset.
694 // On ARM64, to reserve extra space in virtual member function pointers,
695 // we only pay attention to the low 32 bits of the offset.
696 llvm::Value *VTableOffset = FnAsInt;
697 if (!UseARMMethodPtrABI)
698 VTableOffset = Builder.CreateSub(LHS: VTableOffset, RHS: ptrdiff_1);
699 if (Use32BitVTableOffsetABI) {
700 VTableOffset = Builder.CreateTrunc(V: VTableOffset, DestTy: CGF.Int32Ty);
701 VTableOffset = Builder.CreateZExt(V: VTableOffset, DestTy: CGM.PtrDiffTy);
702 }
703
704 // Check the address of the function pointer if CFI on member function
705 // pointers is enabled.
706 llvm::Constant *CheckSourceLocation;
707 llvm::Constant *CheckTypeDesc;
708 bool ShouldEmitCFICheck = CGF.SanOpts.has(K: SanitizerKind::CFIMFCall) &&
709 CGM.HasHiddenLTOVisibility(RD);
710
711 if (ShouldEmitCFICheck) {
712 if (const auto *BinOp = dyn_cast<BinaryOperator>(Val: E)) {
713 if (BinOp->isPtrMemOp() &&
714 BinOp->getRHS()
715 ->getType()
716 ->hasPointeeToCFIUncheckedCalleeFunctionType())
717 ShouldEmitCFICheck = false;
718 }
719 }
720
721 bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
722 CGM.HasHiddenLTOVisibility(RD);
723 // TODO: Update this name not to be restricted to WPD only
724 // as we now emit the vtable info info for speculative devirtualization as
725 // well.
726 bool ShouldEmitWPDInfo =
727 (CGM.getCodeGenOpts().WholeProgramVTables &&
728 // Don't insert type tests if we are forcing public visibility.
729 !CGM.AlwaysHasLTOVisibilityPublic(RD)) ||
730 CGM.getCodeGenOpts().DevirtualizeSpeculatively;
731 llvm::Value *VirtualFn = nullptr;
732
733 {
734 auto CheckOrdinal = SanitizerKind::SO_CFIMFCall;
735 auto CheckHandler = SanitizerHandler::CFICheckFail;
736 SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
737
738 llvm::Value *TypeId = nullptr;
739 llvm::Value *CheckResult = nullptr;
740
741 if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
742 // If doing CFI, VFE or WPD, we will need the metadata node to check
743 // against.
744 llvm::Metadata *MD =
745 CGM.CreateMetadataIdentifierForVirtualMemPtrType(T: QualType(MPT, 0));
746 TypeId = llvm::MetadataAsValue::get(Context&: CGF.getLLVMContext(), MD);
747 }
748
749 if (ShouldEmitVFEInfo) {
750 llvm::Value *VFPAddr =
751 Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: VTable, IdxList: VTableOffset);
752
753 // If doing VFE, load from the vtable with a type.checked.load intrinsic
754 // call. Note that we use the GEP to calculate the address to load from
755 // and pass 0 as the offset to the intrinsic. This is because every
756 // vtable slot of the correct type is marked with matching metadata, and
757 // we know that the load must be from one of these slots.
758 llvm::Value *CheckedLoad = Builder.CreateCall(
759 Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::type_checked_load),
760 Args: {VFPAddr, llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: 0), TypeId});
761 CheckResult = Builder.CreateExtractValue(Agg: CheckedLoad, Idxs: 1);
762 VirtualFn = Builder.CreateExtractValue(Agg: CheckedLoad, Idxs: 0);
763 } else {
764 // When not doing VFE, emit a normal load, as it allows more
765 // optimisations than type.checked.load.
766 if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
767 llvm::Value *VFPAddr =
768 Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: VTable, IdxList: VTableOffset);
769 llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
770 ? llvm::Intrinsic::type_test
771 : llvm::Intrinsic::public_type_test;
772
773 CheckResult =
774 Builder.CreateCall(Callee: CGM.getIntrinsic(IID), Args: {VFPAddr, TypeId});
775 }
776
777 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
778 VirtualFn = CGF.Builder.CreateCall(
779 Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::load_relative,
780 Tys: {VTableOffset->getType()}),
781 Args: {VTable, VTableOffset});
782 } else {
783 llvm::Value *VFPAddr =
784 CGF.Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: VTable, IdxList: VTableOffset);
785 VirtualFn = CGF.Builder.CreateAlignedLoad(Ty: CGF.DefaultPtrTy, Addr: VFPAddr,
786 Align: CGF.getPointerAlign(),
787 Name: "memptr.virtualfn");
788 }
789 }
790 assert(VirtualFn && "Virtual fuction pointer not created!");
791 assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
792 CheckResult) &&
793 "Check result required but not created!");
794
795 if (ShouldEmitCFICheck) {
796 // If doing CFI, emit the check.
797 CheckSourceLocation = CGF.EmitCheckSourceLocation(Loc: E->getBeginLoc());
798 CheckTypeDesc = CGF.EmitCheckTypeDescriptor(T: QualType(MPT, 0));
799 llvm::Constant *StaticData[] = {
800 llvm::ConstantInt::get(Ty: CGF.Int8Ty, V: CodeGenFunction::CFITCK_VMFCall),
801 CheckSourceLocation,
802 CheckTypeDesc,
803 };
804
805 if (CGM.getCodeGenOpts().SanitizeTrap.has(K: SanitizerKind::CFIMFCall)) {
806 CGF.EmitTrapCheck(Checked: CheckResult, CheckHandlerID: CheckHandler);
807 } else {
808 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
809 Context&: CGM.getLLVMContext(),
810 MD: llvm::MDString::get(Context&: CGM.getLLVMContext(), Str: "all-vtables"));
811 llvm::Value *ValidVtable = Builder.CreateCall(
812 Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::type_test), Args: {VTable, AllVtables});
813 CGF.EmitCheck(Checked: std::make_pair(x&: CheckResult, y&: CheckOrdinal), Check: CheckHandler,
814 StaticArgs: StaticData, DynamicArgs: {VTable, ValidVtable});
815 }
816
817 FnVirtual = Builder.GetInsertBlock();
818 }
819 } // End of sanitizer scope
820
821 CGF.EmitBranch(Block: FnEnd);
822
823 // In the non-virtual path, the function pointer is actually a
824 // function pointer.
825 CGF.EmitBlock(BB: FnNonVirtual);
826 llvm::Value *NonVirtualFn =
827 Builder.CreateIntToPtr(V: FnAsInt, DestTy: CGF.DefaultPtrTy, Name: "memptr.nonvirtualfn");
828
829 // Check the function pointer if CFI on member function pointers is enabled.
830 if (ShouldEmitCFICheck) {
831 CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
832 if (RD->hasDefinition()) {
833 auto CheckOrdinal = SanitizerKind::SO_CFIMFCall;
834 auto CheckHandler = SanitizerHandler::CFICheckFail;
835 SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
836
837 llvm::Constant *StaticData[] = {
838 llvm::ConstantInt::get(Ty: CGF.Int8Ty, V: CodeGenFunction::CFITCK_NVMFCall),
839 CheckSourceLocation,
840 CheckTypeDesc,
841 };
842
843 llvm::Value *Bit = Builder.getFalse();
844 for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
845 llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
846 T: getContext().getMemberPointerType(T: MPT->getPointeeType(),
847 /*Qualifier=*/std::nullopt,
848 Cls: Base->getCanonicalDecl()));
849 llvm::Value *TypeId =
850 llvm::MetadataAsValue::get(Context&: CGF.getLLVMContext(), MD);
851
852 llvm::Value *TypeTest =
853 Builder.CreateCall(Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::type_test),
854 Args: {NonVirtualFn, TypeId});
855 Bit = Builder.CreateOr(LHS: Bit, RHS: TypeTest);
856 }
857
858 CGF.EmitCheck(Checked: std::make_pair(x&: Bit, y&: CheckOrdinal), Check: CheckHandler, StaticArgs: StaticData,
859 DynamicArgs: {NonVirtualFn, llvm::UndefValue::get(T: CGF.IntPtrTy)});
860
861 FnNonVirtual = Builder.GetInsertBlock();
862 }
863 }
864
865 // We're done.
866 CGF.EmitBlock(BB: FnEnd);
867 llvm::PHINode *CalleePtr = Builder.CreatePHI(Ty: CGF.DefaultPtrTy, NumReservedValues: 2);
868 CalleePtr->addIncoming(V: VirtualFn, BB: FnVirtual);
869 CalleePtr->addIncoming(V: NonVirtualFn, BB: FnNonVirtual);
870
871 CGPointerAuthInfo PointerAuth;
872
873 if (const auto &Schema =
874 CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers) {
875 llvm::PHINode *DiscriminatorPHI = Builder.CreatePHI(Ty: CGF.IntPtrTy, NumReservedValues: 2);
876 DiscriminatorPHI->addIncoming(V: llvm::ConstantInt::get(Ty: CGF.IntPtrTy, V: 0),
877 BB: FnVirtual);
878 const auto &AuthInfo =
879 CGM.getMemberFunctionPointerAuthInfo(FT: QualType(MPT, 0));
880 assert(Schema.getKey() == AuthInfo.getKey() &&
881 "Keys for virtual and non-virtual member functions must match");
882 auto *NonVirtualDiscriminator = AuthInfo.getDiscriminator();
883 DiscriminatorPHI->addIncoming(V: NonVirtualDiscriminator, BB: FnNonVirtual);
884 PointerAuth = CGPointerAuthInfo(
885 Schema.getKey(), Schema.getAuthenticationMode(), Schema.isIsaPointer(),
886 Schema.authenticatesNullValues(), DiscriminatorPHI);
887 }
888
889 CGCallee Callee(FPT, CalleePtr, PointerAuth);
890 return Callee;
891}
892
893/// Compute an l-value by applying the given pointer-to-member to a
894/// base object.
895llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
896 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
897 const MemberPointerType *MPT, bool IsInBounds) {
898 assert(MemPtr->getType() == CGM.PtrDiffTy);
899
900 CGBuilderTy &Builder = CGF.Builder;
901
902 // Apply the offset.
903 llvm::Value *BaseAddr = Base.emitRawPointer(CGF);
904 return Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: BaseAddr, IdxList: MemPtr, Name: "memptr.offset",
905 NW: IsInBounds ? llvm::GEPNoWrapFlags::inBounds()
906 : llvm::GEPNoWrapFlags::none());
907}
908
909// See if it's possible to return a constant signed pointer.
910static llvm::Constant *pointerAuthResignConstant(
911 llvm::Value *Ptr, const CGPointerAuthInfo &CurAuthInfo,
912 const CGPointerAuthInfo &NewAuthInfo, CodeGenModule &CGM) {
913 const auto *CPA = dyn_cast<llvm::ConstantPtrAuth>(Val: Ptr);
914
915 if (!CPA)
916 return nullptr;
917
918 assert(CPA->getKey()->getZExtValue() == CurAuthInfo.getKey() &&
919 CPA->getAddrDiscriminator()->isNullValue() &&
920 CPA->getDiscriminator() == CurAuthInfo.getDiscriminator() &&
921 "unexpected key or discriminators");
922
923 return CGM.getConstantSignedPointer(
924 Pointer: CPA->getPointer(), Key: NewAuthInfo.getKey(), StorageAddress: nullptr,
925 OtherDiscriminator: cast<llvm::ConstantInt>(Val: NewAuthInfo.getDiscriminator()));
926}
927
928/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
929/// conversion.
930///
931/// Bitcast conversions are always a no-op under Itanium.
932///
933/// Obligatory offset/adjustment diagram:
934/// <-- offset --> <-- adjustment -->
935/// |--------------------------|----------------------|--------------------|
936/// ^Derived address point ^Base address point ^Member address point
937///
938/// So when converting a base member pointer to a derived member pointer,
939/// we add the offset to the adjustment because the address point has
940/// decreased; and conversely, when converting a derived MP to a base MP
941/// we subtract the offset from the adjustment because the address point
942/// has increased.
943///
944/// The standard forbids (at compile time) conversion to and from
945/// virtual bases, which is why we don't have to consider them here.
946///
947/// The standard forbids (at run time) casting a derived MP to a base
948/// MP when the derived MP does not point to a member of the base.
949/// This is why -1 is a reasonable choice for null data member
950/// pointers.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  // Use constant emission if we can.
  if (isa<llvm::Constant>(Val: src))
    return EmitMemberPointerConversion(E, Src: cast<llvm::Constant>(Val: src));

  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  CGBuilderTy &Builder = CGF.Builder;
  QualType DstType = E->getType();

  if (DstType->isMemberFunctionPointerType()) {
    // Under pointer authentication, converting between member function
    // pointer types may change the signing schema, so the function-pointer
    // half of the {ptr, adj} pair has to be re-signed for the new type.
    if (const auto &NewAuthInfo =
            CGM.getMemberFunctionPointerAuthInfo(FT: DstType)) {
      QualType SrcType = E->getSubExpr()->getType();
      assert(SrcType->isMemberFunctionPointerType());
      const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(FT: SrcType);
      llvm::Value *MemFnPtr = Builder.CreateExtractValue(Agg: src, Idxs: 0, Name: "memptr.ptr");
      llvm::Type *OrigTy = MemFnPtr->getType();

      llvm::BasicBlock *StartBB = Builder.GetInsertBlock();
      llvm::BasicBlock *ResignBB = CGF.createBasicBlock(name: "resign");
      llvm::BasicBlock *MergeBB = CGF.createBasicBlock(name: "merge");

      // Check whether we have a virtual offset or a pointer to a function.
      // Under the ARM variant the low bit of 'adj' makes this discrimination;
      // only genuine function pointers are signed, so values carrying a
      // virtual offset branch straight to the merge block unmodified.
      assert(UseARMMethodPtrABI && "ARM ABI expected");
      llvm::Value *Adj = Builder.CreateExtractValue(Agg: src, Idxs: 1, Name: "memptr.adj");
      llvm::Constant *Ptrdiff_1 = llvm::ConstantInt::get(Ty: CGM.PtrDiffTy, V: 1);
      llvm::Value *AndVal = Builder.CreateAnd(LHS: Adj, RHS: Ptrdiff_1);
      llvm::Value *IsVirtualOffset =
          Builder.CreateIsNotNull(Arg: AndVal, Name: "is.virtual.offset");
      Builder.CreateCondBr(Cond: IsVirtualOffset, True: MergeBB, False: ResignBB);

      CGF.EmitBlock(BB: ResignBB);
      // The pointer field is stored as an integer; round-trip it through a
      // pointer type so the resign intrinsic can operate on it.
      llvm::Type *PtrTy = llvm::PointerType::getUnqual(C&: CGM.getLLVMContext());
      MemFnPtr = Builder.CreateIntToPtr(V: MemFnPtr, DestTy: PtrTy);
      MemFnPtr =
          CGF.emitPointerAuthResign(Pointer: MemFnPtr, PointerType: SrcType, CurAuthInfo, NewAuthInfo,
                                    IsKnownNonNull: isa<llvm::Constant>(Val: src));
      MemFnPtr = Builder.CreatePtrToInt(V: MemFnPtr, DestTy: OrigTy);
      llvm::Value *ResignedVal = Builder.CreateInsertValue(Agg: src, Val: MemFnPtr, Idxs: 0);
      // Re-read the insert point: the resign may have emitted control flow.
      ResignBB = Builder.GetInsertBlock();

      CGF.EmitBlock(BB: MergeBB);
      // Merge the untouched (virtual-offset) and re-signed values.
      llvm::PHINode *NewSrc = Builder.CreatePHI(Ty: src->getType(), NumReservedValues: 2);
      NewSrc->addIncoming(V: src, BB: StartBB);
      NewSrc->addIncoming(V: ResignedVal, BB: ResignBB);
      src = NewSrc;
    }
  }

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(LHS: src, RHS: adj, Name: "adj");
    else
      dst = Builder.CreateNSWAdd(LHS: src, RHS: adj, Name: "adj");

    // Null check: null data member pointers are -1 and must be preserved
    // rather than adjusted.
    llvm::Value *null = llvm::Constant::getAllOnesValue(Ty: src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(LHS: src, RHS: null, Name: "memptr.isnull");
    return Builder.CreateSelect(C: isNull, True: src, False: dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(Val: adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(Ty: adj->getType(), V: offset);
  }

  // For member function pointers, apply the adjustment to the 'adj' field
  // of the {ptr, adj} pair.
  llvm::Value *srcAdj = Builder.CreateExtractValue(Agg: src, Idxs: 1, Name: "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(LHS: srcAdj, RHS: adj, Name: "adj");
  else
    dstAdj = Builder.CreateNSWAdd(LHS: srcAdj, RHS: adj, Name: "adj");

  return Builder.CreateInsertValue(Agg: src, Val: dstAdj, Idxs: 1);
}
1048
/// Re-sign a constant member function pointer when converting between member
/// pointer types whose pointer-auth schemas differ.  Returns \p Src unchanged
/// when no re-signing is needed.
static llvm::Constant *
pointerAuthResignMemberFunctionPointer(llvm::Constant *Src, QualType DestType,
                                       QualType SrcType, CodeGenModule &CGM) {
  assert(DestType->isMemberFunctionPointerType() &&
         SrcType->isMemberFunctionPointerType() &&
         "member function pointers expected");
  // Identical types share a signing schema; nothing to do.
  if (DestType == SrcType)
    return Src;

  const auto &NewAuthInfo = CGM.getMemberFunctionPointerAuthInfo(FT: DestType);
  const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(FT: SrcType);

  // If neither side is signed, the representation is unchanged.
  if (!NewAuthInfo && !CurAuthInfo)
    return Src;

  llvm::Constant *MemFnPtr = Src->getAggregateElement(Elt: 0u);
  if (MemFnPtr->getNumOperands() == 0) {
    // src must be a pair of null pointers.
    assert(isa<llvm::ConstantInt>(MemFnPtr) && "constant int expected");
    return Src;
  }

  // Re-sign the underlying function pointer constant and splice it back into
  // the {ptr, adj} aggregate as an integer.
  llvm::Constant *ConstPtr = pointerAuthResignConstant(
      Ptr: cast<llvm::User>(Val: MemFnPtr)->getOperand(i: 0), CurAuthInfo, NewAuthInfo, CGM);
  ConstPtr = llvm::ConstantExpr::getPtrToInt(C: ConstPtr, Ty: MemFnPtr->getType());
  return ConstantFoldInsertValueInstruction(Agg: Src, Val: ConstPtr, Idxs: 0);
}
1076
/// Constant-folding counterpart of the IR-emitting overload above: perform a
/// member pointer conversion entirely at compile time.
llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
                                           llvm::Constant *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  QualType DstType = E->getType();

  // Under pointer auth, a member function pointer may need re-signing for
  // the destination type before any adjustment is applied.
  if (DstType->isMemberFunctionPointerType())
    src = pointerAuthResignMemberFunctionPointer(
        Src: src, DestType: DstType, SrcType: E->getSubExpr()->getType(), CGM);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // If the adjustment is trivial, we don't need to do anything.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    // null maps to null.  (Null data member pointers are -1.)
    if (src->isAllOnesValue()) return src;

    if (isDerivedToBase)
      return llvm::ConstantExpr::getNSWSub(C1: src, C2: adj);
    else
      return llvm::ConstantExpr::getNSWAdd(C1: src, C2: adj);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(Val: adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(Ty: adj->getType(), V: offset);
  }

  // For member function pointers, fold the adjustment into the 'adj' field
  // of the {ptr, adj} pair.
  llvm::Constant *srcAdj = src->getAggregateElement(Elt: 1);
  llvm::Constant *dstAdj;
  if (isDerivedToBase)
    dstAdj = llvm::ConstantExpr::getNSWSub(C1: srcAdj, C2: adj);
  else
    dstAdj = llvm::ConstantExpr::getNSWAdd(C1: srcAdj, C2: adj);

  llvm::Constant *res = ConstantFoldInsertValueInstruction(Agg: src, Val: dstAdj, Idxs: 1);
  assert(res != nullptr && "Folding must succeed");
  return res;
}
1132
1133llvm::Constant *
1134ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
1135 // Itanium C++ ABI 2.3:
1136 // A NULL pointer is represented as -1.
1137 if (MPT->isMemberDataPointer())
1138 return llvm::ConstantInt::get(Ty: CGM.PtrDiffTy, V: -1ULL, /*isSigned=*/IsSigned: true);
1139
1140 llvm::Constant *Zero = llvm::ConstantInt::get(Ty: CGM.PtrDiffTy, V: 0);
1141 llvm::Constant *Values[2] = { Zero, Zero };
1142 return llvm::ConstantStruct::getAnon(V: Values);
1143}
1144
1145llvm::Constant *
1146ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
1147 CharUnits offset) {
1148 // Itanium C++ ABI 2.3:
1149 // A pointer to data member is an offset from the base address of
1150 // the class object containing it, represented as a ptrdiff_t
1151 return llvm::ConstantInt::get(Ty: CGM.PtrDiffTy, V: offset.getQuantity());
1152}
1153
1154llvm::Constant *
1155ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
1156 return BuildMemberPointer(MD, ThisAdjustment: CharUnits::Zero());
1157}
1158
/// Build the constant {ptr, adj} pair for a pointer to the given member
/// function, folding in \p ThisAdjustment (e.g. from a base-path conversion).
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(GD: MD);

    // Scale the vtable slot index into a byte offset.
    uint64_t VTableOffset;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Multiply by 4-byte relative offsets.
      VTableOffset = Index * 4;
    } else {
      const ASTContext &Context = getContext();
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
          BitSize: Context.getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default));
      VTableOffset = Index * PointerWidth.getQuantity();
    }

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.

      // We cannot use the Itanium ABI's representation for virtual member
      // function pointers under pointer authentication because it would
      // require us to store both the virtual offset and the constant
      // discriminator in the pointer, which would be immediately vulnerable
      // to attack. Instead we introduce a thunk that does the virtual dispatch
      // and store it as if it were a non-virtual member function. This means
      // that virtual function pointers may not compare equal anymore, but
      // fortunately they aren't required to by the standard, and we do make
      // a best-effort attempt to re-use the thunk.
      //
      // To support interoperation with code in which pointer authentication
      // is disabled, derefencing a member function pointer must still handle
      // the virtual case, but it can use a discriminator which should never
      // be valid.
      const auto &Schema =
          CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers;
      if (Schema)
        // Pointer auth enabled: store a signed pointer to the dispatch thunk.
        MemPtr[0] = llvm::ConstantExpr::getPtrToInt(
            C: getSignedVirtualMemberFunctionPointer(MD), Ty: CGM.PtrDiffTy);
      else
        MemPtr[0] = llvm::ConstantInt::get(Ty: CGM.PtrDiffTy, V: VTableOffset);
      // Don't set the LSB of adj to 1 if pointer authentication for member
      // function pointers is enabled.
      MemPtr[1] = llvm::ConstantInt::get(
          Ty: CGM.PtrDiffTy, V: 2 * ThisAdjustment.getQuantity() + !Schema);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(Ty: CGM.PtrDiffTy, V: VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(Ty: CGM.PtrDiffTy,
                                         V: ThisAdjustment.getQuantity());
    }
  } else {
    // Non-virtual: store the address of the function itself.
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FT: FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Info: Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.getMemberFunctionPointer(FD: MD, Ty);

    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(C: addr, Ty: CGM.PtrDiffTy);
    // On ARM the this-adjustment is doubled (the LSB is the virtual flag,
    // which is 0 here).
    MemPtr[1] = llvm::ConstantInt::get(Ty: CGM.PtrDiffTy,
                                       V: (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(V: MemPtr);
}
1244
/// Emit a constant member pointer from an evaluated APValue.
llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
                                                 QualType MPType) {
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  // No declaration means the value is the null member pointer.
  if (!MPD)
    return EmitNullMemberPointer(MPT);

  // This-adjustment accumulated along the base path recorded in the APValue.
  CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: MPD)) {
    llvm::Constant *Src = BuildMemberPointer(MD, ThisAdjustment);
    QualType SrcType = getContext().getMemberPointerType(
        T: MD->getType(), /*Qualifier=*/std::nullopt, Cls: MD->getParent());
    // Under pointer auth, the built constant is signed for the declaring
    // class's member pointer type and may need re-signing for MPType.
    return pointerAuthResignMemberFunctionPointer(Src, DestType: MPType, SrcType, CGM);
  }

  // Data member: the value is the field offset plus the path adjustment.
  getContext().recordMemberDataPointerEvaluation(VD: MPD);
  CharUnits FieldOffset =
      getContext().toCharUnitsFromBits(BitSize: getContext().getFieldOffset(FD: MPD));
  return EmitMemberDataPointer(MPT, offset: ThisAdjustment + FieldOffset);
}
1266
1267/// The comparison algorithm is pretty easy: the member pointers are
1268/// the same if they're either bitwise identical *or* both null.
1269///
1270/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  // Select the comparison predicate and the combining operators.  For
  // inequality the and/or roles are swapped (De Morgan), so the single
  // construction below computes either the equality formula or its negation.
  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(P: Eq, LHS: L, RHS: R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(Agg: L, Idxs: 0, Name: "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(Agg: R, Idxs: 0, Name: "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr. This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(P: Eq, LHS: LPtr, RHS: RPtr, Name: "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null.  ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(Ty: LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(P: Eq, LHS: LPtr, RHS: Zero, Name: "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj. If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(Agg: L, Idxs: 1, Name: "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(Agg: R, Idxs: 1, Name: "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(P: Eq, LHS: LAdj, RHS: RAdj, Name: "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(Ty: LPtr->getType(), V: 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LHS: LAdj, RHS: RAdj, Name: "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(LHS: OrAdj, RHS: One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(P: Eq, LHS: OrAdjAnd1, RHS: Zero,
                                                      Name: "cmp.or.adj");
    EqZero = Builder.CreateBinOp(Opc: And, LHS: EqZero, RHS: OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Opc: Or, LHS: EqZero, RHS: AdjEq);
  Result = Builder.CreateBinOp(Opc: And, LHS: PtrEq, RHS: Result,
                               Name: Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}
1344
1345llvm::Value *
1346ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1347 llvm::Value *MemPtr,
1348 const MemberPointerType *MPT) {
1349 CGBuilderTy &Builder = CGF.Builder;
1350
1351 /// For member data pointers, this is just a check against -1.
1352 if (MPT->isMemberDataPointer()) {
1353 assert(MemPtr->getType() == CGM.PtrDiffTy);
1354 llvm::Value *NegativeOne =
1355 llvm::Constant::getAllOnesValue(Ty: MemPtr->getType());
1356 return Builder.CreateICmpNE(LHS: MemPtr, RHS: NegativeOne, Name: "memptr.tobool");
1357 }
1358
1359 // In Itanium, a member function pointer is not null if 'ptr' is not null.
1360 llvm::Value *Ptr = Builder.CreateExtractValue(Agg: MemPtr, Idxs: 0, Name: "memptr.ptr");
1361
1362 llvm::Constant *Zero = llvm::ConstantInt::get(Ty: Ptr->getType(), V: 0);
1363 llvm::Value *Result = Builder.CreateICmpNE(LHS: Ptr, RHS: Zero, Name: "memptr.tobool");
1364
1365 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1366 // (the virtual bit) is set.
1367 if (UseARMMethodPtrABI) {
1368 llvm::Constant *One = llvm::ConstantInt::get(Ty: Ptr->getType(), V: 1);
1369 llvm::Value *Adj = Builder.CreateExtractValue(Agg: MemPtr, Idxs: 1, Name: "memptr.adj");
1370 llvm::Value *VirtualBit = Builder.CreateAnd(LHS: Adj, RHS: One, Name: "memptr.virtualbit");
1371 llvm::Value *IsVirtual = Builder.CreateICmpNE(LHS: VirtualBit, RHS: Zero,
1372 Name: "memptr.isvirtual");
1373 Result = Builder.CreateOr(LHS: Result, RHS: IsVirtual);
1374 }
1375
1376 return Result;
1377}
1378
1379bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1380 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1381 if (!RD)
1382 return false;
1383
1384 // If C++ prohibits us from making a copy, return by address.
1385 if (!RD->canPassInRegisters()) {
1386 auto Align = CGM.getContext().getTypeAlignInChars(T: FI.getReturnType());
1387 FI.getReturnInfo() = ABIArgInfo::getIndirect(
1388 Alignment: Align, /*AddrSpace=*/CGM.getDataLayout().getAllocaAddrSpace(),
1389 /*ByVal=*/false);
1390 return true;
1391 }
1392 return false;
1393}
1394
1395/// The Itanium ABI requires non-zero initialization only for data
1396/// member pointers, for which '0' is a valid offset.
1397bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1398 return MPT->isMemberFunctionPointer();
1399}
1400
1401/// The Itanium ABI always places an offset to the complete object
1402/// at entry -2 in the vtable.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl = ElementType->castAsCXXRecordDecl();
    llvm::Value *VTable = CGF.GetVTablePtr(This: Ptr, VTableTy: CGF.DefaultPtrTy, VTableClass: ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    // (The Itanium ABI places the offset-to-top at vtable entry -2.)
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        Ty: CGF.IntPtrTy, Ptr: VTable, Idx0: -2, Name: "complete-offset.ptr");
    llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(Ty: CGF.IntPtrTy, Addr: OffsetPtr,
                                                        Align: CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr = Ptr.emitRawPointer(CGF);
    CompletePtr =
        CGF.Builder.CreateInBoundsGEP(Ty: CGF.Int8Ty, Ptr: CompletePtr, IdxList: Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.
    CGF.pushCallObjectDeleteCleanup(OperatorDelete: DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  // With a class-specific delete, the deleting destructor is responsible for
  // the deallocation, so no cleanup is pushed in that case.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, This: Ptr, E: DE,
                            /*CallOrInvoke=*/nullptr);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}
1443
1444void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1445 // void __cxa_rethrow();
1446
1447 llvm::FunctionType *FTy =
1448 llvm::FunctionType::get(Result: CGM.VoidTy, /*isVarArg=*/false);
1449
1450 llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_rethrow");
1451
1452 if (isNoReturn)
1453 CGF.EmitNoreturnRuntimeCallOrInvoke(callee: Fn, args: {});
1454 else
1455 CGF.EmitRuntimeCallOrInvoke(callee: Fn);
1456}
1457
1458static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1459 // void *__cxa_allocate_exception(size_t thrown_size);
1460
1461 llvm::FunctionType *FTy =
1462 llvm::FunctionType::get(Result: CGM.Int8PtrTy, Params: CGM.SizeTy, /*isVarArg=*/false);
1463
1464 return CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_allocate_exception");
1465}
1466
1467static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1468 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1469 // void (*dest) (void *));
1470
1471 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.GlobalsInt8PtrTy, CGM.Int8PtrTy };
1472 llvm::FunctionType *FTy =
1473 llvm::FunctionType::get(Result: CGM.VoidTy, Params: Args, /*isVarArg=*/false);
1474
1475 return CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_throw");
1476}
1477
void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object.
  llvm::Type *SizeTy = CGF.ConvertType(T: getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(T: ThrowType).getQuantity();

  llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      callee: AllocExceptionFn, args: llvm::ConstantInt::get(Ty: SizeTy, V: TypeSize), name: "exception");

  // Construct the thrown value directly into the runtime-allocated buffer.
  CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
  CGF.EmitAnyExprToExn(
      E: E->getSubExpr(), Addr: Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(Ty: ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor.  If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const auto *Record = ThrowType->getAsCXXRecordDecl();
      Record && !Record->hasTrivialDestructor()) {
    // __cxa_throw is declared to take its destructor as void (*)(void *). We
    // must match that if function pointers can be authenticated with a
    // discriminator based on their type.
    const ASTContext &Ctx = getContext();
    QualType DtorTy = Ctx.getFunctionType(ResultTy: Ctx.VoidTy, Args: {Ctx.VoidPtrTy},
                                          EPI: FunctionProtoType::ExtProtoInfo());

    CXXDestructorDecl *DtorD = Record->getDestructor();
    Dtor = CGM.getAddrOfCXXStructor(GD: GlobalDecl(DtorD, Dtor_Complete));
    Dtor = CGM.getFunctionPointer(Pointer: Dtor, FunctionType: DtorTy);
  }
  if (!Dtor) Dtor = llvm::Constant::getNullValue(Ty: CGM.Int8PtrTy);

  // __cxa_throw transfers ownership of the exception object to the runtime
  // and does not return.
  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(callee: getThrowFn(CGM), args);
}
1517
1518static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1519 // void *__dynamic_cast(const void *sub,
1520 // global_as const abi::__class_type_info *src,
1521 // global_as const abi::__class_type_info *dst,
1522 // std::ptrdiff_t src2dst_offset);
1523
1524 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1525 llvm::Type *GlobInt8PtrTy = CGF.GlobalsInt8PtrTy;
1526 llvm::Type *PtrDiffTy =
1527 CGF.ConvertType(T: CGF.getContext().getPointerDiffType());
1528
1529 llvm::Type *Args[4] = { Int8PtrTy, GlobInt8PtrTy, GlobInt8PtrTy, PtrDiffTy };
1530
1531 llvm::FunctionType *FTy = llvm::FunctionType::get(Result: Int8PtrTy, Params: Args, isVarArg: false);
1532
1533 // Mark the function as nounwind willreturn readonly.
1534 llvm::AttrBuilder FuncAttrs(CGF.getLLVMContext());
1535 FuncAttrs.addAttribute(Val: llvm::Attribute::NoUnwind);
1536 FuncAttrs.addAttribute(Val: llvm::Attribute::WillReturn);
1537 FuncAttrs.addMemoryAttr(ME: llvm::MemoryEffects::readOnly());
1538 llvm::AttributeList Attrs = llvm::AttributeList::get(
1539 C&: CGF.getLLVMContext(), Index: llvm::AttributeList::FunctionIndex, B: FuncAttrs);
1540
1541 return CGF.CGM.CreateRuntimeFunction(Ty: FTy, Name: "__dynamic_cast", ExtraAttrs: Attrs);
1542}
1543
1544static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1545 // void __cxa_bad_cast();
1546 llvm::FunctionType *FTy = llvm::FunctionType::get(Result: CGF.VoidTy, isVarArg: false);
1547 return CGF.CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_bad_cast");
1548}
1549
1550/// Compute the src2dst_offset hint as described in the
1551/// Itanium C++ ABI [2.9.7]
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  // Sentinel return values used below: -1 = no hint, -2 = Src is not a
  // public base of Dst, -3 = Src appears as multiple public bases.
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst. Record all inheritance paths.
  if (!Dst->isDerivedFrom(Base: Src, Paths))
    return CharUnits::fromQuantity(Quantity: -2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (const CXXBasePath &Path : Paths) {
    if (Path.Access != AS_public) // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (const CXXBasePathElement &PathElement : Path) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (PathElement.Base->isVirtual())
        return CharUnits::fromQuantity(Quantity: -1ULL);

      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(D: PathElement.Class);
      Offset += L.getBaseClassOffset(
          Base: PathElement.Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(Quantity: -2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(Quantity: -3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}
1601
1602static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1603 // void __cxa_bad_typeid();
1604 llvm::FunctionType *FTy = llvm::FunctionType::get(Result: CGF.VoidTy, isVarArg: false);
1605
1606 return CGF.CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_bad_typeid");
1607}
1608
// In the Itanium ABI, typeid of a dereferenced null pointer to polymorphic
// class type must throw std::bad_typeid, so the operand is always
// null-checked before the vtable is consulted.
bool ItaniumCXXABI::shouldTypeidBeNullChecked(QualType SrcRecordTy) {
  return true;
}
1612
1613void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1614 llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1615 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(callee: Fn);
1616 Call->setDoesNotReturn();
1617 CGF.Builder.CreateUnreachable();
1618}
1619
// Emit typeid on a polymorphic glvalue: read the vptr and fetch the
// std::type_info pointer stored immediately before the vtable's address
// point (Itanium ABI vtable layout).
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  auto *ClassDecl = SrcRecordTy->castAsCXXRecordDecl();
  llvm::Value *Value = CGF.GetVTablePtr(This: ThisPtr, VTableTy: CGM.GlobalsInt8PtrTy,
                                        VTableClass: ClassDecl);

  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info. In the relative layout the slot is a 32-bit
    // relative reference one entry (-4 bytes) before the address point;
    // load_relative resolves it to a pointer, which the final load below
    // dereferences.
    Value = CGF.Builder.CreateCall(
        Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::load_relative, Tys: {CGM.Int32Ty}),
        Args: {Value, llvm::ConstantInt::getSigned(Ty: CGM.Int32Ty, V: -4)});
  } else {
    // Load the type info. Classic layout: a pointer-sized slot at index -1
    // relative to the address point.
    Value =
        CGF.Builder.CreateConstInBoundsGEP1_64(Ty: StdTypeInfoPtrTy, Ptr: Value, Idx0: -1ULL);
  }
  return CGF.Builder.CreateAlignedLoad(Ty: StdTypeInfoPtrTy, Addr: Value,
                                       Align: CGF.getPointerAlign());
}
1641
// A dynamic_cast of a pointer must yield null for a null source without
// calling the runtime, so pointer casts are null-checked. Reference casts
// operate on a glvalue, which is presumed non-null.
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                                       QualType SrcRecordTy) {
  return SrcIsPtr;
}
1646
// Emit a call to the Itanium runtime entry point
//   void *__dynamic_cast(const void *sub, const __class_type_info *src,
//                        const __class_type_info *dst,
//                        std::ptrdiff_t src2dst_offset);
// and, for casts to reference type, a std::bad_cast check on the result.
llvm::Value *ItaniumCXXABI::emitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(T: CGF.getContext().getPointerDiffType());

  // RTTI descriptors identifying the source and destination class types.
  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(Ty: SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(Ty: DestRecordTy.getUnqualifiedType());

  // Compute the offset hint (see computeOffsetHint for the encoding).
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::getSigned(
      Ty: PtrDiffLTy,
      V: computeOffsetHint(Context&: CGF.getContext(), Src: SrcDecl, Dst: DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.emitRawPointer(CGF);
  if (CGM.getCodeGenOpts().PointerAuth.CXXVTablePointers) {
    // We perform a no-op load of the vtable pointer here to force an
    // authentication. In environments that do not support pointer
    // authentication this is an actual no-op that will be elided. When
    // pointer authentication is supported and enforced on vtable pointers
    // this load can trap.
    llvm::Value *Vtable =
        CGF.GetVTablePtr(This: ThisAddr, VTableTy: CGM.Int8PtrTy, VTableClass: SrcDecl,
                         AuthMode: CodeGenFunction::VTableAuthMode::MustTrap);
    assert(Vtable);
    (void)Vtable;
  }

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(callee: getItaniumDynamicCastFn(CGF), args);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock(name: "dynamic_cast.bad_cast");

    // __dynamic_cast signals failure with a null result.
    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Arg: Value);
    CGF.Builder.CreateCondBr(Cond: IsNull, True: BadCastBlock, False: CastEnd);

    CGF.EmitBlock(BB: BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
1698
// Precompute the information needed to lower a dynamic_cast to a final
// class "exactly": either the unique offset at which a SrcDecl subobject
// (and its vptr) sits inside DestDecl, or a flag saying the cast must first
// hop to the most-derived object. Returns std::nullopt when DestDecl has no
// public SrcDecl base at all, i.e. the cast always fails.
std::optional<CGCXXABI::ExactDynamicCastInfo>
ItaniumCXXABI::getExactDynamicCastInfo(QualType SrcRecordTy, QualType DestTy,
                                       QualType DestRecordTy) {
  assert(shouldEmitExactDynamicCast(DestRecordTy));

  ASTContext &Context = getContext();

  // Find all the inheritance paths.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);
  // The boolean result is ignored: Paths records the paths either way, and
  // "no public path" is handled below by returning std::nullopt.
  (void)DestDecl->isDerivedFrom(Base: SrcDecl, Paths);

  // Find an offset within `DestDecl` where a `SrcDecl` instance and its vptr
  // might appear.
  std::optional<CharUnits> Offset;
  for (const CXXBasePath &Path : Paths) {
    // dynamic_cast only finds public inheritance paths.
    if (Path.Access != AS_public)
      continue;

    CharUnits PathOffset;
    for (const CXXBasePathElement &PathElement : Path) {
      // Find the offset along this inheritance step.
      const CXXRecordDecl *Base =
          PathElement.Base->getType()->getAsCXXRecordDecl();
      if (PathElement.Base->isVirtual()) {
        // For a virtual base class, we know that the derived class is exactly
        // DestDecl, so we can use the vbase offset from its layout.
        const ASTRecordLayout &L = Context.getASTRecordLayout(D: DestDecl);
        PathOffset = L.getVBaseClassOffset(VBase: Base);
      } else {
        const ASTRecordLayout &L =
            Context.getASTRecordLayout(D: PathElement.Class);
        PathOffset += L.getBaseClassOffset(Base);
      }
    }

    if (!Offset)
      Offset = PathOffset;
    else if (Offset != PathOffset) {
      // Base appears in at least two different places.
      return ExactDynamicCastInfo{/*RequiresCastToPrimaryBase=*/true,
                                  .Offset: CharUnits::Zero()};
    }
  }
  // No public SrcDecl base: the cast can never succeed.
  if (!Offset)
    return std::nullopt;
  return ExactDynamicCastInfo{/*RequiresCastToPrimaryBase=*/false, .Offset: *Offset};
}
1750
// Lower a dynamic_cast whose destination is known to be a final class by
// comparing the object's vptr against the expected vtable address point,
// instead of calling __dynamic_cast. Branches to CastSuccess/CastFail and
// returns the (possibly adjusted) object pointer.
llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy,
    const ExactDynamicCastInfo &ExactCastInfo, llvm::BasicBlock *CastSuccess,
    llvm::BasicBlock *CastFail) {
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  // Helper: under pointer authentication, force an authenticating (and
  // therefore possibly trapping) load of the object's vptr.
  auto AuthenticateVTable = [&](Address ThisAddr, const CXXRecordDecl *Decl) {
    if (!CGF.getLangOpts().PointerAuthCalls)
      return;
    (void)CGF.GetVTablePtr(This: ThisAddr, VTableTy: CGF.DefaultPtrTy, VTableClass: Decl,
                           AuthMode: CodeGenFunction::VTableAuthMode::MustTrap);
  };

  bool PerformPostCastAuthentication = false;
  llvm::Value *VTable = nullptr;
  if (ExactCastInfo.RequiresCastToPrimaryBase) {
    // Base appears in at least two different places. Find the most-derived
    // object and see if it's a DestDecl. Note that the most-derived object
    // must be at least as aligned as this base class subobject, and must
    // have a vptr at offset 0.
    llvm::Value *PrimaryBase =
        emitDynamicCastToVoid(CGF, Value: ThisAddr, SrcRecordTy);
    ThisAddr = Address(PrimaryBase, CGF.VoidPtrTy, ThisAddr.getAlignment());
    SrcDecl = DestDecl;
    // This unauthenticated load is unavoidable, so we're relying on the
    // authenticated load in the dynamic cast to void, and we'll manually
    // authenticate the resulting v-table at the end of the cast check.
    PerformPostCastAuthentication = CGF.getLangOpts().PointerAuthCalls;
    CGPointerAuthInfo StrippingAuthInfo(0, PointerAuthenticationMode::Strip,
                                        false, false, nullptr);
    Address VTablePtrPtr = ThisAddr.withElementType(ElemTy: CGF.VoidPtrPtrTy);
    VTable = CGF.Builder.CreateLoad(Addr: VTablePtrPtr, Name: "vtable");
    if (PerformPostCastAuthentication)
      VTable = CGF.EmitPointerAuthAuth(Info: StrippingAuthInfo, Pointer: VTable);
  } else
    VTable = CGF.GetVTablePtr(This: ThisAddr, VTableTy: CGF.DefaultPtrTy, VTableClass: SrcDecl);

  // Compare the vptr against the expected vptr for the destination type at
  // this offset.
  llvm::Constant *ExpectedVTable = getVTableAddressPoint(
      Base: BaseSubobject(SrcDecl, ExactCastInfo.Offset), VTableClass: DestDecl);
  llvm::Value *Success = CGF.Builder.CreateICmpEQ(LHS: VTable, RHS: ExpectedVTable);
  llvm::Value *AdjustedThisPtr = ThisAddr.emitRawPointer(CGF);

  if (!ExactCastInfo.Offset.isZero()) {
    // Adjust downward from the base subobject to the most-derived object.
    CharUnits::QuantityType Offset = ExactCastInfo.Offset.getQuantity();
    llvm::Constant *OffsetConstant =
        llvm::ConstantInt::get(Ty: CGF.PtrDiffTy, V: -Offset);
    AdjustedThisPtr = CGF.Builder.CreateInBoundsGEP(Ty: CGF.CharTy, Ptr: AdjustedThisPtr,
                                                    IdxList: OffsetConstant);
    PerformPostCastAuthentication = CGF.getLangOpts().PointerAuthCalls;
  }

  if (PerformPostCastAuthentication) {
    // If we've changed the object pointer we authenticate the vtable pointer
    // of the resulting object, but only on the success path; the PHI below
    // merges the authenticated pointer with null for the failing path.
    llvm::BasicBlock *NonNullBlock = CGF.Builder.GetInsertBlock();
    llvm::BasicBlock *PostCastAuthSuccess =
        CGF.createBasicBlock(name: "dynamic_cast.postauth.success");
    llvm::BasicBlock *PostCastAuthComplete =
        CGF.createBasicBlock(name: "dynamic_cast.postauth.complete");
    CGF.Builder.CreateCondBr(Cond: Success, True: PostCastAuthSuccess,
                             False: PostCastAuthComplete);
    CGF.EmitBlock(BB: PostCastAuthSuccess);
    Address AdjustedThisAddr =
        Address(AdjustedThisPtr, CGF.IntPtrTy, CGF.getPointerAlign());
    AuthenticateVTable(AdjustedThisAddr, DestDecl);
    CGF.EmitBranch(Block: PostCastAuthComplete);
    CGF.EmitBlock(BB: PostCastAuthComplete);
    llvm::PHINode *PHI = CGF.Builder.CreatePHI(Ty: AdjustedThisPtr->getType(), NumReservedValues: 2);
    PHI->addIncoming(V: AdjustedThisPtr, BB: PostCastAuthSuccess);
    llvm::Value *NullValue =
        llvm::Constant::getNullValue(Ty: AdjustedThisPtr->getType());
    PHI->addIncoming(V: NullValue, BB: NonNullBlock);
    AdjustedThisPtr = PHI;
  }
  CGF.Builder.CreateCondBr(Cond: Success, True: CastSuccess, False: CastFail);
  return AdjustedThisPtr;
}
1831
// Lower dynamic_cast<void*>: adjust the pointer to the most-derived object
// by adding the "offset-to-top" value stored in the vtable (the entry two
// slots before the address point in the Itanium layout).
llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy) {
  auto *ClassDecl = SrcRecordTy->castAsCXXRecordDecl();
  llvm::Value *OffsetToTop;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(This: ThisAddr, VTableTy: CGF.DefaultPtrTy, VTableClass: ClassDecl);

    // Get the offset-to-top from the vtable. In the relative layout the
    // entries are 32-bit, so index -2 with 4-byte alignment.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_32(Ty: CGM.Int32Ty, Ptr: VTable, Idx0: -2U);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        Ty: CGM.Int32Ty, Addr: OffsetToTop, Align: CharUnits::fromQuantity(Quantity: 4), Name: "offset.to.top");
  } else {
    llvm::Type *PtrDiffLTy =
        CGF.ConvertType(T: CGF.getContext().getPointerDiffType());

    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(This: ThisAddr, VTableTy: CGF.DefaultPtrTy, VTableClass: ClassDecl);

    // Get the offset-to-top from the vtable: a ptrdiff_t-sized entry two
    // slots before the address point.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(Ty: PtrDiffLTy, Ptr: VTable, Idx0: -2ULL);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        Ty: PtrDiffLTy, Addr: OffsetToTop, Align: CGF.getPointerAlign(), Name: "offset.to.top");
  }
  // Finally, add the offset to the pointer.
  return CGF.Builder.CreateInBoundsGEP(Ty: CGF.Int8Ty, Ptr: ThisAddr.emitRawPointer(CGF),
                                       IdxList: OffsetToTop);
}
1865
1866bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1867 llvm::FunctionCallee Fn = getBadCastFn(CGF);
1868 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(callee: Fn);
1869 Call->setDoesNotReturn();
1870 CGF.Builder.CreateUnreachable();
1871 return true;
1872}
1873
// Load the offset of a virtual base from the object's vtable. The vtable
// stores vcall/vbase offsets at statically-known (negative) offsets relative
// to the address point; getVirtualBaseOffsetOffset yields that slot offset.
llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, VTableTy: CGM.Int8PtrTy, VTableClass: ClassDecl);
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD: ClassDecl,
                                                               VBase: BaseClassDecl);
  llvm::Value *VBaseOffsetPtr =
      CGF.Builder.CreateConstGEP1_64(
          Ty: CGF.Int8Ty, Ptr: VTablePtr, Idx0: VBaseOffsetOffset.getQuantity(),
          Name: "vbase.offset.ptr");

  llvm::Value *VBaseOffset;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Relative layout: vbase offsets are 32-bit entries.
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        Ty: CGF.Int32Ty, Addr: VBaseOffsetPtr, Align: CharUnits::fromQuantity(Quantity: 4),
        Name: "vbase.offset");
  } else {
    // Classic layout: vbase offsets are ptrdiff_t-sized entries.
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        Ty: CGM.PtrDiffTy, Addr: VBaseOffsetPtr, Align: CGF.getPointerAlign(), Name: "vbase.offset");
  }
  return VBaseOffset;
}
1899
1900void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1901 // Just make sure we're in sync with TargetCXXABI.
1902 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1903
1904 // The constructor used for constructing this as a base class;
1905 // ignores virtual bases.
1906 CGM.EmitGlobal(D: GlobalDecl(D, Ctor_Base));
1907
1908 // The constructor used for constructing this as a complete class;
1909 // constructs the virtual bases, then calls the base constructor.
1910 if (!D->getParent()->isAbstract()) {
1911 // We don't need to emit the complete ctor if the class is abstract.
1912 CGM.EmitGlobal(D: GlobalDecl(D, Ctor_Complete));
1913 }
1914}
1915
// Add any implicit ABI parameters to a constructor/destructor signature.
// For Itanium this is only the VTT, passed to base-object variants of
// structors of classes with virtual bases. Returns how many arguments were
// prepended (after 'this').
CGCXXABI::AddedStructorArgCounts
ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
                                      SmallVectorImpl<CanQualType> &ArgTys) {
  ASTContext &Context = getContext();

  // All parameters are already in place except VTT, which goes after 'this'.
  // These are Clang types, so we don't need to worry about sret yet.

  // Check if we need to add a VTT parameter (which has type global void **).
  if ((isa<CXXConstructorDecl>(Val: GD.getDecl()) ? GD.getCtorType() == Ctor_Base
                                              : GD.getDtorType() == Dtor_Base) &&
      cast<CXXMethodDecl>(Val: GD.getDecl())->getParent()->getNumVBases() != 0) {
    // The VTT lives in the global address space on targets where that
    // differs from the generic address space.
    LangAS AS = CGM.GetGlobalVarAddressSpace(D: nullptr);
    QualType Q = Context.getAddrSpaceQualType(T: Context.VoidPtrTy, AddressSpace: AS);
    ArgTys.insert(I: ArgTys.begin() + 1,
                  Elt: Context.getPointerType(T: CanQualType::CreateUnsafe(Other: Q)));
    return AddedStructorArgCounts::prefix(N: 1);
  }
  return AddedStructorArgCounts{};
}
1936
1937void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1938 // The destructor used for destructing this as a base class; ignores
1939 // virtual bases.
1940 CGM.EmitGlobal(D: GlobalDecl(D, Dtor_Base));
1941
1942 // The destructor used for destructing this as a most-derived class;
1943 // call the base destructor and then destructs any virtual bases.
1944 CGM.EmitGlobal(D: GlobalDecl(D, Dtor_Complete));
1945
1946 // The destructor in a virtual table is always a 'deleting'
1947 // destructor, which calls the complete destructor and then uses the
1948 // appropriate operator delete.
1949 if (D->isVirtual())
1950 CGM.EmitGlobal(D: GlobalDecl(D, Dtor_Deleting));
1951}
1952
// Materialize the implicit structor parameters in the function's argument
// list. For Itanium this is only the VTT, inserted right after 'this' when
// the current structor variant needs one.
void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(Val: CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(GD: CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    // The VTT pointer's pointee lives in the global address space, which
    // may differ from generic on some targets.
    LangAS AS = CGM.GetGlobalVarAddressSpace(D: nullptr);
    QualType Q = Context.getAddrSpaceQualType(T: Context.VoidPtrTy, AddressSpace: AS);
    QualType T = Context.getPointerType(T: Q);
    auto *VTTDecl = ImplicitParamDecl::Create(
        C&: Context, /*DC=*/nullptr, IdLoc: MD->getLocation(), Id: &Context.Idents.get(Name: "vtt"),
        T, ParamKind: ImplicitParamKind::CXXVTT);
    Params.insert(I: Params.begin() + 1, Elt: VTTDecl);
    // Remember the decl so the prolog can load its value (see
    // EmitInstanceFunctionProlog).
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}
1974
// Set up the ABI-defined state at the start of an instance method: the
// 'this' value, the 'vtt' value for structors that take one, and the
// 'this'-return slot where applicable.
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
  /// adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, ThisPtr: loadIncomingCXXThis(CGF));

  /// Initialize the 'vtt' slot if needed (set up by
  /// addImplicitStructorParams).
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        Addr: CGF.GetAddrOfLocalVar(VD: getStructorImplicitParamDecl(CGF)), Name: "vtt");
  }

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
  ///
  /// Unlike the setting of return types, this is done within the ABI
  /// implementation instead of by clients of CGCXXABI because:
  /// 1) getThisValue is currently protected
  /// 2) in theory, an ABI could implement 'this' returns some other way;
  ///    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(GD: CGF.CurGD))
    CGF.Builder.CreateStore(Val: getThisValue(CGF), Addr: CGF.ReturnValue);
}
2001
// Produce the implicit arguments for a constructor call. For Itanium this
// is only the VTT, prepended when the callee variant needs one; otherwise
// no extra arguments are added.
CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
    bool ForVirtualBase, bool Delegating) {
  if (!NeedsVTTParameter(GD: GlobalDecl(D, Type)))
    return AddedStructorArgs{};

  // Insert the implicit 'vtt' argument as the second argument. Make sure to
  // correctly reflect its address space, which can differ from generic on
  // some targets.
  llvm::Value *VTT =
      CGF.GetVTTParameter(GD: GlobalDecl(D, Type), ForVirtualBase, Delegating);
  LangAS AS = CGM.GetGlobalVarAddressSpace(D: nullptr);
  QualType Q = getContext().getAddrSpaceQualType(T: getContext().VoidPtrTy, AddressSpace: AS);
  QualType VTTTy = getContext().getPointerType(T: Q);
  return AddedStructorArgs::prefix(Args: {{.Value: VTT, .Type: VTTTy}});
}
2018
2019llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
2020 CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
2021 bool ForVirtualBase, bool Delegating) {
2022 GlobalDecl GD(DD, Type);
2023 return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
2024}
2025
// Emit a non-virtual call to the given destructor variant, passing the VTT
// when the callee requires one. Apple kext builds route virtual non-base
// destructor calls through the kext vtable mechanism instead.
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This,
                                       QualType ThisTy) {
  GlobalDecl GD(DD, Type);
  llvm::Value *VTT =
      getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(T: getContext().VoidPtrTy);

  CGCallee Callee;
  if (getContext().getLangOpts().AppleKext &&
      Type != Dtor_Base && DD->isVirtual())
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, RD: DD->getParent());
  else
    Callee = CGCallee::forDirect(functionPtr: CGM.getAddrOfCXXStructor(GD), abstractInfo: GD);

  CGF.EmitCXXDestructorCall(Dtor: GD, Callee, This: CGF.getAsNaturalPointerTo(Addr: This, PointeeType: ThisTy),
                            ThisTy, ImplicitParam: VTT, ImplicitParamTy: VTTTy, E: nullptr);
}
2046
2047// Check if any non-inline method has the specified attribute.
2048template <typename T>
2049static bool CXXRecordNonInlineHasAttr(const CXXRecordDecl *RD) {
2050 for (const auto *D : RD->noload_decls()) {
2051 if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
2052 if (FD->isInlined() || FD->doesThisDeclarationHaveABody() ||
2053 FD->isPureVirtual())
2054 continue;
2055 if (D->hasAttr<T>())
2056 return true;
2057 }
2058 }
2059
2060 return false;
2061}
2062
2063static void setVTableSelectiveDLLImportExport(CodeGenModule &CGM,
2064 llvm::GlobalVariable *VTable,
2065 const CXXRecordDecl *RD) {
2066 if (VTable->getDLLStorageClass() !=
2067 llvm::GlobalVariable::DefaultStorageClass ||
2068 RD->hasAttr<DLLImportAttr>() || RD->hasAttr<DLLExportAttr>())
2069 return;
2070
2071 if (CGM.getVTables().isVTableExternal(RD)) {
2072 if (CXXRecordNonInlineHasAttr<DLLImportAttr>(RD))
2073 VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
2074 } else if (CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
2075 VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
2076}
2077
// Emit the vtable definition for RD: build its initializer, set linkage,
// comdat, DLL storage and visibility, emit fundamental-type RTTI for the
// magic __cxxabiv1::__fundamental_type_info class, attach type metadata for
// CFI/WPD, and handle relative-layout post-processing.
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, VPtrOffset: CharUnits());
  // Already emitted.
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(Ty: CGM.getContext().getCanonicalTagType(TD: RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(builder&: components, layout: VTLayout, rtti: RTTI,
                               vtableHasLocalLinkage: llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(global: VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(Name: VTable->getName()));

  if (CGM.getTarget().hasPS4DLLImportExport())
    setVTableSelectiveDLLImportExport(CGM, VTable, RD);

  // Set the right visibility.
  CGM.setGVProperties(GV: VTable, D: RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr(Str: "__fundamental_type_info") &&
      isa<NamespaceDecl>(Val: DC) && cast<NamespaceDecl>(Val: DC)->getIdentifier() &&
      cast<NamespaceDecl>(Val: DC)->getIdentifier()->isStr(Str: "__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  // Always emit type metadata on non-available_externally definitions, and on
  // available_externally definitions if we are performing whole program
  // devirtualization or speculative devirtualization. We need the type metadata
  // on all vtable definitions to ensure we associate derived classes with base
  // classes defined in headers but with a strong definition only in a shared
  // library.
  if (!VTable->isDeclarationForLinker() ||
      CGM.getCodeGenOpts().WholeProgramVTables ||
      CGM.getCodeGenOpts().DevirtualizeSpeculatively) {
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
    // For available_externally definitions, add the vtable to
    // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
    if (VTable->isDeclarationForLinker()) {
      assert(CGM.getCodeGenOpts().WholeProgramVTables ||
             CGM.getCodeGenOpts().DevirtualizeSpeculatively);
      CGM.addCompilerUsedGlobal(GV: VTable);
    }
  }

  if (VTContext.isRelativeLayout()) {
    // Relative vtables are not interesting to hwasan, and non-dso-local
    // ones get an alias so external references can still be relative.
    CGVT.RemoveHwasanMetadata(GV: VTable);
    if (!VTable->isDSOLocal())
      CGVT.GenerateRelativeVTableAlias(VTable, AliasNameRef: VTable->getName());
  }

  // Emit symbol for debugger only if requested debug info.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->emitVTableSymbol(VTable, RD);
}
2150
2151bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
2152 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
2153 if (Vptr.NearestVBase == nullptr)
2154 return false;
2155 return NeedsVTTParameter(GD: CGF.CurGD);
2156}
2157
2158llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
2159 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
2160 const CXXRecordDecl *NearestVBase) {
2161
2162 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
2163 NeedsVTTParameter(GD: CGF.CurGD)) {
2164 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
2165 NearestVBase);
2166 }
2167 return getVTableAddressPoint(Base, VTableClass);
2168}
2169
// Compute the constant address point for a base subobject's vtable: a GEP
// into the vtable group selecting the right vtable and address point, with
// an inrange annotation restricting accesses to that one vtable.
llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(RD: VTableClass, VPtrOffset: CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  const VTableLayout &Layout =
      CGM.getItaniumVTableContext().getVTableLayout(RD: VTableClass);
  VTableLayout::AddressPointLocation AddressPoint =
      Layout.getAddressPoint(Base);
  llvm::Value *Indices[] = {
      llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: 0),
      llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: AddressPoint.VTableIndex),
      llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: AddressPoint.AddressPointIndex),
  };

  // Add inrange attribute to indicate that only the VTableIndex can be
  // accessed. The range is expressed in bytes relative to the address point.
  unsigned ComponentSize =
      CGM.getDataLayout().getTypeAllocSize(Ty: CGM.getVTableComponentType());
  unsigned VTableSize =
      ComponentSize * Layout.getVTableSize(i: AddressPoint.VTableIndex);
  unsigned Offset = ComponentSize * AddressPoint.AddressPointIndex;
  llvm::ConstantRange InRange(
      llvm::APInt(32, (int)-Offset, true),
      llvm::APInt(32, (int)(VTableSize - Offset), true));
  return llvm::ConstantExpr::getGetElementPtr(
      Ty: VTable->getValueType(), C: VTable, IdxList: Indices, /*InBounds=*/NW: true, InRange);
}
2200
// Load a vtable address point from the VTT at runtime, for use inside a
// base-object constructor/destructor of a class with virtual bases. When
// pointer authentication of VTT entries is enabled, authenticate the loaded
// pointer.
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Get the secondary vpointer index.
  uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(RD: VTableClass, Base);

  /// Load the VTT.
  llvm::Value *VTT = CGF.LoadCXXVTT();
  if (VirtualPointerIndex)
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(Ty: CGF.GlobalsVoidPtrTy, Ptr: VTT,
                                                 Idx0: VirtualPointerIndex);

  // And load the address point from the VTT.
  llvm::Value *AP =
      CGF.Builder.CreateAlignedLoad(Ty: CGF.GlobalsVoidPtrTy, Addr: VTT,
                                    Align: CGF.getPointerAlign());

  if (auto &Schema = CGF.CGM.getCodeGenOpts().PointerAuth.CXXVTTVTablePointers) {
    CGPointerAuthInfo PointerAuth = CGF.EmitPointerAuthInfo(Schema, StorageAddress: VTT,
                                                            SchemaDecl: GlobalDecl(),
                                                            SchemaType: QualType());
    AP = CGF.EmitPointerAuthAuth(Info: PointerAuth, Pointer: AP);
  }

  return AP;
}
2231
// Get or create the (possibly not-yet-initialized) global variable for RD's
// vtable group, caching the result and queueing the vtable for deferred
// emission.
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  // Return the cached global if we've already created it.
  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(layout: VTLayout);

  // Use pointer to global alignment for the vtable. Otherwise we would align
  // them based on the size of the initializer which doesn't make sense as only
  // single values are read.
  unsigned PAlign = CGM.getVtableGlobalVarAlignment();

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, Ty: VTableType, Linkage: llvm::GlobalValue::ExternalLinkage,
      Alignment: getContext().toCharUnitsFromBits(BitSize: PAlign).getAsAlign());
  // The vtable's address is never significant; only its contents are.
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  if (CGM.getTarget().hasPS4DLLImportExport())
    setVTableSelectiveDLLImportExport(CGM, VTable, RD);

  CGM.setGVProperties(GV: VTable, D: RD);
  return VTable;
}
2267
// Load a virtual function pointer from the object's vtable, handling the
// three lowering strategies: CFI's type-checked load, relative-layout
// load_relative, and the classic slot load (optionally with pointer
// authentication info attached to the resulting callee).
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  llvm::Type *PtrTy = CGM.GlobalsInt8PtrTy;
  auto *MethodDecl = cast<CXXMethodDecl>(Val: GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(This, VTableTy: PtrTy, VTableClass: MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc, *VTableSlotPtr = nullptr;
  auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers;

  // Byte offset of the slot within the vtable (component-size granularity).
  llvm::Type *ComponentTy = CGM.getVTables().getVTableComponentType();
  uint64_t ByteOffset =
      VTableIndex * CGM.getDataLayout().getTypeSizeInBits(Ty: ComponentTy) / 8;

  if (!Schema && CGF.ShouldEmitVTableTypeCheckedLoad(RD: MethodDecl->getParent())) {
    // CFI: combined type check + load.
    VFunc = CGF.EmitVTableTypeCheckedLoad(RD: MethodDecl->getParent(), VTable,
                                          VTableTy: PtrTy, VTableByteOffset: ByteOffset);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(RD: MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      VFuncLoad = CGF.Builder.CreateCall(
          Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::load_relative, Tys: {CGM.Int32Ty}),
          Args: {VTable, llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: ByteOffset)});
    } else {
      VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
          Ty: PtrTy, Ptr: VTable, Idx0: VTableIndex, Name: "vfn");
      VFuncLoad = CGF.Builder.CreateAlignedLoad(Ty: PtrTy, Addr: VTableSlotPtr,
                                                Align: CGF.getPointerAlign());
    }

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(Val: VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            KindID: llvm::LLVMContext::MD_invariant_load,
            Node: llvm::MDNode::get(Context&: CGM.getLLVMContext(),
                              MDs: llvm::ArrayRef<llvm::Metadata *>()));
      }
    }
    VFunc = VFuncLoad;
  }

  CGPointerAuthInfo PointerAuth;
  if (Schema) {
    // Pointer authentication discriminates on the original (overridden)
    // method, so map GD back before computing the auth info.
    assert(VTableSlotPtr && "virtual function pointer not set");
    GD = CGM.getItaniumVTableContext().findOriginalMethod(GD: GD.getCanonicalDecl());
    PointerAuth = CGF.EmitPointerAuthInfo(Schema, StorageAddress: VTableSlotPtr, SchemaDecl: GD, SchemaType: QualType());
  }
  CGCallee Callee(GD, VFunc, PointerAuth);
  return Callee;
}
2330
2331llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
2332 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
2333 Address This, DeleteOrMemberCallExpr E, llvm::CallBase **CallOrInvoke) {
2334 auto *CE = dyn_cast<const CXXMemberCallExpr *>(Val&: E);
2335 auto *D = dyn_cast<const CXXDeleteExpr *>(Val&: E);
2336 assert((CE != nullptr) ^ (D != nullptr));
2337 assert(CE == nullptr || CE->arguments().empty());
2338 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
2339
2340 GlobalDecl GD(Dtor, DtorType);
2341 const CGFunctionInfo *FInfo =
2342 &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
2343 llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(Info: *FInfo);
2344 CGCallee Callee = CGCallee::forVirtual(CE, MD: GD, Addr: This, FTy: Ty);
2345
2346 QualType ThisTy;
2347 if (CE) {
2348 ThisTy = CE->getObjectType();
2349 } else {
2350 ThisTy = D->getDestroyedType();
2351 }
2352
2353 CGF.EmitCXXDestructorCall(Dtor: GD, Callee, This: This.emitRawPointer(CGF), ThisTy,
2354 ImplicitParam: nullptr, ImplicitParamTy: QualType(), E: nullptr, CallOrInvoke);
2355 return nullptr;
2356}
2357
2358void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2359 CodeGenVTables &VTables = CGM.getVTables();
2360 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2361 VTables.EmitVTTDefinition(VTT, Linkage: CGM.getVTableLinkage(RD), RD);
2362}
2363
2364bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2365 const CXXRecordDecl *RD) const {
2366 // We don't emit available_externally vtables if we are in -fapple-kext mode
2367 // because kext mode does not permit devirtualization.
2368 if (CGM.getLangOpts().AppleKext)
2369 return false;
2370
2371 // If the vtable is hidden then it is not safe to emit an available_externally
2372 // copy of vtable.
2373 if (isVTableHidden(RD))
2374 return false;
2375
2376 if (CGM.getCodeGenOpts().ForceEmitVTables)
2377 return true;
2378
2379 // A speculative vtable can only be generated if all virtual inline functions
2380 // defined by this class are emitted. The vtable in the final program contains
2381 // for each virtual inline function not used in the current TU a function that
2382 // is equivalent to the unused function. The function in the actual vtable
2383 // does not have to be declared under the same symbol (e.g., a virtual
2384 // destructor that can be substituted with its base class's destructor). Since
2385 // inline functions are emitted lazily and this emissions does not account for
2386 // speculative emission of a vtable, we might generate a speculative vtable
2387 // with references to inline functions that are not emitted under that name.
2388 // This can lead to problems when devirtualizing a call to such a function,
2389 // that result in linking errors. Hence, if there are any unused virtual
2390 // inline function, we cannot emit the speculative vtable.
2391 // FIXME we can still emit a copy of the vtable if we
2392 // can emit definition of the inline functions.
2393 if (hasAnyUnusedVirtualInlineFunction(RD))
2394 return false;
2395
2396 // For a class with virtual bases, we must also be able to speculatively
2397 // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2398 // the vtable" and "can emit the VTT". For a base subobject, this means we
2399 // need to be able to emit non-virtual base vtables.
2400 if (RD->getNumVBases()) {
2401 for (const auto &B : RD->bases()) {
2402 auto *BRD = B.getType()->getAsCXXRecordDecl();
2403 assert(BRD && "no class for base specifier");
2404 if (B.isVirtual() || !BRD->isDynamicClass())
2405 continue;
2406 if (!canSpeculativelyEmitVTableAsBaseClass(RD: BRD))
2407 return false;
2408 }
2409 }
2410
2411 return true;
2412}
2413
2414bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2415 if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2416 return false;
2417
2418 if (RD->shouldEmitInExternalSource())
2419 return false;
2420
2421 // For a complete-object vtable (or more specifically, for the VTT), we need
2422 // to be able to speculatively emit the vtables of all dynamic virtual bases.
2423 for (const auto &B : RD->vbases()) {
2424 auto *BRD = B.getType()->getAsCXXRecordDecl();
2425 assert(BRD && "no class for base specifier");
2426 if (!BRD->isDynamicClass())
2427 continue;
2428 if (!canSpeculativelyEmitVTableAsBaseClass(RD: BRD))
2429 return false;
2430 }
2431
2432 return true;
2433}
/// Apply a pointer adjustment made of an optional constant (non-virtual)
/// byte offset and an optional virtual offset loaded through the vtable of
/// \p UnadjustedClass.
///
/// For a 'this' adjustment (base-to-derived) the non-virtual part is applied
/// first; for a return adjustment (derived-to-base) it is applied second —
/// controlled by \p IsReturnAdjustment.
///
/// \param VirtualAdjustment byte offset into the vtable at which the dynamic
///        adjustment value is stored (0 means no virtual adjustment).
/// \returns the adjusted raw pointer value.
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          Address InitialPtr,
                                          const CXXRecordDecl *UnadjustedClass,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  // Fast path: nothing to adjust.
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.emitRawPointer(CGF);

  // All arithmetic below is done in bytes.
  Address V = InitialPtr.withElementType(ElemTy: CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(Addr: V,
                              Offset: CharUnits::fromQuantity(Quantity: NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    llvm::Value *VTablePtr =
        CGF.GetVTablePtr(This: V, VTableTy: CGF.Int8PtrTy, VTableClass: UnadjustedClass);

    // Address of the adjustment slot inside the vtable.
    llvm::Value *Offset;
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        Ty: CGF.Int8Ty, Ptr: VTablePtr, Idx0: VirtualAdjustment);
    if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Load the adjustment offset from the vtable as a 32-bit int.
      Offset =
          CGF.Builder.CreateAlignedLoad(Ty: CGF.Int32Ty, Addr: OffsetPtr,
                                        Align: CharUnits::fromQuantity(Quantity: 4));
    } else {
      // Classic layout stores the offset as a ptrdiff_t.
      llvm::Type *PtrDiffTy =
          CGF.ConvertType(T: CGF.getContext().getPointerDiffType());

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(Ty: PtrDiffTy, Addr: OffsetPtr,
                                             Align: CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(Ty: V.getElementType(),
                                              Ptr: V.emitRawPointer(CGF), IdxList: Offset);
  } else {
    ResultPtr = V.emitRawPointer(CGF);
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(Ty: CGF.Int8Ty, Ptr: ResultPtr,
                                                       Idx0: NonVirtualAdjustment);
  }

  return ResultPtr;
}
2489
2490llvm::Value *
2491ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF, Address This,
2492 const CXXRecordDecl *UnadjustedClass,
2493 const ThunkInfo &TI) {
2494 return performTypeAdjustment(CGF, InitialPtr: This, UnadjustedClass, NonVirtualAdjustment: TI.This.NonVirtual,
2495 VirtualAdjustment: TI.This.Virtual.Itanium.VCallOffsetOffset,
2496 /*IsReturnAdjustment=*/false);
2497}
2498
2499llvm::Value *
2500ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
2501 const CXXRecordDecl *UnadjustedClass,
2502 const ReturnAdjustment &RA) {
2503 return performTypeAdjustment(CGF, InitialPtr: Ret, UnadjustedClass, NonVirtualAdjustment: RA.NonVirtual,
2504 VirtualAdjustment: RA.Virtual.Itanium.VBaseOffsetOffset,
2505 /*IsReturnAdjustment=*/true);
2506}
2507
2508void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2509 RValue RV, QualType ResultType) {
2510 if (!isa<CXXDestructorDecl>(Val: CGF.CurGD.getDecl()))
2511 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2512
2513 // Destructor thunks in the ARM ABI have indeterminate results.
2514 llvm::Type *T = CGF.ReturnValue.getElementType();
2515 RValue Undef = RValue::get(V: llvm::UndefValue::get(T));
2516 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV: Undef, ResultType);
2517}
2518
2519/************************** Array allocation cookies **************************/
2520
2521CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2522 // The array cookie is a size_t; pad that up to the element alignment.
2523 // The cookie is actually right-justified in that space.
2524 return std::max(a: CharUnits::fromQuantity(Quantity: CGM.SizeSizeInBytes),
2525 b: CGM.getContext().getPreferredTypeAlignInChars(T: elementType));
2526}
2527
/// Write the Itanium array cookie (the element count, right-justified in a
/// slot padded to the element alignment) at the start of the allocation, and
/// return the adjusted address of the actual array data.
Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                             Address NewPtr,
                                             llvm::Value *NumElements,
                                             const CXXNewExpr *expr,
                                             QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr.getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = CGF.getSizeSize();

  // The size of the cookie.
  CharUnits CookieSize =
      std::max(a: SizeSize, b: Ctx.getPreferredTypeAlignInChars(T: ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie: the count sits in the last size_t of
  // the (possibly padded) cookie area.
  Address CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(Addr: CookiePtr, Offset: CookieOffset);

  // Write the number of elements into the appropriate slot.
  Address NumElementsPtr = CookiePtr.withElementType(ElemTy: CGF.SizeTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(Val: NumElements, Addr: NumElementsPtr);

  // Handle the array cookie specially in ASan: poison the cookie for
  // replaceable global operator new (or for any operator new when
  // explicitly requested), but only in address space 0.
  if (CGM.getLangOpts().Sanitize.has(K: SanitizerKind::Address) && AS == 0 &&
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
    // The store to the CookiePtr does not need to be instrumented.
    SI->setNoSanitizeMetadata();
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(Result: CGM.VoidTy, Params: NumElementsPtr.getType(), isVarArg: false);
    llvm::FunctionCallee F =
        CGM.CreateRuntimeFunction(Ty: FTy, Name: "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(Callee: F, Args: NumElementsPtr.emitRawPointer(CGF));
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsByteGEP(Addr: NewPtr, Offset: CookieSize);
}
2572
/// Read the element count back out of an Itanium array cookie.
/// \param allocPtr the start of the cookie (i.e. the raw allocation).
/// \param cookieSize total cookie size as computed by getArrayCookieSizeImpl.
llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                Address allocPtr,
                                                CharUnits cookieSize) {
  // The element size is right-justified in the cookie.
  Address numElementsPtr = allocPtr;
  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
  if (!numElementsOffset.isZero())
    numElementsPtr =
        CGF.Builder.CreateConstInBoundsByteGEP(Addr: numElementsPtr, Offset: numElementsOffset);

  unsigned AS = allocPtr.getAddressSpace();
  numElementsPtr = numElementsPtr.withElementType(ElemTy: CGF.SizeTy);
  // Plain load outside ASan (or in a non-default address space).
  if (!CGM.getLangOpts().Sanitize.has(K: SanitizerKind::Address) || AS != 0)
    return CGF.Builder.CreateLoad(Addr: numElementsPtr);
  // In asan mode emit a function call instead of a regular load and let the
  // run-time deal with it: if the shadow is properly poisoned return the
  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
  // We can't simply ignore this load using nosanitize metadata because
  // the metadata may be lost.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(Result: CGF.SizeTy, Params: CGF.DefaultPtrTy, isVarArg: false);
  llvm::FunctionCallee F =
      CGM.CreateRuntimeFunction(Ty: FTy, Name: "__asan_load_cxx_array_cookie");
  return CGF.Builder.CreateCall(Callee: F, Args: numElementsPtr.emitRawPointer(CGF));
}
2598
2599CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2600 // ARM says that the cookie is always:
2601 // struct array_cookie {
2602 // std::size_t element_size; // element_size != 0
2603 // std::size_t element_count;
2604 // };
2605 // But the base ABI doesn't give anything an alignment greater than
2606 // 8, so we can dismiss this as typical ABI-author blindness to
2607 // actual language complexity and round up to the element alignment.
2608 return std::max(a: CharUnits::fromQuantity(Quantity: 2 * CGM.SizeSizeInBytes),
2609 b: CGM.getContext().getTypeAlignInChars(T: elementType));
2610}
2611
2612Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2613 Address newPtr,
2614 llvm::Value *numElements,
2615 const CXXNewExpr *expr,
2616 QualType elementType) {
2617 assert(requiresArrayCookie(expr));
2618
2619 // The cookie is always at the start of the buffer.
2620 Address cookie = newPtr;
2621
2622 // The first element is the element size.
2623 cookie = cookie.withElementType(ElemTy: CGF.SizeTy);
2624 llvm::Value *elementSize = llvm::ConstantInt::get(Ty: CGF.SizeTy,
2625 V: getContext().getTypeSizeInChars(T: elementType).getQuantity());
2626 CGF.Builder.CreateStore(Val: elementSize, Addr: cookie);
2627
2628 // The second element is the element count.
2629 cookie = CGF.Builder.CreateConstInBoundsGEP(Addr: cookie, Index: 1);
2630 CGF.Builder.CreateStore(Val: numElements, Addr: cookie);
2631
2632 // Finally, compute a pointer to the actual data buffer by skipping
2633 // over the cookie completely.
2634 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2635 return CGF.Builder.CreateConstInBoundsByteGEP(Addr: newPtr, Offset: cookieSize);
2636}
2637
2638llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2639 Address allocPtr,
2640 CharUnits cookieSize) {
2641 // The number of elements is at offset sizeof(size_t) relative to
2642 // the allocated pointer.
2643 Address numElementsPtr
2644 = CGF.Builder.CreateConstInBoundsByteGEP(Addr: allocPtr, Offset: CGF.getSizeSize());
2645
2646 numElementsPtr = numElementsPtr.withElementType(ElemTy: CGF.SizeTy);
2647 return CGF.Builder.CreateLoad(Addr: numElementsPtr);
2648}
2649
2650/*********************** Static local initialization **************************/
2651
2652static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2653 llvm::PointerType *GuardPtrTy) {
2654 // int __cxa_guard_acquire(__guard *guard_object);
2655 llvm::FunctionType *FTy =
2656 llvm::FunctionType::get(Result: CGM.getTypes().ConvertType(T: CGM.getContext().IntTy),
2657 Params: GuardPtrTy, /*isVarArg=*/false);
2658 return CGM.CreateRuntimeFunction(
2659 Ty: FTy, Name: "__cxa_guard_acquire",
2660 ExtraAttrs: llvm::AttributeList::get(C&: CGM.getLLVMContext(),
2661 Index: llvm::AttributeList::FunctionIndex,
2662 Kinds: llvm::Attribute::NoUnwind));
2663}
2664
2665static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2666 llvm::PointerType *GuardPtrTy) {
2667 // void __cxa_guard_release(__guard *guard_object);
2668 llvm::FunctionType *FTy =
2669 llvm::FunctionType::get(Result: CGM.VoidTy, Params: GuardPtrTy, /*isVarArg=*/false);
2670 return CGM.CreateRuntimeFunction(
2671 Ty: FTy, Name: "__cxa_guard_release",
2672 ExtraAttrs: llvm::AttributeList::get(C&: CGM.getLLVMContext(),
2673 Index: llvm::AttributeList::FunctionIndex,
2674 Kinds: llvm::Attribute::NoUnwind));
2675}
2676
2677static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2678 llvm::PointerType *GuardPtrTy) {
2679 // void __cxa_guard_abort(__guard *guard_object);
2680 llvm::FunctionType *FTy =
2681 llvm::FunctionType::get(Result: CGM.VoidTy, Params: GuardPtrTy, /*isVarArg=*/false);
2682 return CGM.CreateRuntimeFunction(
2683 Ty: FTy, Name: "__cxa_guard_abort",
2684 ExtraAttrs: llvm::AttributeList::get(C&: CGM.getLLVMContext(),
2685 Index: llvm::AttributeList::FunctionIndex,
2686 Kinds: llvm::Attribute::NoUnwind));
2687}
2688
namespace {
  /// EH cleanup that calls __cxa_guard_abort on the guard variable along the
  /// exceptional edge out of a guarded static initialization, so the
  /// initialization can be retried later.
  struct CallGuardAbort final : EHScopeStack::Cleanup {
    llvm::GlobalVariable *Guard;
    CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // __cxa_guard_abort is declared nounwind.
      CGF.EmitNounwindRuntimeCall(callee: getGuardAbortFn(CGM&: CGF.CGM, GuardPtrTy: Guard->getType()),
                                  args: Guard);
    }
  };
}
2700
2701/// The ARM code here follows the Itanium code closely enough that we
2702/// just special-case it at particular places.
/// Emit a guarded dynamic initialization for \p D, whose storage is \p var.
/// Creates (or reuses) the mangled guard variable, emits the fast-path check
/// of its first byte, and wraps the initializer in __cxa_guard_acquire /
/// __cxa_guard_release (with a __cxa_guard_abort cleanup) when thread-safe
/// statics are required.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(Kind: D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment =
          CharUnits::fromQuantity(Quantity: CGM.getDataLayout().getABITypeAlign(Ty: guardTy));
    }
  }
  llvm::PointerType *guardPtrTy = llvm::PointerType::get(
      C&: CGF.CGM.getLLVMContext(),
      AddressSpace: CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(D: &D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(D: &D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage, visibility and dll storage class from the guarded
    // variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(Ty: guardTy, V: 0),
                                     guardName.str());
    guard->setDSOLocal(var->isDSOLocal());
    guard->setVisibility(var->getVisibility());
    guard->setDLLStorageClass(var->getDLLStorageClass());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getAsAlign());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
      guard->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(Name: guard->getName()));
    }

    CGM.setStaticLocalDeclGuardAddress(D: &D, C: guard);
  }

  Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }
  //
  // If threadsafe statics are enabled, but we don't have inline atomics, just
  // call __cxa_guard_acquire unconditionally.  The "inline" check isn't
  // actually inline, and the user might not expect calls to __atomic libcalls.

  unsigned MaxInlineWidthInBits = CGF.getTarget().getMaxAtomicInlineWidth();
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock(name: "init.end");
  if (!threadsafe || MaxInlineWidthInBits) {
    // Load the first byte of the guard variable.
    llvm::LoadInst *LI =
        Builder.CreateLoad(Addr: guardAddr.withElementType(ElemTy: CGM.Int8Ty));

    // Itanium ABI:
    //   An implementation supporting thread-safety on multiprocessor
    //   systems must also guarantee that references to the initialized
    //   object do not occur before the load of the initialization flag.
    //
    // In LLVM, we do this by marking the load Acquire.
    if (threadsafe)
      LI->setAtomic(Ordering: llvm::AtomicOrdering::Acquire);

    // For ARM, we should only check the first bit, rather than the entire byte:
    //
    // ARM C++ ABI 3.2.3.1:
    //   To support the potential use of initialization guard variables
    //   as semaphores that are the target of ARM SWP and LDREX/STREX
    //   synchronizing instructions we define a static initialization
    //   guard variable to be a 4-byte aligned, 4-byte word with the
    //   following inline access protocol.
    //     #define INITIALIZED 1
    //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
    //       if (__cxa_guard_acquire(&obj_guard))
    //         ...
    //     }
    //
    // and similarly for ARM64:
    //
    // ARM64 C++ ABI 3.2.2:
    //   This ABI instead only specifies the value bit 0 of the static guard
    //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
    //   variable is not initialized and 1 when it is.
    llvm::Value *V =
        (UseARMGuardVarABI && !useInt8GuardVariable)
            ? Builder.CreateAnd(LHS: LI, RHS: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: 1))
            : LI;
    llvm::Value *NeedsInit = Builder.CreateIsNull(Arg: V, Name: "guard.uninitialized");

    llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock(name: "init.check");

    // Check if the first byte of the guard variable is zero.
    CGF.EmitCXXGuardedInitBranch(NeedsInit, InitBlock: InitCheckBlock, NoInitBlock: EndBlock,
                                 Kind: CodeGenFunction::GuardKind::VariableGuard, D: &D);

    CGF.EmitBlock(BB: InitCheckBlock);
  }

  // The semantics of dynamic initialization of variables with static or thread
  // storage duration depends on whether they are declared at block-scope. The
  // initialization of such variables at block-scope can be aborted with an
  // exception and later retried (per C++20 [stmt.dcl]p4), and recursive entry
  // to their initialization has undefined behavior (also per C++20
  // [stmt.dcl]p4). For such variables declared at non-block scope, exceptions
  // lead to termination (per C++20 [except.terminate]p1), and recursive
  // references to the variables are governed only by the lifetime rules (per
  // C++20 [class.cdtor]p2), which means such references are perfectly fine as
  // long as they avoid touching memory. As a result, block-scope variables must
  // not be marked as initialized until after initialization completes (unless
  // the mark is reverted following an exception), but non-block-scope variables
  // must be marked prior to initialization so that recursive accesses during
  // initialization do not restart initialization.

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(callee: getGuardAcquireFn(CGM, GuardPtrTy: guardPtrTy), args: guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock(name: "init");

    Builder.CreateCondBr(Cond: Builder.CreateIsNotNull(Arg: V, Name: "tobool"),
                         True: InitBlock, False: EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(Kind: EHCleanup, A: guard);

    CGF.EmitBlock(BB: InitBlock);
  } else if (!D.isLocalVarDecl()) {
    // For non-local variables, store 1 into the first byte of the guard
    // variable before the object initialization begins so that references
    // to the variable during initialization don't restart initialization.
    Builder.CreateStore(Val: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: 1),
                        Addr: guardAddr.withElementType(ElemTy: CGM.Int8Ty));
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, GV: var, PerformInit: shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release.  This cannot throw.
    CGF.EmitNounwindRuntimeCall(callee: getGuardReleaseFn(CGM, GuardPtrTy: guardPtrTy),
                                args: guardAddr.emitRawPointer(CGF));
  } else if (D.isLocalVarDecl()) {
    // For local variables, store 1 into the first byte of the guard variable
    // after the object initialization completes so that initialization is
    // retried if initialization is interrupted by an exception.
    Builder.CreateStore(Val: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: 1),
                        Addr: guardAddr.withElementType(ElemTy: CGM.Int8Ty));
  }

  CGF.EmitBlock(BB: EndBlock);
}
2919
2920/// Register a global destructor using __cxa_atexit.
/// Register \p dtor to run at exit by calling __cxa_atexit (or, for
/// thread-local cleanups, _tlv_atexit on Darwin / __cxa_thread_atexit
/// elsewhere), binding it to this shared object via __dso_handle.
/// \param addr the object to pass back to the destructor; may be null when
///        registering an __attribute__((destructor)) function.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::FunctionCallee dtor,
                                        llvm::Constant *addr, bool TLS) {
  assert(!CGF.getTarget().getTriple().isOSAIX() &&
         "unexpected call to emitGlobalDtorWithCXAAtExit");
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
         "__cxa_atexit is disabled");
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.
  llvm::Type *dtorTy = CGF.DefaultPtrTy;

  // Preserve address space of addr.
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
  auto AddrPtrTy = AddrAS ? llvm::PointerType::get(C&: CGF.getLLVMContext(), AddressSpace: AddrAS)
                          : CGF.Int8PtrTy;

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
      CGF.CGM.CreateRuntimeVariable(Ty: CGF.Int8Ty, Name: "__dso_handle");
  auto *GV = cast<llvm::GlobalValue>(Val: handle->stripPointerCasts());
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = {dtorTy, AddrPtrTy, handle->getType()};
  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(Result: CGF.IntTy, Params: paramTys, isVarArg: false);

  // Fetch the actual function.
  llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(Ty: atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(Val: atexit.getCallee()))
    fn->setDoesNotThrow();

  // Sign/convert the destructor as a void(void*) function pointer as the
  // runtime expects (relevant under pointer authentication).
  const auto &Context = CGF.CGM.getContext();
  FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/false));
  QualType fnType =
      Context.getFunctionType(ResultTy: Context.VoidTy, Args: {Context.VoidPtrTy}, EPI);
  llvm::Constant *dtorCallee = cast<llvm::Constant>(Val: dtor.getCallee());
  dtorCallee = CGF.CGM.getFunctionPointer(Pointer: dtorCallee, FunctionType: fnType);

  if (!addr)
    // addr is null when we are trying to register a dtor annotated with
    // __attribute__((destructor)) in a constructor function. Using null here is
    // okay because this argument is just passed back to the destructor
    // function.
    addr = llvm::Constant::getNullValue(Ty: CGF.Int8PtrTy);

  llvm::Value *args[] = {dtorCallee, addr, handle};
  CGF.EmitNounwindRuntimeCall(callee: atexit, args);
}
2977
2978static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
2979 StringRef FnName) {
2980 // Create a function that registers/unregisters destructors that have the same
2981 // priority.
2982 llvm::FunctionType *FTy = llvm::FunctionType::get(Result: CGM.VoidTy, isVarArg: false);
2983 llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2984 ty: FTy, name: FnName, FI: CGM.getTypes().arrangeNullaryFunction(), Loc: SourceLocation());
2985
2986 return GlobalInitOrCleanupFn;
2987}
2988
/// For each destructor priority, build a __GLOBAL_cleanup_<prio> function
/// that calls unatexit on every registered destructor stub (in reverse
/// registration order) and directly invokes the destructor for each stub
/// that unatexit successfully unregistered (returned 0).
void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalCleanupFnName =
        std::string("__GLOBAL_cleanup_") + llvm::to_string(Value: Priority);

    llvm::Function *GlobalCleanupFn =
        createGlobalInitOrCleanupFn(CGM&: *this, FnName: GlobalCleanupFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GD: GlobalDecl(), RetTy: getContext().VoidTy, Fn: GlobalCleanupFn,
                      FnInfo: getTypes().arrangeNullaryFunction(), Args: FunctionArgList(),
                      Loc: SourceLocation(), StartLoc: SourceLocation());
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Get the destructor function type, void(*)(void).
    llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(Result: CGF.VoidTy, isVarArg: false);

    // Destructor functions are run/unregistered in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    auto itv = Dtors.rbegin();
    while (itv != Dtors.rend()) {
      llvm::Function *Dtor = *itv;

      // We're assuming that the destructor function is something we can
      // reasonably call with the correct CC.
      llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub: Dtor);
      // unatexit returns 0 when the handler was found and unregistered,
      // meaning we are now responsible for running it.
      llvm::Value *NeedsDestruct =
          CGF.Builder.CreateIsNull(Arg: V, Name: "needs_destruct");

      llvm::BasicBlock *DestructCallBlock =
          CGF.createBasicBlock(name: "destruct.call");
      llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
          name: (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
      // Check if unatexit returns a value of 0. If it does, jump to
      // DestructCallBlock, otherwise jump to EndBlock directly.
      CGF.Builder.CreateCondBr(Cond: NeedsDestruct, True: DestructCallBlock, False: EndBlock);

      CGF.EmitBlock(BB: DestructCallBlock);

      // Emit the call to casted Dtor.
      llvm::CallInst *CI = CGF.Builder.CreateCall(FTy: dtorFuncTy, Callee: Dtor);
      // Make sure the call and the callee agree on calling convention.
      CI->setCallingConv(Dtor->getCallingConv());

      CGF.EmitBlock(BB: EndBlock);

      itv++;
    }

    CGF.FinishFunction();
    AddGlobalDtor(Dtor: GlobalCleanupFn, Priority);
  }
}
3044
void CodeGenModule::registerGlobalDtorsWithAtExit() {
  // For every atexit priority bucket, synthesize one __GLOBAL_init_<prio>
  // function whose body registers each destructor stub of that bucket, and
  // schedule it as a global constructor at the same priority. Running the
  // registrations as a ctor guarantees the dtors are registered exactly once,
  // after lower-priority ctors have registered theirs.
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalInitFnName =
        std::string("__GLOBAL_init_") + llvm::to_string(Value: Priority);
    llvm::Function *GlobalInitFn =
        createGlobalInitOrCleanupFn(CGM&: *this, FnName: GlobalInitFnName);

    // The registration function is compiler-synthesized: void(), no
    // arguments, no meaningful source location; mark its debug location
    // artificial accordingly.
    CodeGenFunction CGF(*this);
    CGF.StartFunction(GD: GlobalDecl(), RetTy: getContext().VoidTy, Fn: GlobalInitFn,
                      FnInfo: getTypes().arrangeNullaryFunction(), Args: FunctionArgList(),
                      Loc: SourceLocation(), StartLoc: SourceLocation());
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Since constructor functions are run in non-descending order of their
    // priorities, destructors are registered in non-descending order of their
    // priorities, and since destructor functions are run in the reverse order
    // of their registration, destructor functions are run in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    for (auto *Dtor : Dtors) {
      // Register the destructor function calling __cxa_atexit if it is
      // available. Otherwise fall back on calling atexit.
      if (getCodeGenOpts().CXAAtExit) {
        emitGlobalDtorWithCXAAtExit(CGF, dtor: Dtor, addr: nullptr, TLS: false);
      } else {
        // We're assuming that the destructor function is something we can
        // reasonably call with the correct CC.
        CGF.registerGlobalDtorWithAtExit(dtorStub: Dtor);
      }
    }

    CGF.FinishFunction();
    AddGlobalCtor(Ctor: GlobalInitFn, Priority);
  }

  // Targets using sinit/sterm sections also need matching cleanup functions
  // that unregister (and run) these destructors via unatexit.
  if (getCXXABI().useSinitAndSterm())
    unregisterGlobalDtorsWithUnAtExit();
}
3084
/// Register a global destructor as best as we know how.
///
/// The strategies are tried in a deliberate order; each early return commits
/// to one mechanism:
///   1. [[clang::no_destroy]] variables are never destroyed at all.
///   2. HLSL collects destructors in a module-level list (no atexit there).
///   3. Targets without 'atexit' (OpenMP offload) use llvm.global_dtors.
///   4. __cxa_atexit / __cxa_thread_atexit when enabled or for TLS variables.
///   5. Apple kexts collect destructors in a module-level list.
///   6. Otherwise, fall back to plain atexit via a stub.
void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                                       llvm::FunctionCallee dtor,
                                       llvm::Constant *addr) {
  // [[clang::no_destroy]]: skip destruction entirely.
  if (D.isNoDestroy(CGM.getContext()))
    return;

  // HLSL doesn't support atexit.
  if (CGM.getLangOpts().HLSL)
    return CGM.AddCXXDtorEntry(DtorFn: dtor, Object: addr);

  // OpenMP offloading supports C++ constructors and destructors but we do not
  // always have 'atexit' available. Instead lower these to use the LLVM global
  // destructors which we can handle directly in the runtime. Note that this is
  // not strictly 1-to-1 with using `atexit` because we no longer tear down
  // globals in reverse order of when they were constructed.
  if (!CGM.getLangOpts().hasAtExit() && !D.isStaticLocal())
    return CGF.registerGlobalDtorWithLLVM(D, fn: dtor, addr);

  // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
  // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
  // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
  // We can always use __cxa_thread_atexit.
  if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
    return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, TLS: D.getTLSKind());

  // In Apple kexts, we want to add a global destructor entry.
  // FIXME: shouldn't this be guarded by some variable?
  if (CGM.getLangOpts().AppleKext) {
    // Generate a global destructor entry.
    return CGM.AddCXXDtorEntry(DtorFn: dtor, Object: addr);
  }

  // Default: register a stub with plain atexit.
  CGF.registerGlobalDtorWithAtExit(D, fn: dtor, addr);
}
3120
3121static bool isThreadWrapperReplaceable(const VarDecl *VD,
3122 CodeGen::CodeGenModule &CGM) {
3123 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
3124 // Darwin prefers to have references to thread local variables to go through
3125 // the thread wrapper instead of directly referencing the backing variable.
3126 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
3127 CGM.getTarget().getTriple().isOSDarwin();
3128}
3129
3130/// Get the appropriate linkage for the wrapper function. This is essentially
3131/// the weak form of the variable's linkage; every translation unit which needs
3132/// the wrapper emits a copy, and we want the linker to merge them.
3133static llvm::GlobalValue::LinkageTypes
3134getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
3135 llvm::GlobalValue::LinkageTypes VarLinkage =
3136 CGM.getLLVMLinkageVarDefinition(VD);
3137
3138 // For internal linkage variables, we don't need an external or weak wrapper.
3139 if (llvm::GlobalValue::isLocalLinkage(Linkage: VarLinkage))
3140 return VarLinkage;
3141
3142 // If the thread wrapper is replaceable, give it appropriate linkage.
3143 if (isThreadWrapperReplaceable(VD, CGM))
3144 if (!llvm::GlobalVariable::isLinkOnceLinkage(Linkage: VarLinkage) &&
3145 !llvm::GlobalVariable::isWeakODRLinkage(Linkage: VarLinkage))
3146 return VarLinkage;
3147 return llvm::GlobalValue::WeakODRLinkage;
3148}
3149
llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // Returns (creating on first use) the thread_local wrapper function for VD.
  // The wrapper performs any pending dynamic initialization and returns the
  // address of the variable. Newly created wrappers are recorded in
  // ThreadWrappers; their bodies are emitted later by EmitThreadLocalInitFuncs.
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(D: VD, Out);
  }

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  if (llvm::Value *V = CGM.getModule().getNamedValue(Name: WrapperName))
    return cast<llvm::Function>(Val: V);

  // The wrapper returns a pointer to the variable; for a reference variable
  // it returns a pointer to the referenced object instead.
  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      resultType: getContext().getPointerType(T: RetQT), args: FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(Info: FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(Ty: FnTy, Linkage: getThreadLocalWrapperLinkage(VD, CGM),
                             N: WrapperName.str(), M: &CGM.getModule());

  // Weak wrappers are emitted in every TU that uses them; merge the copies
  // through a COMDAT group where the target supports it.
  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Name: Wrapper->getName()));

  CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI, F: Wrapper, /*IsThunk=*/false);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage())
    if (!isThreadWrapperReplaceable(VD, CGM) ||
        llvm::GlobalVariable::isLinkOnceLinkage(Linkage: Wrapper->getLinkage()) ||
        llvm::GlobalVariable::isWeakODRLinkage(Linkage: Wrapper->getLinkage()) ||
        VD->getVisibility() == HiddenVisibility)
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // Replaceable wrappers (Darwin) use the CXX_FAST_TLS calling convention and
  // are guaranteed not to unwind.
  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(Kind: llvm::Attribute::NoUnwind);
  }

  ThreadWrappers.push_back(Elt: {VD, Wrapper});
  return Wrapper;
}
3198
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  // Emits, for this TU: the guarded __tls_init function running all ordered
  // thread_local initializers, a per-variable init symbol (alias, weak
  // declaration, or AIX stub), and the body of every thread wrapper recorded
  // in ThreadWrappers.
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    // Template instantiations are initialized unordered; everything else in
    // this TU is initialized in declaration order via __tls_init.
    if (isTemplateInstantiation(
            Kind: CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(Elt: CXXThreadLocalInits[I]);
  }

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(Result: CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(ty: FTy, name: "__tls_init", FI,
                                                     Loc: SourceLocation(),
                                                     /*TLS=*/true);
    // One-byte thread-local guard ensures __tls_init runs its body at most
    // once per thread.
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: 0), "__tls_guard");
    Guard->setThreadLocal(true);
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
        Fn: InitFunc, CXXThreadLocals: OrderedInits, Guard: ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(Kind: llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(L: getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(Ref: CGM.getMangledName(GD: VD));
      getOrCreateThreadLocalWrapper(VD, Val: GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(Val: CGM.GetGlobalValue(Ref: CGM.getMangledName(GD: VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go through
    // the thread wrapper. This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(D: nullptr, F: Wrapper);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(D: VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(Result: CGM.VoidTy, isVarArg: false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      // Constant-initialized and destruction-free: nothing dynamic to run.
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      // Unordered (template-instantiation) variables get their own per-variable
      // init function rather than the TU-wide __tls_init.
      if (isTemplateInstantiation(Kind: VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(Val: VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Linkage: Var->getLinkage(), Name: InitFnName.str(),
                                         Aliasee: InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(Ty: InitFnTy,
                                    Linkage: llvm::GlobalVariable::ExternalWeakLinkage,
                                    N: InitFnName.str(), M: &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(
          GD: GlobalDecl(), Info: FI, F: cast<llvm::Function>(Val: Init), /*IsThunk=*/false);
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      // Don't mark an extern_weak function DSO local on windows.
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
        Init->setDSOLocal(Var->isDSOLocal());
    }

    llvm::LLVMContext &Context = CGM.getModule().getContext();

    // The linker on AIX is not happy with missing weak symbols. However,
    // other TUs will not know whether the initialization routine exists
    // so create an empty, init function to satisfy the linker.
    // This is needed whenever a thread wrapper function is not used, and
    // also when the symbol is weak.
    if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
        isEmittedWithConstantInitializer(VD, InspectInitForWeakDef: true) &&
        !mayNeedDestruction(VD)) {
      // Init should be null. If it were non-null, then the logic above would
      // either be defining the function to be an alias or declaring the
      // function with the expectation that the definition of the variable
      // is elsewhere.
      assert(Init == nullptr && "Expected Init to be null.");

      llvm::Function *Func = llvm::Function::Create(
          Ty: InitFnTy, Linkage: Var->getLinkage(), N: InitFnName.str(), M: &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI,
                                    F: cast<llvm::Function>(Val: Func),
                                    /*IsThunk=*/false);
      // Create a function body that just returns
      llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, Name: "", Parent: Func);
      CGBuilderTy Builder(CGM, Entry);
      Builder.CreateRetVoid();
    }

    // Now emit the wrapper's body: optionally call the init function, then
    // return the (thread-local) address of the variable.
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, Name: "", Parent: Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (HasConstantInitialization) {
      // No dynamic initialization to invoke.
    } else if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(FTy: InitFnTy, Callee: Init);
        if (isThreadWrapperReplaceable(VD, CGM)) {
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
          llvm::Function *Fn =
              cast<llvm::Function>(Val: cast<llvm::GlobalAlias>(Val: Init)->getAliasee());
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
        }
      }
    } else if (CGM.getTriple().isOSAIX()) {
      // On AIX, except if constinit and also neither of class type or of
      // (possibly multi-dimensional) array of class type, thread_local vars
      // will have init routines regardless of whether they are
      // const-initialized. Since the routine is guaranteed to exist, we can
      // unconditionally call it without testing for its existance. This
      // avoids potentially unresolved weak symbols which the AIX linker
      // isn't happy with.
      Builder.CreateCall(FTy: InitFnTy, Callee: Init);
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Arg: Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, Name: "", Parent: Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, Name: "", Parent: Wrapper);
      Builder.CreateCondBr(Cond: Have, True: InitBB, False: ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(FTy: InitFnTy, Callee: Init);
      Builder.CreateBr(Dest: ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Builder.CreateThreadLocalAddress(Ptr: Var);

    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(D: VD);
      Val = Builder.CreateAlignedLoad(Ty: Var->getValueType(), Addr: Val, Align);
    }
    Val = Builder.CreateAddrSpaceCast(V: Val, DestTy: Wrapper->getReturnType());

    Builder.CreateRet(V: Val);
  }
}
3402
3403LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3404 const VarDecl *VD,
3405 QualType LValType) {
3406 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(D: VD);
3407 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3408
3409 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Callee: Wrapper);
3410 CallVal->setCallingConv(Wrapper->getCallingConv());
3411
3412 LValue LV;
3413 if (VD->getType()->isReferenceType())
3414 LV = CGF.MakeNaturalAlignRawAddrLValue(V: CallVal, T: LValType);
3415 else
3416 LV = CGF.MakeRawAddrLValue(V: CallVal, T: LValType,
3417 Alignment: CGF.getContext().getDeclAlign(D: VD));
3418 // FIXME: need setObjCGCLValueClass?
3419 return LV;
3420}
3421
3422/// Return whether the given global decl needs a VTT parameter, which it does
3423/// if it's a base constructor or destructor with virtual bases.
3424bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3425 const CXXMethodDecl *MD = cast<CXXMethodDecl>(Val: GD.getDecl());
3426
3427 // We don't have any virtual bases, just return early.
3428 if (!MD->getParent()->getNumVBases())
3429 return false;
3430
3431 // Check if we have a base constructor.
3432 if (isa<CXXConstructorDecl>(Val: MD) && GD.getCtorType() == Ctor_Base)
3433 return true;
3434
3435 // Check if we have a base destructor.
3436 if (isa<CXXDestructorDecl>(Val: MD) && GD.getDtorType() == Dtor_Base)
3437 return true;
3438
3439 return false;
3440}
3441
llvm::Constant *
ItaniumCXXABI::getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD) {
  // Returns a thunk usable as a plain function pointer for virtual method MD:
  // the thunk performs a virtual dispatch on its 'this' argument and
  // musttail-calls the selected override, so calls through the pointer still
  // dispatch dynamically.
  SmallString<256> MethodName;
  llvm::raw_svector_ostream Out(MethodName);
  getMangleContext().mangleCXXName(GD: MD, Out);
  MethodName += "_vfpthunk_";
  StringRef ThunkName = MethodName.str();
  llvm::Function *ThunkFn;
  // Reuse an already-emitted thunk for this method if one exists.
  if ((ThunkFn = cast_or_null<llvm::Function>(
           Val: CGM.getModule().getNamedValue(Name: ThunkName))))
    return ThunkFn;

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeCXXMethodDeclaration(MD);
  llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(Info: FnInfo);
  // Externally-visible methods get a mergeable (link-once ODR, hidden) thunk;
  // internal methods get an internal one.
  llvm::GlobalValue::LinkageTypes Linkage =
      MD->isExternallyVisible() ? llvm::GlobalValue::LinkOnceODRLinkage
                                : llvm::GlobalValue::InternalLinkage;
  ThunkFn =
      llvm::Function::Create(Ty: ThunkTy, Linkage, N: ThunkName, M: &CGM.getModule());
  if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
    ThunkFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
  assert(ThunkFn->getName() == ThunkName && "name was uniqued!");

  CGM.SetLLVMFunctionAttributes(GD: MD, Info: FnInfo, F: ThunkFn, /*IsThunk=*/true);
  CGM.SetLLVMFunctionAttributesForDefinition(D: MD, F: ThunkFn);

  // Stack protection sometimes gets inserted after the musttail call.
  ThunkFn->removeFnAttr(Kind: llvm::Attribute::StackProtect);
  ThunkFn->removeFnAttr(Kind: llvm::Attribute::StackProtectStrong);
  ThunkFn->removeFnAttr(Kind: llvm::Attribute::StackProtectReq);

  // Start codegen.
  CodeGenFunction CGF(CGM);
  CGF.CurGD = GlobalDecl(MD);
  CGF.CurFuncIsThunk = true;

  // Build FunctionArgs.
  FunctionArgList FunctionArgs;
  CGF.BuildFunctionArgList(GD: CGF.CurGD, Args&: FunctionArgs);

  CGF.StartFunction(GD: GlobalDecl(), RetTy: FnInfo.getReturnType(), Fn: ThunkFn, FnInfo,
                    Args: FunctionArgs, Loc: MD->getLocation(), StartLoc: SourceLocation());
  llvm::Value *ThisVal = loadIncomingCXXThis(CGF);
  setCXXABIThisValue(CGF, ThisPtr: ThisVal);

  // Forward every incoming argument of the thunk to the real method.
  CallArgList CallArgs;
  for (const VarDecl *VD : FunctionArgs)
    CGF.EmitDelegateCallArg(args&: CallArgs, param: VD, loc: SourceLocation());

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  RequiredArgs Required = RequiredArgs::forPrototypePlus(prototype: FPT, /*this*/ additional: 1);
  const CGFunctionInfo &CallInfo =
      CGM.getTypes().arrangeCXXMethodCall(args: CallArgs, type: FPT, required: Required, numPrefixArgs: 0);
  // Virtual dispatch through the vtable of *this.
  CGCallee Callee = CGCallee::forVirtual(CE: nullptr, MD: GlobalDecl(MD),
                                         Addr: getThisAddress(CGF), FTy: ThunkTy);
  llvm::CallBase *CallOrInvoke;
  CGF.EmitCall(CallInfo, Callee, ReturnValue: ReturnValueSlot(), Args: CallArgs, CallOrInvoke: &CallOrInvoke,
               /*IsMustTail=*/true, Loc: SourceLocation(), IsVirtualFunctionPointerThunk: true);
  auto *Call = cast<llvm::CallInst>(Val: CallOrInvoke);
  // musttail: the thunk must not grow its own frame around the call.
  Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
  if (Call->getType()->isVoidTy())
    CGF.Builder.CreateRetVoid();
  else
    CGF.Builder.CreateRet(V: Call);

  // Finish the function to maintain CodeGenFunction invariants.
  // FIXME: Don't emit unreachable code.
  CGF.EmitBlock(BB: CGF.createBasicBlock());
  CGF.FinishFunction();
  return ThunkFn;
}
3513
namespace {
/// ItaniumRTTIBuilder - Builds Itanium-ABI RTTI descriptors (type_info
/// objects) for a single type at a time: the Fields vector accumulates the
/// constants making up the descriptor currently being built.
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;  // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI;  // Per-module state.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty, llvm::Constant *StorageAddress);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags (abi::__pbase_type_info::__flags).
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10,

    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
    //PTI_TransactionSafe = 0x20,

    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
    PTI_Noexcept = 0x40,
  };

  // VMI type info flags (abi::__vmi_class_type_info::__flags).
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags (abi::__base_class_type_info::__offset_flags).
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
  /// link to an existing RTTI descriptor if one already exists.
  llvm::Constant *BuildTypeInfo(QualType Ty);

  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
  llvm::Constant *BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
};
}
3614
3615llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3616 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3617 SmallString<256> Name;
3618 llvm::raw_svector_ostream Out(Name);
3619 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(T: Ty, Out);
3620
3621 // We know that the mangled name of the type starts at index 4 of the
3622 // mangled name of the typename, so we can just index into it in order to
3623 // get the mangled name of the type.
3624 llvm::Constant *Init;
3625 if (CGM.getTriple().isOSzOS()) {
3626 // On z/OS, typename is stored as 2 encodings: EBCDIC followed by ASCII.
3627 SmallString<256> DualEncodedName;
3628 llvm::ConverterEBCDIC::convertToEBCDIC(Source: Name.substr(Start: 4), Result&: DualEncodedName);
3629 DualEncodedName += '\0';
3630 DualEncodedName += Name.substr(Start: 4);
3631 Init = llvm::ConstantDataArray::getString(Context&: VMContext, Initializer: DualEncodedName);
3632 } else
3633 Init = llvm::ConstantDataArray::getString(Context&: VMContext, Initializer: Name.substr(Start: 4));
3634
3635 auto Align = CGM.getContext().getTypeAlignInChars(T: CGM.getContext().CharTy);
3636
3637 llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3638 Name, Ty: Init->getType(), Linkage, Alignment: Align.getAsAlign());
3639
3640 GV->setInitializer(Init);
3641
3642 return GV;
3643}
3644
3645llvm::Constant *
3646ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3647 // Mangle the RTTI name.
3648 SmallString<256> Name;
3649 llvm::raw_svector_ostream Out(Name);
3650 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T: Ty, Out);
3651
3652 // Look for an existing global.
3653 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3654
3655 if (!GV) {
3656 // Create a new global variable.
3657 // Note for the future: If we would ever like to do deferred emission of
3658 // RTTI, check if emitting vtables opportunistically need any adjustment.
3659
3660 GV = new llvm::GlobalVariable(
3661 CGM.getModule(), CGM.GlobalsInt8PtrTy,
3662 /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr, Name);
3663 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3664 CGM.setGVProperties(GV, D: RD);
3665 // Import the typeinfo symbol when all non-inline virtual methods are
3666 // imported.
3667 if (CGM.getTarget().hasPS4DLLImportExport()) {
3668 if (RD && CXXRecordNonInlineHasAttr<DLLImportAttr>(RD)) {
3669 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3670 CGM.setDSOLocal(GV);
3671 }
3672 }
3673 }
3674
3675 return GV;
3676}
3677
3678/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3679/// info for that type is defined in the standard library.
3680static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
3681 // Itanium C++ ABI 2.9.2:
3682 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
3683 // the run-time support library. Specifically, the run-time support
3684 // library should contain type_info objects for the types X, X* and
3685 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3686 // unsigned char, signed char, short, unsigned short, int, unsigned int,
3687 // long, unsigned long, long long, unsigned long long, float, double,
3688 // long double, char16_t, char32_t, and the IEEE 754r decimal and
3689 // half-precision floating point types.
3690 //
3691 // GCC also emits RTTI for __int128.
3692 // FIXME: We do not emit RTTI information for decimal types here.
3693
3694 // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3695 switch (Ty->getKind()) {
3696 case BuiltinType::Void:
3697 case BuiltinType::NullPtr:
3698 case BuiltinType::Bool:
3699 case BuiltinType::WChar_S:
3700 case BuiltinType::WChar_U:
3701 case BuiltinType::Char_U:
3702 case BuiltinType::Char_S:
3703 case BuiltinType::UChar:
3704 case BuiltinType::SChar:
3705 case BuiltinType::Short:
3706 case BuiltinType::UShort:
3707 case BuiltinType::Int:
3708 case BuiltinType::UInt:
3709 case BuiltinType::Long:
3710 case BuiltinType::ULong:
3711 case BuiltinType::LongLong:
3712 case BuiltinType::ULongLong:
3713 case BuiltinType::Half:
3714 case BuiltinType::Float:
3715 case BuiltinType::Double:
3716 case BuiltinType::LongDouble:
3717 case BuiltinType::Float16:
3718 case BuiltinType::Float128:
3719 case BuiltinType::Ibm128:
3720 case BuiltinType::Char8:
3721 case BuiltinType::Char16:
3722 case BuiltinType::Char32:
3723 case BuiltinType::Int128:
3724 case BuiltinType::UInt128:
3725 return true;
3726
3727#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3728 case BuiltinType::Id:
3729#include "clang/Basic/OpenCLImageTypes.def"
3730#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3731 case BuiltinType::Id:
3732#include "clang/Basic/OpenCLExtensionTypes.def"
3733 case BuiltinType::OCLSampler:
3734 case BuiltinType::OCLEvent:
3735 case BuiltinType::OCLClkEvent:
3736 case BuiltinType::OCLQueue:
3737 case BuiltinType::OCLReserveID:
3738#define SVE_TYPE(Name, Id, SingletonId) \
3739 case BuiltinType::Id:
3740#include "clang/Basic/AArch64ACLETypes.def"
3741#define PPC_VECTOR_TYPE(Name, Id, Size) \
3742 case BuiltinType::Id:
3743#include "clang/Basic/PPCTypes.def"
3744#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3745#include "clang/Basic/RISCVVTypes.def"
3746#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3747#include "clang/Basic/WebAssemblyReferenceTypes.def"
3748#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
3749#include "clang/Basic/AMDGPUTypes.def"
3750#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3751#include "clang/Basic/HLSLIntangibleTypes.def"
3752 case BuiltinType::ShortAccum:
3753 case BuiltinType::Accum:
3754 case BuiltinType::LongAccum:
3755 case BuiltinType::UShortAccum:
3756 case BuiltinType::UAccum:
3757 case BuiltinType::ULongAccum:
3758 case BuiltinType::ShortFract:
3759 case BuiltinType::Fract:
3760 case BuiltinType::LongFract:
3761 case BuiltinType::UShortFract:
3762 case BuiltinType::UFract:
3763 case BuiltinType::ULongFract:
3764 case BuiltinType::SatShortAccum:
3765 case BuiltinType::SatAccum:
3766 case BuiltinType::SatLongAccum:
3767 case BuiltinType::SatUShortAccum:
3768 case BuiltinType::SatUAccum:
3769 case BuiltinType::SatULongAccum:
3770 case BuiltinType::SatShortFract:
3771 case BuiltinType::SatFract:
3772 case BuiltinType::SatLongFract:
3773 case BuiltinType::SatUShortFract:
3774 case BuiltinType::SatUFract:
3775 case BuiltinType::SatULongFract:
3776 case BuiltinType::BFloat16:
3777 return false;
3778
3779 case BuiltinType::Dependent:
3780#define BUILTIN_TYPE(Id, SingletonId)
3781#define PLACEHOLDER_TYPE(Id, SingletonId) \
3782 case BuiltinType::Id:
3783#include "clang/AST/BuiltinTypes.def"
3784 llvm_unreachable("asking for RRTI for a placeholder type!");
3785
3786 case BuiltinType::ObjCId:
3787 case BuiltinType::ObjCClass:
3788 case BuiltinType::ObjCSel:
3789 llvm_unreachable("FIXME: Objective-C types are unsupported!");
3790 }
3791
3792 llvm_unreachable("Invalid BuiltinType Kind!");
3793}
3794
3795static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3796 QualType PointeeTy = PointerTy->getPointeeType();
3797 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Val&: PointeeTy);
3798 if (!BuiltinTy)
3799 return false;
3800
3801 // Check the qualifiers.
3802 Qualifiers Quals = PointeeTy.getQualifiers();
3803 Quals.removeConst();
3804
3805 if (!Quals.empty())
3806 return false;
3807
3808 return TypeInfoIsInStandardLibrary(Ty: BuiltinTy);
3809}
3810
3811/// IsStandardLibraryRTTIDescriptor - Returns whether the type
3812/// information for the given type exists in the standard library.
3813static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3814 // Type info for builtin types is defined in the standard library.
3815 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Val&: Ty))
3816 return TypeInfoIsInStandardLibrary(Ty: BuiltinTy);
3817
3818 // Type info for some pointer types to builtin types is defined in the
3819 // standard library.
3820 if (const PointerType *PointerTy = dyn_cast<PointerType>(Val&: Ty))
3821 return TypeInfoIsInStandardLibrary(PointerTy);
3822
3823 return false;
3824}
3825
3826/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3827/// the given type exists somewhere else, and that we should not emit the type
3828/// information in this translation unit. Assumes that it is not a
3829/// standard-library type.
3830static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3831 QualType Ty) {
3832 ASTContext &Context = CGM.getContext();
3833
3834 // If RTTI is disabled, assume it might be disabled in the
3835 // translation unit that defines any potential key function, too.
3836 if (!Context.getLangOpts().RTTI) return false;
3837
3838 if (const RecordType *RecordTy = dyn_cast<RecordType>(Val&: Ty)) {
3839 const CXXRecordDecl *RD =
3840 cast<CXXRecordDecl>(Val: RecordTy->getDecl())->getDefinitionOrSelf();
3841 if (!RD->hasDefinition())
3842 return false;
3843
3844 if (!RD->isDynamicClass())
3845 return false;
3846
3847 // FIXME: this may need to be reconsidered if the key function
3848 // changes.
3849 // N.B. We must always emit the RTTI data ourselves if there exists a key
3850 // function.
3851 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3852
3853 // Don't import the RTTI but emit it locally.
3854 if (CGM.getTriple().isOSCygMing())
3855 return false;
3856
3857 if (CGM.getVTables().isVTableExternal(RD)) {
3858 if (CGM.getTarget().hasPS4DLLImportExport())
3859 return true;
3860
3861 return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3862 ? false
3863 : true;
3864 }
3865 if (IsDLLImport)
3866 return true;
3867 }
3868
3869 return false;
3870}
3871
3872/// IsIncompleteClassType - Returns whether the given record type is incomplete.
3873static bool IsIncompleteClassType(const RecordType *RecordTy) {
3874 return !RecordTy->getDecl()->getDefinitionOrSelf()->isCompleteDefinition();
3875}
3876
3877/// ContainsIncompleteClassType - Returns whether the given type contains an
3878/// incomplete class type. This is true if
3879///
3880/// * The given type is an incomplete class type.
3881/// * The given type is a pointer type whose pointee type contains an
3882/// incomplete class type.
3883/// * The given type is a member pointer type whose class is an incomplete
3884/// class type.
3885/// * The given type is a member pointer type whoise pointee type contains an
3886/// incomplete class type.
3887/// is an indirect or direct pointer to an incomplete class type.
3888static bool ContainsIncompleteClassType(QualType Ty) {
3889 if (const RecordType *RecordTy = dyn_cast<RecordType>(Val&: Ty)) {
3890 if (IsIncompleteClassType(RecordTy))
3891 return true;
3892 }
3893
3894 if (const PointerType *PointerTy = dyn_cast<PointerType>(Val&: Ty))
3895 return ContainsIncompleteClassType(Ty: PointerTy->getPointeeType());
3896
3897 if (const MemberPointerType *MemberPointerTy =
3898 dyn_cast<MemberPointerType>(Val&: Ty)) {
3899 // Check if the class type is incomplete.
3900 if (!MemberPointerTy->getMostRecentCXXRecordDecl()->hasDefinition())
3901 return true;
3902
3903 return ContainsIncompleteClassType(Ty: MemberPointerTy->getPointeeType());
3904 }
3905
3906 return false;
3907}
3908
3909// CanUseSingleInheritance - Return whether the given record decl has a "single,
3910// public, non-virtual base at offset zero (i.e. the derived class is dynamic
3911// iff the base is)", according to Itanium C++ ABI, 2.95p6b.
3912static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3913 // Check the number of bases.
3914 if (RD->getNumBases() != 1)
3915 return false;
3916
3917 // Get the base.
3918 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3919
3920 // Check that the base is not virtual.
3921 if (Base->isVirtual())
3922 return false;
3923
3924 // Check that the base is public.
3925 if (Base->getAccessSpecifier() != AS_public)
3926 return false;
3927
3928 // Check that the class is dynamic iff the base is.
3929 auto *BaseDecl = Base->getType()->castAsCXXRecordDecl();
3930 if (!BaseDecl->isEmpty() &&
3931 BaseDecl->isDynamicClass() != RD->isDynamicClass())
3932 return false;
3933
3934 return true;
3935}
3936
/// BuildVTablePointer - Push the vtable-pointer field of a type_info object
/// onto Fields: the address point of the vtable of the __cxxabiv1 RTTI class
/// matching \p Ty, optionally signed with pointer authentication (using
/// \p StorageAddress for address discrimination when the schema requires it).
void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty,
                                            llvm::Constant *StorageAddress) {
  // Mangled names of the vtables of the ABI-runtime RTTI classes.
  // abi::__class_type_info.
  static const char * const ClassTypeInfo =
      "_ZTVN10__cxxabiv117__class_type_infoE";
  // abi::__si_class_type_info.
  static const char * const SIClassTypeInfo =
      "_ZTVN10__cxxabiv120__si_class_type_infoE";
  // abi::__vmi_class_type_info.
  static const char * const VMIClassTypeInfo =
      "_ZTVN10__cxxabiv121__vmi_class_type_infoE";

  // Select which __cxxabiv1 class's vtable this type_info object points at.
  const char *VTableName = nullptr;

  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    llvm_unreachable("Pipe types shouldn't get here");

  case Type::ArrayParameter:
    llvm_unreachable("Array Parameter types should not get here.");

  case Type::Builtin:
  case Type::BitInt:
  case Type::OverflowBehavior:
  // GCC treats vector and complex types as fundamental types.
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::Atomic:
  // FIXME: GCC treats block pointers as fundamental types?!
  case Type::BlockPointer:
    // abi::__fundamental_type_info.
    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // abi::__array_type_info.
    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // abi::__function_type_info.
    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
    break;

  case Type::Enum:
    // abi::__enum_type_info.
    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
    break;

  case Type::Record: {
    const auto *RD = cast<CXXRecordDecl>(Val: cast<RecordType>(Val: Ty)->getDecl())
                         ->getDefinitionOrSelf();

    // Incomplete or base-less classes use the plain __class_type_info;
    // a single public non-virtual base at offset zero uses
    // __si_class_type_info; everything else needs __vmi_class_type_info.
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      VTableName = ClassTypeInfo;
    } else if (CanUseSingleInheritance(RD)) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = VMIClassTypeInfo;
    }

    break;
  }

  case Type::ObjCObject:
    // Ignore protocol qualifiers.
    Ty = cast<ObjCObjectType>(Val: Ty)->getBaseType().getTypePtr();

    // Handle id and Class.
    if (isa<BuiltinType>(Val: Ty)) {
      VTableName = ClassTypeInfo;
      break;
    }

    assert(isa<ObjCInterfaceType>(Ty));
    [[fallthrough]];

  case Type::ObjCInterface:
    // Interfaces with a superclass are modeled as single inheritance.
    if (cast<ObjCInterfaceType>(Val: Ty)->getDecl()->getSuperClass()) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = ClassTypeInfo;
    }
    break;

  case Type::ObjCObjectPointer:
  case Type::Pointer:
    // abi::__pointer_type_info.
    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
    break;

  case Type::MemberPointer:
    // abi::__pointer_to_member_type_info.
    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
    break;

  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    llvm_unreachable("HLSL doesn't support virtual functions");
  }

  llvm::Constant *VTable = nullptr;

  // Check if the alias exists. If it doesn't, then get or create the global.
  if (CGM.getItaniumVTableContext().isRelativeLayout())
    VTable = CGM.getModule().getNamedAlias(Name: VTableName);
  if (!VTable) {
    // Declare the vtable as an opaque zero-length byte array; only its
    // address is used here.
    llvm::Type *Ty = llvm::ArrayType::get(ElementType: CGM.GlobalsInt8PtrTy, NumElements: 0);
    VTable = CGM.getModule().getOrInsertGlobal(Name: VTableName, Ty);
  }

  CGM.setDSOLocal(cast<llvm::GlobalValue>(Val: VTable->stripPointerCasts()));

  llvm::Type *PtrDiffTy =
      CGM.getTypes().ConvertType(T: CGM.getContext().getPointerDiffType());

  // Offset the symbol to the vtable's address point.
  // The vtable address point is 2.
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // The vtable address point is 8 bytes after its start:
    // 4 for the offset to top + 4 for the relative offset to rtti.
    llvm::Constant *Eight = llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: 8);
    VTable = llvm::ConstantExpr::getInBoundsPtrAdd(Ptr: VTable, Offset: Eight);
  } else {
    llvm::Constant *Two = llvm::ConstantInt::get(Ty: PtrDiffTy, V: 2);
    VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(Ty: CGM.GlobalsInt8PtrTy,
                                                          C: VTable, Idx: Two);
  }

  // Sign the vtable pointer if the pointer-auth schema for type_info vtable
  // pointers is enabled.
  if (const auto &Schema =
          CGM.getCodeGenOpts().PointerAuth.CXXTypeInfoVTablePointer)
    VTable = CGM.getConstantSignedPointer(
        Pointer: VTable, Schema,
        StorageAddress: Schema.isAddressDiscriminated() ? StorageAddress : nullptr,
        SchemaDecl: GlobalDecl(), SchemaType: QualType(Ty, 0));

  // The vtable pointer is the first field pushed for every type_info.
  Fields.push_back(Elt: VTable);
}
4095
4096/// Return the linkage that the type info and type info name constants
4097/// should have for the given type.
4098static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
4099 QualType Ty) {
4100 // Itanium C++ ABI 2.9.5p7:
4101 // In addition, it and all of the intermediate abi::__pointer_type_info
4102 // structs in the chain down to the abi::__class_type_info for the
4103 // incomplete class type must be prevented from resolving to the
4104 // corresponding type_info structs for the complete class type, possibly
4105 // by making them local static objects. Finally, a dummy class RTTI is
4106 // generated for the incomplete type that will not resolve to the final
4107 // complete class RTTI (because the latter need not exist), possibly by
4108 // making it a local static object.
4109 if (ContainsIncompleteClassType(Ty))
4110 return llvm::GlobalValue::InternalLinkage;
4111
4112 switch (Ty->getLinkage()) {
4113 case Linkage::Invalid:
4114 llvm_unreachable("Linkage hasn't been computed!");
4115
4116 case Linkage::None:
4117 case Linkage::Internal:
4118 case Linkage::UniqueExternal:
4119 return llvm::GlobalValue::InternalLinkage;
4120
4121 case Linkage::VisibleNone:
4122 case Linkage::Module:
4123 case Linkage::External:
4124 // RTTI is not enabled, which means that this type info struct is going
4125 // to be used for exception handling. Give it linkonce_odr linkage.
4126 if (!CGM.getLangOpts().RTTI)
4127 return llvm::GlobalValue::LinkOnceODRLinkage;
4128
4129 if (const RecordType *Record = dyn_cast<RecordType>(Val&: Ty)) {
4130 const auto *RD =
4131 cast<CXXRecordDecl>(Val: Record->getDecl())->getDefinitionOrSelf();
4132 if (RD->hasAttr<WeakAttr>())
4133 return llvm::GlobalValue::WeakODRLinkage;
4134 if (CGM.getTriple().isWindowsItaniumEnvironment())
4135 if (RD->hasAttr<DLLImportAttr>() &&
4136 ShouldUseExternalRTTIDescriptor(CGM, Ty))
4137 return llvm::GlobalValue::ExternalLinkage;
4138 // MinGW always uses LinkOnceODRLinkage for type info.
4139 if (RD->isDynamicClass() &&
4140 !CGM.getContext().getTargetInfo().getTriple().isOSCygMing())
4141 return CGM.getVTableLinkage(RD);
4142 }
4143
4144 return llvm::GlobalValue::LinkOnceODRLinkage;
4145 }
4146
4147 llvm_unreachable("Invalid linkage!");
4148}
4149
4150llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
4151 // We want to operate on the canonical type.
4152 Ty = Ty.getCanonicalType();
4153
4154 // Check if we've already emitted an RTTI descriptor for this type.
4155 SmallString<256> Name;
4156 llvm::raw_svector_ostream Out(Name);
4157 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T: Ty, Out);
4158
4159 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
4160 if (OldGV && !OldGV->isDeclaration()) {
4161 assert(!OldGV->hasAvailableExternallyLinkage() &&
4162 "available_externally typeinfos not yet implemented");
4163
4164 return OldGV;
4165 }
4166
4167 // Check if there is already an external RTTI descriptor for this type.
4168 if (IsStandardLibraryRTTIDescriptor(Ty) ||
4169 ShouldUseExternalRTTIDescriptor(CGM, Ty))
4170 return GetAddrOfExternalRTTIDescriptor(Ty);
4171
4172 // Emit the standard library with external linkage.
4173 llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
4174
4175 // Give the type_info object and name the formal visibility of the
4176 // type itself.
4177 llvm::GlobalValue::VisibilityTypes llvmVisibility;
4178 if (llvm::GlobalValue::isLocalLinkage(Linkage))
4179 // If the linkage is local, only default visibility makes sense.
4180 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
4181 else if (CXXABI.classifyRTTIUniqueness(CanTy: Ty, Linkage) ==
4182 ItaniumCXXABI::RUK_NonUniqueHidden)
4183 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
4184 else
4185 llvmVisibility = CodeGenModule::GetLLVMVisibility(V: Ty->getVisibility());
4186
4187 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4188 llvm::GlobalValue::DefaultStorageClass;
4189 if (auto RD = Ty->getAsCXXRecordDecl()) {
4190 if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
4191 RD->hasAttr<DLLExportAttr>()) ||
4192 (CGM.shouldMapVisibilityToDLLExport(D: RD) &&
4193 !llvm::GlobalValue::isLocalLinkage(Linkage) &&
4194 llvmVisibility == llvm::GlobalValue::DefaultVisibility))
4195 DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
4196 }
4197 return BuildTypeInfo(Ty, Linkage, Visibility: llvmVisibility, DLLStorageClass);
4198}
4199
/// BuildTypeInfo - Emit the type_info global for \p Ty with the given
/// linkage, visibility, and DLL storage class, filling in the Fields member
/// along the way and replacing any pre-existing forward declaration with the
/// same mangled name.
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
    QualType Ty,
    llvm::GlobalVariable::LinkageTypes Linkage,
    llvm::GlobalValue::VisibilityTypes Visibility,
    llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
  // Mangle the _ZTI symbol name for this type.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T: Ty, Out);
  llvm::Module &M = CGM.getModule();
  llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
  // int8 is an arbitrary type to be replaced later with replaceInitializer.
  llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(M, CGM.Int8Ty, /*isConstant=*/true, Linkage,
                               /*Initializer=*/nullptr, Name);

  // Add the vtable pointer.
  BuildVTablePointer(Ty: cast<Type>(Val&: Ty), StorageAddress: GV);

  // And the name.
  llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
  llvm::Constant *TypeNameField;

  // If we're supposed to demote the visibility, be sure to set a flag
  // to use a string comparison for type_info comparisons.
  ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
      CXXABI.classifyRTTIUniqueness(CanTy: Ty, Linkage);
  if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
    // The flag is the sign bit, which on ARM64 is defined to be clear
    // for global pointers. This is very ARM64-specific.
    TypeNameField = llvm::ConstantExpr::getPtrToInt(C: TypeName, Ty: CGM.Int64Ty);
    llvm::Constant *flag =
        llvm::ConstantInt::get(Ty: CGM.Int64Ty, V: ((uint64_t)1) << 63);
    TypeNameField = llvm::ConstantExpr::getAdd(C1: TypeNameField, C2: flag);
    TypeNameField =
        llvm::ConstantExpr::getIntToPtr(C: TypeNameField, Ty: CGM.GlobalsInt8PtrTy);
  } else {
    TypeNameField = TypeName;
  }
  Fields.push_back(Elt: TypeNameField);

  // Add the extra fields required by the particular __cxxabiv1 type_info
  // subclass selected for this type class (see BuildVTablePointer).
  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  // GCC treats vector types as fundamental types.
  case Type::Builtin:
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::BlockPointer:
    // Itanium C++ ABI 2.9.5p4:
    //   abi::__fundamental_type_info adds no data members to std::type_info.
    break;

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    break;

  case Type::BitInt:
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::ArrayParameter:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__array_type_info adds no data members to std::type_info.
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__function_type_info adds no data members to std::type_info.
    break;

  case Type::Enum:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__enum_type_info adds no data members to std::type_info.
    break;

  case Type::Record: {
    const auto *RD = cast<CXXRecordDecl>(Val: cast<RecordType>(Val&: Ty)->getDecl())
                         ->getDefinitionOrSelf();
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      // We don't need to emit any fields.
      break;
    }

    // Single inheritance adds one base type_info pointer; multiple/virtual
    // inheritance adds flags, base count, and a base description array.
    if (CanUseSingleInheritance(RD))
      BuildSIClassTypeInfo(RD);
    else
      BuildVMIClassTypeInfo(RD);

    break;
  }

  case Type::ObjCObject:
  case Type::ObjCInterface:
    BuildObjCObjectTypeInfo(Ty: cast<ObjCObjectType>(Val&: Ty));
    break;

  case Type::ObjCObjectPointer:
    BuildPointerTypeInfo(PointeeTy: cast<ObjCObjectPointerType>(Val&: Ty)->getPointeeType());
    break;

  case Type::Pointer:
    BuildPointerTypeInfo(PointeeTy: cast<PointerType>(Val&: Ty)->getPointeeType());
    break;

  case Type::MemberPointer:
    BuildPointerToMemberTypeInfo(Ty: cast<MemberPointerType>(Val&: Ty));
    break;

  case Type::Atomic:
    // No fields, at least for the moment.
    break;

  case Type::OverflowBehavior:
    break;

  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    llvm_unreachable("HLSL doesn't support RTTI");
  }

  // Swap the placeholder Int8 initializer for the real field struct.
  GV->replaceInitializer(InitVal: llvm::ConstantStruct::getAnon(V: Fields));

  // Export the typeinfo in the same circumstances as the vtable is exported.
  auto GVDLLStorageClass = DLLStorageClass;
  if (CGM.getTarget().hasPS4DLLImportExport() &&
      GVDLLStorageClass != llvm::GlobalVariable::DLLExportStorageClass) {
    if (const RecordType *RecordTy = dyn_cast<RecordType>(Val&: Ty)) {
      const auto *RD =
          cast<CXXRecordDecl>(Val: RecordTy->getDecl())->getDefinitionOrSelf();
      if (RD->hasAttr<DLLExportAttr>() ||
          CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
        GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
    }
  }

  // If there's already an old global variable, replace it with the new one.
  if (OldGV) {
    GV->takeName(V: OldGV);
    OldGV->replaceAllUsesWith(V: GV);
    OldGV->eraseFromParent();
  }

  if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(M.getOrInsertComdat(Name: GV->getName()));

  // Align the descriptor to the target's pointer alignment in the globals
  // address space.
  CharUnits Align = CGM.getContext().toCharUnitsFromBits(
      BitSize: CGM.getTarget().getPointerAlign(AddrSpace: CGM.GetGlobalVarAddressSpace(D: nullptr)));
  GV->setAlignment(Align.getAsAlign());

  // The Itanium ABI specifies that type_info objects must be globally
  // unique, with one exception: if the type is an incomplete class
  // type or a (possibly indirect) pointer to one.  That exception
  // affects the general case of comparing type_info objects produced
  // by the typeid operator, which is why the comparison operators on
  // std::type_info generally use the type_info name pointers instead
  // of the object addresses.  However, the language's built-in uses
  // of RTTI generally require class types to be complete, even when
  // manipulating pointers to those class types.  This allows the
  // implementation of dynamic_cast to rely on address equality tests,
  // which is much faster.

  // All of this is to say that it's important that both the type_info
  // object and the type_info name be uniqued when weakly emitted.

  TypeName->setVisibility(Visibility);
  CGM.setDSOLocal(TypeName);

  GV->setVisibility(Visibility);
  CGM.setDSOLocal(GV);

  TypeName->setDLLStorageClass(DLLStorageClass);
  GV->setDLLStorageClass(GVDLLStorageClass);

  TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
  GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);

  return GV;
}
4396
4397/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
4398/// for the given Objective-C object type.
4399void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
4400 // Drop qualifiers.
4401 const Type *T = OT->getBaseType().getTypePtr();
4402 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
4403
4404 // The builtin types are abi::__class_type_infos and don't require
4405 // extra fields.
4406 if (isa<BuiltinType>(Val: T)) return;
4407
4408 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(Val: T)->getDecl();
4409 ObjCInterfaceDecl *Super = Class->getSuperClass();
4410
4411 // Root classes are also __class_type_info.
4412 if (!Super) return;
4413
4414 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Decl: Super);
4415
4416 // Everything else is single inheritance.
4417 llvm::Constant *BaseTypeInfo =
4418 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: SuperTy);
4419 Fields.push_back(Elt: BaseTypeInfo);
4420}
4421
4422/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
4423/// inheritance, according to the Itanium C++ ABI, 2.95p6b.
4424void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
4425 // Itanium C++ ABI 2.9.5p6b:
4426 // It adds to abi::__class_type_info a single member pointing to the
4427 // type_info structure for the base type,
4428 llvm::Constant *BaseTypeInfo =
4429 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: RD->bases_begin()->getType());
4430 Fields.push_back(Elt: BaseTypeInfo);
4431}
4432
namespace {
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
  /// a class hierarchy.
  struct SeenBases {
    // Classes reached through at least one non-virtual inheritance path.
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
    // Classes reached through at least one virtual inheritance path.
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
  };
}
4441
4442/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
4443/// abi::__vmi_class_type_info.
4444///
4445static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
4446 SeenBases &Bases) {
4447
4448 unsigned Flags = 0;
4449
4450 auto *BaseDecl = Base->getType()->castAsCXXRecordDecl();
4451 if (Base->isVirtual()) {
4452 // Mark the virtual base as seen.
4453 if (!Bases.VirtualBases.insert(Ptr: BaseDecl).second) {
4454 // If this virtual base has been seen before, then the class is diamond
4455 // shaped.
4456 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
4457 } else {
4458 if (Bases.NonVirtualBases.count(Ptr: BaseDecl))
4459 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4460 }
4461 } else {
4462 // Mark the non-virtual base as seen.
4463 if (!Bases.NonVirtualBases.insert(Ptr: BaseDecl).second) {
4464 // If this non-virtual base has been seen before, then the class has non-
4465 // diamond shaped repeated inheritance.
4466 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4467 } else {
4468 if (Bases.VirtualBases.count(Ptr: BaseDecl))
4469 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4470 }
4471 }
4472
4473 // Walk all bases.
4474 for (const auto &I : BaseDecl->bases())
4475 Flags |= ComputeVMIClassTypeInfoFlags(Base: &I, Bases);
4476
4477 return Flags;
4478}
4479
4480static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
4481 unsigned Flags = 0;
4482 SeenBases Bases;
4483
4484 // Walk all bases.
4485 for (const auto &I : RD->bases())
4486 Flags |= ComputeVMIClassTypeInfoFlags(Base: &I, Bases);
4487
4488 return Flags;
4489}
4490
4491/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
4492/// classes with bases that do not satisfy the abi::__si_class_type_info
4493/// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
4494void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
4495 llvm::Type *UnsignedIntLTy =
4496 CGM.getTypes().ConvertType(T: CGM.getContext().UnsignedIntTy);
4497
4498 // Itanium C++ ABI 2.9.5p6c:
4499 // __flags is a word with flags describing details about the class
4500 // structure, which may be referenced by using the __flags_masks
4501 // enumeration. These flags refer to both direct and indirect bases.
4502 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
4503 Fields.push_back(Elt: llvm::ConstantInt::get(Ty: UnsignedIntLTy, V: Flags));
4504
4505 // Itanium C++ ABI 2.9.5p6c:
4506 // __base_count is a word with the number of direct proper base class
4507 // descriptions that follow.
4508 Fields.push_back(Elt: llvm::ConstantInt::get(Ty: UnsignedIntLTy, V: RD->getNumBases()));
4509
4510 if (!RD->getNumBases())
4511 return;
4512
4513 // Now add the base class descriptions.
4514
4515 // Itanium C++ ABI 2.9.5p6c:
4516 // __base_info[] is an array of base class descriptions -- one for every
4517 // direct proper base. Each description is of the type:
4518 //
4519 // struct abi::__base_class_type_info {
4520 // public:
4521 // const __class_type_info *__base_type;
4522 // long __offset_flags;
4523 //
4524 // enum __offset_flags_masks {
4525 // __virtual_mask = 0x1,
4526 // __public_mask = 0x2,
4527 // __offset_shift = 8
4528 // };
4529 // };
4530
4531 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4532 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4533 // LLP64 platforms.
4534 // FIXME: Consider updating libc++abi to match, and extend this logic to all
4535 // LLP64 platforms.
4536 QualType OffsetFlagsTy = CGM.getContext().LongTy;
4537 const TargetInfo &TI = CGM.getContext().getTargetInfo();
4538 if (TI.getTriple().isOSCygMing() &&
4539 TI.getPointerWidth(AddrSpace: LangAS::Default) > TI.getLongWidth())
4540 OffsetFlagsTy = CGM.getContext().LongLongTy;
4541 llvm::Type *OffsetFlagsLTy =
4542 CGM.getTypes().ConvertType(T: OffsetFlagsTy);
4543
4544 for (const auto &Base : RD->bases()) {
4545 // The __base_type member points to the RTTI for the base type.
4546 Fields.push_back(Elt: ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: Base.getType()));
4547
4548 auto *BaseDecl = Base.getType()->castAsCXXRecordDecl();
4549 int64_t OffsetFlags = 0;
4550
4551 // All but the lower 8 bits of __offset_flags are a signed offset.
4552 // For a non-virtual base, this is the offset in the object of the base
4553 // subobject. For a virtual base, this is the offset in the virtual table of
4554 // the virtual base offset for the virtual base referenced (negative).
4555 CharUnits Offset;
4556 if (Base.isVirtual())
4557 Offset =
4558 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, VBase: BaseDecl);
4559 else {
4560 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(D: RD);
4561 Offset = Layout.getBaseClassOffset(Base: BaseDecl);
4562 };
4563
4564 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4565
4566 // The low-order byte of __offset_flags contains flags, as given by the
4567 // masks from the enumeration __offset_flags_masks.
4568 if (Base.isVirtual())
4569 OffsetFlags |= BCTI_Virtual;
4570 if (Base.getAccessSpecifier() == AS_public)
4571 OffsetFlags |= BCTI_Public;
4572
4573 Fields.push_back(Elt: llvm::ConstantInt::getSigned(Ty: OffsetFlagsLTy, V: OffsetFlags));
4574 }
4575}
4576
4577/// Compute the flags for a __pbase_type_info, and remove the corresponding
4578/// pieces from \p Type.
4579static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4580 unsigned Flags = 0;
4581
4582 if (Type.isConstQualified())
4583 Flags |= ItaniumRTTIBuilder::PTI_Const;
4584 if (Type.isVolatileQualified())
4585 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4586 if (Type.isRestrictQualified())
4587 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4588 Type = Type.getUnqualifiedType();
4589
4590 // Itanium C++ ABI 2.9.5p7:
4591 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
4592 // incomplete class type, the incomplete target type flag is set.
4593 if (ContainsIncompleteClassType(Ty: Type))
4594 Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4595
4596 if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4597 if (Proto->isNothrow()) {
4598 Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4599 Type = Ctx.getFunctionTypeWithExceptionSpec(Orig: Type, ESI: EST_None);
4600 }
4601 }
4602
4603 return Flags;
4604}
4605
4606/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4607/// used for pointer types.
4608void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4609 // Itanium C++ ABI 2.9.5p7:
4610 // __flags is a flag word describing the cv-qualification and other
4611 // attributes of the type pointed to
4612 unsigned Flags = extractPBaseFlags(Ctx&: CGM.getContext(), Type&: PointeeTy);
4613
4614 llvm::Type *UnsignedIntLTy =
4615 CGM.getTypes().ConvertType(T: CGM.getContext().UnsignedIntTy);
4616 Fields.push_back(Elt: llvm::ConstantInt::get(Ty: UnsignedIntLTy, V: Flags));
4617
4618 // Itanium C++ ABI 2.9.5p7:
4619 // __pointee is a pointer to the std::type_info derivation for the
4620 // unqualified type being pointed to.
4621 llvm::Constant *PointeeTypeInfo =
4622 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: PointeeTy);
4623 Fields.push_back(Elt: PointeeTypeInfo);
4624}
4625
4626/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4627/// struct, used for member pointer types.
4628void
4629ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4630 QualType PointeeTy = Ty->getPointeeType();
4631
4632 // Itanium C++ ABI 2.9.5p7:
4633 // __flags is a flag word describing the cv-qualification and other
4634 // attributes of the type pointed to.
4635 unsigned Flags = extractPBaseFlags(Ctx&: CGM.getContext(), Type&: PointeeTy);
4636
4637 const auto *RD = Ty->getMostRecentCXXRecordDecl();
4638 if (!RD->hasDefinition())
4639 Flags |= PTI_ContainingClassIncomplete;
4640
4641 llvm::Type *UnsignedIntLTy =
4642 CGM.getTypes().ConvertType(T: CGM.getContext().UnsignedIntTy);
4643 Fields.push_back(Elt: llvm::ConstantInt::get(Ty: UnsignedIntLTy, V: Flags));
4644
4645 // Itanium C++ ABI 2.9.5p7:
4646 // __pointee is a pointer to the std::type_info derivation for the
4647 // unqualified type being pointed to.
4648 llvm::Constant *PointeeTypeInfo =
4649 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: PointeeTy);
4650 Fields.push_back(Elt: PointeeTypeInfo);
4651
4652 // Itanium C++ ABI 2.9.5p9:
4653 // __context is a pointer to an abi::__class_type_info corresponding to the
4654 // class type containing the member pointed to
4655 // (e.g., the "A" in "int A::*").
4656 CanQualType T = CGM.getContext().getCanonicalTagType(TD: RD);
4657 Fields.push_back(Elt: ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: T));
4658}
4659
4660llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4661 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4662}
4663
4664void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4665 // Types added here must also be added to TypeInfoIsInStandardLibrary.
4666 QualType FundamentalTypes[] = {
4667 getContext().VoidTy, getContext().NullPtrTy,
4668 getContext().BoolTy, getContext().WCharTy,
4669 getContext().CharTy, getContext().UnsignedCharTy,
4670 getContext().SignedCharTy, getContext().ShortTy,
4671 getContext().UnsignedShortTy, getContext().IntTy,
4672 getContext().UnsignedIntTy, getContext().LongTy,
4673 getContext().UnsignedLongTy, getContext().LongLongTy,
4674 getContext().UnsignedLongLongTy, getContext().Int128Ty,
4675 getContext().UnsignedInt128Ty, getContext().HalfTy,
4676 getContext().FloatTy, getContext().DoubleTy,
4677 getContext().LongDoubleTy, getContext().Float128Ty,
4678 getContext().Char8Ty, getContext().Char16Ty,
4679 getContext().Char32Ty
4680 };
4681 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4682 RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(D: RD)
4683 ? llvm::GlobalValue::DLLExportStorageClass
4684 : llvm::GlobalValue::DefaultStorageClass;
4685 llvm::GlobalValue::VisibilityTypes Visibility =
4686 CodeGenModule::GetLLVMVisibility(V: RD->getVisibility());
4687 for (const QualType &FundamentalType : FundamentalTypes) {
4688 QualType PointerType = getContext().getPointerType(T: FundamentalType);
4689 QualType PointerTypeConst = getContext().getPointerType(
4690 T: FundamentalType.withConst());
4691 for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4692 ItaniumRTTIBuilder(*this).BuildTypeInfo(
4693 Ty: Type, Linkage: llvm::GlobalValue::ExternalLinkage,
4694 Visibility, DLLStorageClass);
4695 }
4696}
4697
4698/// What sort of uniqueness rules should we use for the RTTI for the
4699/// given type?
4700ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4701 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4702 if (shouldRTTIBeUnique())
4703 return RUK_Unique;
4704
4705 // It's only necessary for linkonce_odr or weak_odr linkage.
4706 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4707 Linkage != llvm::GlobalValue::WeakODRLinkage)
4708 return RUK_Unique;
4709
4710 // It's only necessary with default visibility.
4711 if (CanTy->getVisibility() != DefaultVisibility)
4712 return RUK_Unique;
4713
4714 // If we're not required to publish this symbol, hide it.
4715 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4716 return RUK_NonUniqueHidden;
4717
4718 // If we're required to publish this symbol, as we might be under an
4719 // explicit instantiation, leave it with default visibility but
4720 // enable string-comparisons.
4721 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4722 return RUK_NonUniqueVisible;
4723}
4724
// Find out how to codegen the complete destructor and constructor
namespace {
// How to emit a complete structor relative to its base variant:
//   Emit   - emit it as its own separate definition;
//   RAUW   - don't emit it; redirect references to the base variant
//            (via CGM.addReplacement);
//   Alias  - emit it as an IR alias of the base variant;
//   COMDAT - emit both variants into a shared COMDAT group (C5/D5 mangling).
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
}
4729static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4730 const CXXMethodDecl *MD) {
4731 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4732 return StructorCodegen::Emit;
4733
4734 // The complete and base structors are not equivalent if there are any virtual
4735 // bases, so emit separate functions.
4736 if (MD->getParent()->getNumVBases())
4737 return StructorCodegen::Emit;
4738
4739 GlobalDecl AliasDecl;
4740 if (const auto *DD = dyn_cast<CXXDestructorDecl>(Val: MD)) {
4741 AliasDecl = GlobalDecl(DD, Dtor_Complete);
4742 } else {
4743 const auto *CD = cast<CXXConstructorDecl>(Val: MD);
4744 AliasDecl = GlobalDecl(CD, Ctor_Complete);
4745 }
4746 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(GD: AliasDecl);
4747
4748 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4749 return StructorCodegen::RAUW;
4750
4751 // FIXME: Should we allow available_externally aliases?
4752 if (!llvm::GlobalAlias::isValidLinkage(L: Linkage))
4753 return StructorCodegen::RAUW;
4754
4755 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4756 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4757 if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4758 CGM.getTarget().getTriple().isOSBinFormatWasm())
4759 return StructorCodegen::COMDAT;
4760 return StructorCodegen::Emit;
4761 }
4762
4763 return StructorCodegen::Alias;
4764}
4765
/// Emit AliasDecl as an IR alias of TargetDecl, replacing (and taking the
/// name of) any pre-existing declaration with AliasDecl's mangled name.
static void emitConstructorDestructorAlias(CodeGenModule &CGM,
                                           GlobalDecl AliasDecl,
                                           GlobalDecl TargetDecl) {
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(GD: AliasDecl);

  // If a definition (not just a declaration) already exists under this
  // mangled name, leave it alone.
  StringRef MangledName = CGM.getMangledName(GD: AliasDecl);
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(Ref: MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  auto *Aliasee = cast<llvm::GlobalValue>(Val: CGM.GetAddrOfGlobal(GD: TargetDecl));

  // Create the alias with no name.
  auto *Alias = llvm::GlobalAlias::create(Linkage, Name: "", Aliasee);

  // Constructors and destructors are always unnamed_addr.
  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Switch any previous uses to the alias: steal the old declaration's name,
  // redirect its uses, and delete the now-dead declaration. The order here
  // matters -- the name must be freed before the alias can take it.
  if (Entry) {
    assert(Entry->getType() == Aliasee->getType() &&
           "declaration exists with different type");
    Alias->takeName(V: Entry);
    Entry->replaceAllUsesWith(V: Alias);
    Entry->eraseFromParent();
  } else {
    Alias->setName(MangledName);
  }

  // Finally, set up the alias with its proper name and attributes.
  CGM.SetCommonAttributes(GD: AliasDecl, GV: Alias);
}
4798
/// Emit the given constructor or destructor variant, possibly as an alias
/// of (or replacement by) its base-object variant per getCodegenToUse.
void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(Val: GD.getDecl());
  auto *CD = dyn_cast<CXXConstructorDecl>(Val: MD);
  // Exactly one of CD/DD is non-null: a structor is either a constructor
  // or a destructor.
  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(Val: MD);

  StructorCodegen CGType = getCodegenToUse(CGM, MD);

  // For a complete-object variant we may be able to avoid emitting a body
  // at all by forwarding to the base-object variant.
  if (CD ? GD.getCtorType() == Ctor_Complete
         : GD.getDtorType() == Dtor_Complete) {
    GlobalDecl BaseDecl;
    if (CD)
      BaseDecl = GD.getWithCtorType(Type: Ctor_Base);
    else
      BaseDecl = GD.getWithDtorType(Type: Dtor_Base);

    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
      emitConstructorDestructorAlias(CGM, AliasDecl: GD, TargetDecl: BaseDecl);
      return;
    }

    if (CGType == StructorCodegen::RAUW) {
      // Record that references to the complete variant's symbol should be
      // replaced with the base variant instead of emitting a definition.
      StringRef MangledName = CGM.getMangledName(GD);
      auto *Aliasee = CGM.GetAddrOfGlobal(GD: BaseDecl);
      CGM.addReplacement(Name: MangledName, C: Aliasee);
      return;
    }
  }

  // The base destructor is equivalent to the base destructor of its
  // base class if there is exactly one non-virtual base class with a
  // non-trivial destructor, there are no fields with a non-trivial
  // destructor, and the body of the destructor is trivial.
  if (DD && GD.getDtorType() == Dtor_Base &&
      CGType != StructorCodegen::COMDAT &&
      !CGM.TryEmitBaseDestructorAsAlias(D: DD))
    return;

  // FIXME: The deleting destructor is equivalent to the selected operator
  // delete if:
  //  * either the delete is a destroying operator delete or the destructor
  //    would be trivial if it weren't virtual,
  //  * the conversion from the 'this' parameter to the first parameter of the
  //    destructor is equivalent to a bitcast,
  //  * the destructor does not have an implicit "this" return, and
  //  * the operator delete has the same calling convention and IR function
  //    type as the destructor.
  // In such cases we should try to emit the deleting dtor as an alias to the
  // selected 'operator delete'.

  // Emit an actual body for this variant.
  llvm::Function *Fn = CGM.codegenCXXStructor(GD);

  if (CGType == StructorCodegen::COMDAT) {
    // Put the function in a COMDAT group named with the C5/D5 comdat
    // mangling so the base/complete variants are kept together.
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    if (DD)
      getMangleContext().mangleCXXDtorComdat(D: DD, Out);
    else
      getMangleContext().mangleCXXCtorComdat(D: CD, Out);
    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Name: Out.str());
    Fn->setComdat(C);
  } else {
    CGM.maybeSetTrivialComdat(D: *MD, GO&: *Fn);
  }
}
4863
4864static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4865 // void *__cxa_begin_catch(void*);
4866 llvm::FunctionType *FTy = llvm::FunctionType::get(
4867 Result: CGM.Int8PtrTy, Params: CGM.Int8PtrTy, /*isVarArg=*/false);
4868
4869 return CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_begin_catch");
4870}
4871
4872static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4873 // void __cxa_end_catch();
4874 llvm::FunctionType *FTy =
4875 llvm::FunctionType::get(Result: CGM.VoidTy, /*isVarArg=*/false);
4876
4877 return CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_end_catch");
4878}
4879
4880static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4881 // void *__cxa_get_exception_ptr(void*);
4882 llvm::FunctionType *FTy = llvm::FunctionType::get(
4883 Result: CGM.Int8PtrTy, Params: CGM.Int8PtrTy, /*isVarArg=*/false);
4884
4885 return CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_get_exception_ptr");
4886}
4887
4888namespace {
4889 /// A cleanup to call __cxa_end_catch. In many cases, the caught
4890 /// exception type lets us state definitively that the thrown exception
4891 /// type does not have a destructor. In particular:
4892 /// - Catch-alls tell us nothing, so we have to conservatively
4893 /// assume that the thrown exception might have a destructor.
4894 /// - Catches by reference behave according to their base types.
4895 /// - Catches of non-record types will only trigger for exceptions
4896 /// of non-record types, which never have destructors.
4897 /// - Catches of record types can trigger for arbitrary subclasses
4898 /// of the caught type, so we have to assume the actual thrown
4899 /// exception type might have a throwing destructor, even if the
4900 /// caught type's destructor is trivial or nothrow.
4901 struct CallEndCatch final : EHScopeStack::Cleanup {
4902 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4903 bool MightThrow;
4904
4905 void Emit(CodeGenFunction &CGF, Flags flags) override {
4906 if (!MightThrow) {
4907 CGF.EmitNounwindRuntimeCall(callee: getEndCatchFn(CGM&: CGF.CGM));
4908 return;
4909 }
4910
4911 CGF.EmitRuntimeCallOrInvoke(callee: getEndCatchFn(CGM&: CGF.CGM));
4912 }
4913 };
4914}
4915
4916/// Emits a call to __cxa_begin_catch and enters a cleanup to call
4917/// __cxa_end_catch. If -fassume-nothrow-exception-dtor is specified, we assume
4918/// that the exception object's dtor is nothrow, therefore the __cxa_end_catch
4919/// call can be marked as nounwind even if EndMightThrow is true.
4920///
4921/// \param EndMightThrow - true if __cxa_end_catch might throw
4922static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4923 llvm::Value *Exn,
4924 bool EndMightThrow) {
4925 llvm::CallInst *call =
4926 CGF.EmitNounwindRuntimeCall(callee: getBeginCatchFn(CGM&: CGF.CGM), args: Exn);
4927
4928 CGF.EHStack.pushCleanup<CallEndCatch>(
4929 Kind: NormalAndEHCleanup,
4930 A: EndMightThrow && !CGF.CGM.getLangOpts().AssumeNothrowExceptionDtor);
4931
4932 return call;
4933}
4934
/// A "special initializer" callback for initializing a catch
/// parameter during catch initialization.
///
/// Calls __cxa_begin_catch (via CallBeginCatch) and initializes the catch
/// variable at \p ParamAddr from the in-flight exception, with separate
/// handling for catch-by-reference, scalar/complex catches, and
/// catch-by-value of record types (which may require a copy constructor).
static void InitCatchParam(CodeGenFunction &CGF,
                           const VarDecl &CatchParam,
                           Address ParamAddr,
                           SourceLocation Loc) {
  // Load the exception from where the landing pad saved it.
  llvm::Value *Exn = CGF.getExceptionFromSlot();

  CanQualType CatchType =
      CGF.CGM.getContext().getCanonicalType(T: CatchParam.getType());
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(T: CatchType);

  // If we're catching by reference, we can just cast the object
  // pointer to the appropriate pointer.
  if (isa<ReferenceType>(Val: CatchType)) {
    QualType CaughtType = cast<ReferenceType>(Val&: CatchType)->getPointeeType();
    // Only record types can have a destructor that might throw from
    // __cxa_end_catch (see CallEndCatch's comment).
    bool EndCatchMightThrow = CaughtType->isRecordType();

    // __cxa_begin_catch returns the adjusted object pointer.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndMightThrow: EndCatchMightThrow);

    // We have no way to tell the personality function that we're
    // catching by reference, so if we're catching a pointer,
    // __cxa_begin_catch will actually return that pointer by value.
    if (const PointerType *PT = dyn_cast<PointerType>(Val&: CaughtType)) {
      QualType PointeeType = PT->getPointeeType();

      // When catching by reference, generally we should just ignore
      // this by-value pointer and use the exception object instead.
      if (!PointeeType->isRecordType()) {

        // Exn points to the struct _Unwind_Exception header, which
        // we have to skip past in order to reach the exception data.
        unsigned HeaderSize =
            CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
        AdjustedExn =
            CGF.Builder.CreateConstGEP1_32(Ty: CGF.Int8Ty, Ptr: Exn, Idx0: HeaderSize);

      // However, if we're catching a pointer-to-record type that won't
      // work, because the personality function might have adjusted
      // the pointer.  There's actually no way for us to fully satisfy
      // the language/ABI contract here:  we can't use Exn because it
      // might have the wrong adjustment, but we can't use the by-value
      // pointer because it's off by a level of abstraction.
      //
      // The current solution is to dump the adjusted pointer into an
      // alloca, which breaks language semantics (because changing the
      // pointer doesn't change the exception) but at least works.
      // The better solution would be to filter out non-exact matches
      // and rethrow them, but this is tricky because the rethrow
      // really needs to be catchable by other sites at this landing
      // pad.  The best solution is to fix the personality function.
      } else {
        // Pull the pointer for the reference type off.
        llvm::Type *PtrTy = CGF.ConvertTypeForMem(T: CaughtType);

        // Create the temporary and write the adjusted pointer into it.
        Address ExnPtrTmp =
          CGF.CreateTempAlloca(Ty: PtrTy, align: CGF.getPointerAlign(), Name: "exn.byref.tmp");
        llvm::Value *Casted = CGF.Builder.CreateBitCast(V: AdjustedExn, DestTy: PtrTy);
        CGF.Builder.CreateStore(Val: Casted, Addr: ExnPtrTmp);

        // Bind the reference to the temporary.
        AdjustedExn = ExnPtrTmp.emitRawPointer(CGF);
      }
    }

    // Store the (possibly adjusted) pointer into the reference variable.
    llvm::Value *ExnCast =
        CGF.Builder.CreateBitCast(V: AdjustedExn, DestTy: LLVMCatchTy, Name: "exn.byref");
    CGF.Builder.CreateStore(Val: ExnCast, Addr: ParamAddr);
    return;
  }

  // Scalars and complexes.
  TypeEvaluationKind TEK = CGF.getEvaluationKind(T: CatchType);
  if (TEK != TEK_Aggregate) {
    // Non-record catches never need a throwing __cxa_end_catch.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndMightThrow: false);

    // If the catch type is a pointer type, __cxa_begin_catch returns
    // the pointer by value.
    if (CatchType->hasPointerRepresentation()) {
      llvm::Value *CastExn =
          CGF.Builder.CreateBitCast(V: AdjustedExn, DestTy: LLVMCatchTy, Name: "exn.casted");

      // Honor the ObjC lifetime qualifier on the catch variable, if any.
      switch (CatchType.getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        CastExn = CGF.EmitARCRetainNonBlock(value: CastExn);
        [[fallthrough]];

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(Val: CastExn, Addr: ParamAddr);
        return;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(addr: ParamAddr, value: CastExn);
        return;
      }
      llvm_unreachable("bad ownership qualifier!");
    }

    // Otherwise, it returns a pointer into the exception object.

    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(V: AdjustedExn, T: CatchType);
    LValue destLV = CGF.MakeAddrLValue(Addr: ParamAddr, T: CatchType);
    switch (TEK) {
    case TEK_Complex:
      CGF.EmitStoreOfComplex(V: CGF.EmitLoadOfComplex(src: srcLV, loc: Loc), dest: destLV,
                             /*init*/ isInit: true);
      return;
    case TEK_Scalar: {
      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(lvalue: srcLV, Loc);
      CGF.EmitStoreOfScalar(value: ExnLoad, lvalue: destLV, /*init*/ isInit: true);
      return;
    }
    case TEK_Aggregate:
      llvm_unreachable("evaluation kind filtered out!");
    }
    llvm_unreachable("bad evaluation kind");
  }

  // Aggregate (record) catch-by-value.
  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
  auto catchRD = CatchType->getAsCXXRecordDecl();
  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(CD: catchRD);

  llvm::Type *PtrTy = CGF.DefaultPtrTy;

  // Check for a copy expression.  If we don't have a copy expression,
  // that means a trivial copy is okay.
  const Expr *copyExpr = CatchParam.getInit();
  if (!copyExpr) {
    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, EndMightThrow: true);
    Address adjustedExn(CGF.Builder.CreateBitCast(V: rawAdjustedExn, DestTy: PtrTy),
                        LLVMCatchTy, caughtExnAlignment);
    LValue Dest = CGF.MakeAddrLValue(Addr: ParamAddr, T: CatchType);
    LValue Src = CGF.MakeAddrLValue(Addr: adjustedExn, T: CatchType);
    CGF.EmitAggregateCopy(Dest, Src, EltTy: CatchType, MayOverlap: AggValueSlot::DoesNotOverlap);
    return;
  }

  // We have to call __cxa_get_exception_ptr to get the adjusted
  // pointer before copying.
  llvm::CallInst *rawAdjustedExn =
      CGF.EmitNounwindRuntimeCall(callee: getGetExceptionPtrFn(CGM&: CGF.CGM), args: Exn);

  // Cast that to the appropriate type.
  Address adjustedExn(CGF.Builder.CreateBitCast(V: rawAdjustedExn, DestTy: PtrTy),
                      LLVMCatchTy, caughtExnAlignment);

  // The copy expression is defined in terms of an OpaqueValueExpr.
  // Find it and map it to the adjusted expression.
  CodeGenFunction::OpaqueValueMapping
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(expr: copyExpr),
           CGF.MakeAddrLValue(Addr: adjustedExn, T: CatchParam.getType()));

  // Call the copy ctor in a terminate scope.
  CGF.EHStack.pushTerminate();

  // Perform the copy construction.
  CGF.EmitAggExpr(E: copyExpr,
                  AS: AggValueSlot::forAddr(addr: ParamAddr, quals: Qualifiers(),
                                          isDestructed: AggValueSlot::IsNotDestructed,
                                          needsGC: AggValueSlot::DoesNotNeedGCBarriers,
                                          isAliased: AggValueSlot::IsNotAliased,
                                          mayOverlap: AggValueSlot::DoesNotOverlap));

  // Leave the terminate scope.
  CGF.EHStack.popTerminate();

  // Undo the opaque value mapping.
  opaque.pop();

  // Finally we can call __cxa_begin_catch.
  CallBeginCatch(CGF, Exn, EndMightThrow: true);
}
5112
/// Begins a catch statement by initializing the catch variable and
/// calling __cxa_begin_catch.
void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                   const CXXCatchStmt *S) {
  // We have to be very careful with the ordering of cleanups here:
  //   C++ [except.throw]p4:
  //     The destruction [of the exception temporary] occurs
  //     immediately after the destruction of the object declared in
  //     the exception-declaration in the handler.
  //
  // So the precise ordering is:
  //   1.  Construct catch variable.
  //   2.  __cxa_begin_catch
  //   3.  Enter __cxa_end_catch cleanup
  //   4.  Enter dtor cleanup
  //
  // We do this by using a slightly abnormal initialization process.
  // Delegation sequence:
  //   - ExitCXXTryStmt opens a RunCleanupsScope
  //   - EmitAutoVarAlloca creates the variable and debug info
  //   - InitCatchParam initializes the variable from the exception
  //   - CallBeginCatch calls __cxa_begin_catch
  //   - CallBeginCatch enters the __cxa_end_catch cleanup
  //   - EmitAutoVarCleanups enters the variable destructor cleanup
  //   - EmitCXXTryStmt emits the code for the catch body
  //   - EmitCXXTryStmt close the RunCleanupsScope

  VarDecl *CatchParam = S->getExceptionDecl();
  // No exception declaration (e.g. `catch (...)` or an unnamed catch type):
  // nothing to initialize, just enter the catch.
  if (!CatchParam) {
    llvm::Value *Exn = CGF.getExceptionFromSlot();
    CallBeginCatch(CGF, Exn, EndMightThrow: true);
    return;
  }

  // Emit the local.
  CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(var: *CatchParam);
  {
    // NOTE(review): presumably groups the initialization's debug locations
    // into a single debug atom for stepping -- confirm against ApplyAtomGroup.
    ApplyAtomGroup Grp(CGF.getDebugInfo());
    InitCatchParam(CGF, CatchParam: *CatchParam, ParamAddr: var.getObjectAddress(CGF),
                   Loc: S->getBeginLoc());
  }
  CGF.EmitAutoVarCleanups(emission: var);
}
5156
/// Get or define the following function:
///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
/// This code is used only in C++.
///
/// The helper calls __cxa_begin_catch on the exception (so the violating
/// exception is marked handled) and then std::terminate. It is defined
/// lazily: the body is emitted only the first time the function is needed
/// in this module.
static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
  ASTContext &C = CGM.getContext();
  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      resultType: C.VoidTy, argTypes: {C.getPointerType(T: C.CharTy)});
  llvm::FunctionType *fnTy = CGM.getTypes().GetFunctionType(Info: FI);
  llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
      Ty: fnTy, Name: "__clang_call_terminate", ExtraAttrs: llvm::AttributeList(), /*Local=*/true);
  llvm::Function *fn =
      cast<llvm::Function>(Val: fnRef.getCallee()->stripPointerCasts());
  // Only emit the body if it hasn't been emitted already.
  if (fn->empty()) {
    CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI, F: fn, /*IsThunk=*/false);
    CGM.SetLLVMFunctionAttributesForDefinition(D: nullptr, F: fn);
    fn->setDoesNotThrow();
    fn->setDoesNotReturn();

    // What we really want is to massively penalize inlining without
    // forbidding it completely.  The difference between that and
    // 'noinline' is negligible.
    fn->addFnAttr(Kind: llvm::Attribute::NoInline);

    // Allow this function to be shared across translation units, but
    // we don't want it to turn into an exported symbol.
    fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
    fn->setVisibility(llvm::Function::HiddenVisibility);
    if (CGM.supportsCOMDAT())
      fn->setComdat(CGM.getModule().getOrInsertComdat(Name: fn->getName()));

    // Set up the function.
    llvm::BasicBlock *entry =
        llvm::BasicBlock::Create(Context&: CGM.getLLVMContext(), Name: "", Parent: fn);
    CGBuilderTy builder(CGM, entry);

    // Pull the exception pointer out of the parameter list.
    llvm::Value *exn = &*fn->arg_begin();

    // Call __cxa_begin_catch(exn).
    llvm::CallInst *catchCall = builder.CreateCall(Callee: getBeginCatchFn(CGM), Args: exn);
    catchCall->setDoesNotThrow();
    catchCall->setCallingConv(CGM.getRuntimeCC());

    // Call std::terminate().
    llvm::CallInst *termCall = builder.CreateCall(Callee: CGM.getTerminateFn());
    termCall->setDoesNotThrow();
    termCall->setDoesNotReturn();
    termCall->setCallingConv(CGM.getRuntimeCC());

    // std::terminate cannot return.
    builder.CreateUnreachable();
  }
  return fnRef;
}
5211
5212llvm::CallInst *
5213ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
5214 llvm::Value *Exn) {
5215 // In C++, we want to call __cxa_begin_catch() before terminating.
5216 if (Exn) {
5217 assert(CGF.CGM.getLangOpts().CPlusPlus);
5218 return CGF.EmitNounwindRuntimeCall(callee: getClangCallTerminateFn(CGM&: CGF.CGM), args: Exn);
5219 }
5220 return CGF.EmitNounwindRuntimeCall(callee: CGF.CGM.getTerminateFn());
5221}
5222
5223std::pair<llvm::Value *, const CXXRecordDecl *>
5224ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
5225 const CXXRecordDecl *RD) {
5226 return {CGF.GetVTablePtr(This, VTableTy: CGM.Int8PtrTy, VTableClass: RD), RD};
5227}
5228
5229llvm::Constant *
5230ItaniumCXXABI::getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD) {
5231 const CXXMethodDecl *origMD =
5232 cast<CXXMethodDecl>(Val: CGM.getItaniumVTableContext()
5233 .findOriginalMethod(GD: MD->getCanonicalDecl())
5234 .getDecl());
5235 llvm::Constant *thunk = getOrCreateVirtualFunctionPointerThunk(MD: origMD);
5236 QualType funcType = CGM.getContext().getMemberPointerType(
5237 T: MD->getType(), /*Qualifier=*/std::nullopt, Cls: MD->getParent());
5238 return CGM.getMemberFunctionPointer(Pointer: thunk, FT: funcType);
5239}
5240
5241void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
5242 const CXXCatchStmt *C) {
5243 if (CGF.getTarget().hasFeature(Feature: "exception-handling"))
5244 CGF.EHStack.pushCleanup<CatchRetScope>(
5245 Kind: NormalCleanup, A: cast<llvm::CatchPadInst>(Val: CGF.CurrentFuncletPad));
5246 ItaniumCXXABI::emitBeginCatch(CGF, S: C);
5247}
5248
5249llvm::CallInst *
5250WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
5251 llvm::Value *Exn) {
5252 // Itanium ABI calls __clang_call_terminate(), which __cxa_begin_catch() on
5253 // the violating exception to mark it handled, but it is currently hard to do
5254 // with wasm EH instruction structure with catch/catch_all, we just call
5255 // std::terminate and ignore the violating exception as in CGCXXABI in Wasm EH
5256 // and call __clang_call_terminate only in Emscripten EH.
5257 // TODO Consider code transformation that makes calling __clang_call_terminate
5258 // in Wasm EH possible.
5259 if (Exn && !EHPersonality::get(CGF).isWasmPersonality()) {
5260 assert(CGF.CGM.getLangOpts().CPlusPlus);
5261 return CGF.EmitNounwindRuntimeCall(callee: getClangCallTerminateFn(CGM&: CGF.CGM), args: Exn);
5262 }
5263 return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
5264}
5265
/// Register a global destructor as best as we know how.
///
/// TLS variables are registered via __pt_atexit_np and can never be
/// unregistered; non-TLS variables are registered with atexit() and also get
/// an sterm finalizer that can unregister and run the dtor early.
void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                                  llvm::FunctionCallee Dtor,
                                  llvm::Constant *Addr) {
  if (D.getTLSKind() != VarDecl::TLS_None) {
    llvm::PointerType *PtrTy = CGF.DefaultPtrTy;

    // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
    llvm::FunctionType *AtExitTy =
        llvm::FunctionType::get(Result: CGM.IntTy, Params: {CGM.IntTy, PtrTy}, isVarArg: true);

    // Fetch the actual function.
    llvm::FunctionCallee AtExit =
        CGM.CreateRuntimeFunction(Ty: AtExitTy, Name: "__pt_atexit_np");

    // Create __dtor function for the var decl.
    llvm::Function *DtorStub = CGF.createTLSAtExitStub(VD: D, Dtor, Addr, AtExit);

    // Register above __dtor with atexit().
    // First param is flags and must be 0, second param is function ptr
    llvm::Value *NV = llvm::Constant::getNullValue(Ty: CGM.IntTy);
    CGF.EmitNounwindRuntimeCall(callee: AtExit, args: {NV, DtorStub});

    // Cannot unregister TLS __dtor so done
    return;
  }

  // Create __dtor function for the var decl.
  llvm::Function *DtorStub =
      cast<llvm::Function>(Val: CGF.createAtExitStub(VD: D, Dtor, Addr));

  // Register above __dtor with atexit().
  CGF.registerGlobalDtorWithAtExit(dtorStub: DtorStub);

  // Emit __finalize function to unregister __dtor and (as appropriate) call
  // __dtor.
  emitCXXStermFinalizer(D, dtorStub: DtorStub, addr: Addr);
}
5304
/// Emit the sterm finalizer for variable \p D: a function that calls
/// unatexit() on \p dtorStub and, if the dtor was still registered
/// (unatexit returned 0), runs the dtor stub itself. The finalizer is then
/// scheduled according to the variable's init priority / linkage.
void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                                     llvm::Constant *addr) {
  llvm::FunctionType *FTy = llvm::FunctionType::get(Result: CGM.VoidTy, isVarArg: false);
  // Mangle the finalizer's name from the variable.
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getMangleContext().mangleDynamicStermFinalizer(D: &D, Out);
  }

  // Create the finalization action associated with a variable.
  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
      ty: FTy, name: FnName.str(), FI, Loc: D.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GD: GlobalDecl(), RetTy: CGM.getContext().VoidTy, Fn: StermFinalizer, FnInfo: FI,
                    Args: FunctionArgList(), Loc: D.getLocation(),
                    StartLoc: D.getInit()->getExprLoc());

  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // the unatexit returns a value of 0, meaning that the cleanup is still
  // pending (and we should call the __dtor function).
  llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);

  llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(Arg: V, Name: "needs_destruct");

  llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock(name: "destruct.call");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock(name: "destruct.end");

  // Check if unatexit returns a value of 0. If it does, jump to
  // DestructCallBlock, otherwise jump to EndBlock directly.
  CGF.Builder.CreateCondBr(Cond: NeedsDestruct, True: DestructCallBlock, False: EndBlock);

  CGF.EmitBlock(BB: DestructCallBlock);

  // Emit the call to dtorStub.
  llvm::CallInst *CI = CGF.Builder.CreateCall(Callee: dtorStub);

  // Make sure the call and the callee agree on calling convention.
  CI->setCallingConv(dtorStub->getCallingConv());

  CGF.EmitBlock(BB: EndBlock);

  CGF.FinishFunction();

  // Schedule the finalizer: prioritized entry if the variable has an init
  // priority, a plain llvm.global_dtors entry for unordered (template /
  // discardable-ODR) initializations, and the default sterm list otherwise.
  if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
    CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
                                             Priority: IPA->getPriority());
  } else if (isTemplateInstantiation(Kind: D.getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(VD: &D) == GVA_DiscardableODR) {
    // According to C++ [basic.start.init]p2, class template static data
    // members (i.e., implicitly or explicitly instantiated specializations)
    // have unordered initialization. As a consequence, we can put them into
    // their own llvm.global_dtors entry.
    CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, Priority: 65535);
  } else {
    CGM.AddCXXStermFinalizerEntry(DtorFn: StermFinalizer);
  }
}
5366