//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides C++ code generation targeting the Itanium C++ ABI. The class
// in this file generates structures that follow the Itanium C++ ABI, which is
// documented at:
//  https://itanium-cxx-abi.github.io/cxx-abi/abi.html
//  https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
//
// It also supports the closely-related ARM ABI, documented at:
// https://developer.arm.com/documentation/ihi0041/g/
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/Type.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ScopedPrinter.h"

#include <optional>

using namespace clang;
using namespace CodeGen;

namespace {
class ItaniumCXXABI : public CodeGen::CGCXXABI {
  /// VTables - All the vtables which have been defined.
  llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;

  /// All the thread wrapper functions that have been used.
  llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
      ThreadWrappers;

protected:
  bool UseARMMethodPtrABI;
  bool UseARMGuardVarABI;
  bool Use32BitVTableOffsetABI;

  ItaniumMangleContext &getMangleContext() {
    return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
  }

public:
  ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
                bool UseARMMethodPtrABI = false,
                bool UseARMGuardVarABI = false) :
    CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
    UseARMGuardVarABI(UseARMGuardVarABI),
    Use32BitVTableOffsetABI(false) { }

  bool classifyReturnType(CGFunctionInfo &FI) const override;

  RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
    // If C++ prohibits us from making a copy, pass by address.
    if (!RD->canPassInRegisters())
      return RAA_Indirect;
    return RAA_Default;
  }

  bool isThisCompleteObject(GlobalDecl GD) const override {
    // The Itanium ABI has separate complete-object vs. base-object
    // variants of both constructors and destructors.
    if (isa<CXXDestructorDecl>(GD.getDecl())) {
      switch (GD.getDtorType()) {
      case Dtor_Complete:
      case Dtor_Deleting:
        return true;

      case Dtor_Base:
        return false;

      case Dtor_Comdat:
        llvm_unreachable("emitting dtor comdat as function?");
      }
      llvm_unreachable("bad dtor kind");
    }
    if (isa<CXXConstructorDecl>(GD.getDecl())) {
      switch (GD.getCtorType()) {
      case Ctor_Complete:
        return true;

      case Ctor_Base:
        return false;

      case Ctor_CopyingClosure:
      case Ctor_DefaultClosure:
        llvm_unreachable("closure ctors in Itanium ABI?");

      case Ctor_Comdat:
        llvm_unreachable("emitting ctor comdat as function?");
      }
      llvm_unreachable("bad ctor kind");
    }

    // No other kinds.
    return false;
  }

  bool isZeroInitializable(const MemberPointerType *MPT) override;

  llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;

  CGCallee
  EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
                                  const Expr *E,
                                  Address This,
                                  llvm::Value *&ThisPtrForCall,
                                  llvm::Value *MemFnPtr,
                                  const MemberPointerType *MPT) override;

  llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
                                            Address Base, llvm::Value *MemPtr,
                                            const MemberPointerType *MPT,
                                            bool IsInBounds) override;

  llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *Src) override;
  llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
                                              llvm::Constant *Src) override;

  llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;

  llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
  llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
                                        CharUnits offset) override;
  llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
  llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
                                     CharUnits ThisAdjustment);

  llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L, llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) override;

  llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *Addr,
                                          const MemberPointerType *MPT) override;

  void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
                               Address Ptr, QualType ElementType,
                               const CXXDestructorDecl *Dtor) override;

  void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
  void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;

  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;

  llvm::CallInst *
  emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                      llvm::Value *Exn) override;

  void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
  llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
  CatchTypeInfo
  getAddrOfCXXCatchHandlerType(QualType Ty,
                               QualType CatchHandlerType) override {
    return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
  }

  bool shouldTypeidBeNullChecked(QualType SrcRecordTy) override;
  void EmitBadTypeidCall(CodeGenFunction &CGF) override;
  llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
                          Address ThisPtr,
                          llvm::Type *StdTypeInfoPtrTy) override;

  bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                          QualType SrcRecordTy) override;

  /// Determine whether we know that all instances of type RecordTy will have
  /// the same vtable pointer value, one that is distinct from all other
  /// vtable pointers. While the Itanium ABI requires this, in practice it
  /// does not always hold due to language extensions.
  bool hasUniqueVTablePointer(QualType RecordTy) {
    const CXXRecordDecl *RD = RecordTy->getAsCXXRecordDecl();

    // Under -fapple-kext, multiple definitions of the same vtable may be
    // emitted.
    if (!CGM.getCodeGenOpts().AssumeUniqueVTables ||
        getContext().getLangOpts().AppleKext)
      return false;

    // If the type_info* would be null, the vtable might be merged with that of
    // another type.
    if (!CGM.shouldEmitRTTI())
      return false;

    // If there's only one definition of the vtable in the program, it has a
    // unique address.
    if (!llvm::GlobalValue::isWeakForLinker(CGM.getVTableLinkage(RD)))
      return true;

    // Even if there are multiple definitions of the vtable, they are required
    // by the ABI to use the same symbol name, so should be merged at load
    // time. However, if the class has hidden visibility, there can be
    // different versions of the class in different modules, and the ABI
    // library might treat them as being the same.
    if (CGM.GetLLVMVisibility(RD->getVisibility()) !=
        llvm::GlobalValue::DefaultVisibility)
      return false;

    return true;
  }

  bool shouldEmitExactDynamicCast(QualType DestRecordTy) override {
    return hasUniqueVTablePointer(DestRecordTy);
  }

  llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address Value,
                                   QualType SrcRecordTy, QualType DestTy,
                                   QualType DestRecordTy,
                                   llvm::BasicBlock *CastEnd) override;

  llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address ThisAddr,
                                    QualType SrcRecordTy, QualType DestTy,
                                    QualType DestRecordTy,
                                    llvm::BasicBlock *CastSuccess,
                                    llvm::BasicBlock *CastFail) override;

  llvm::Value *emitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
                                     QualType SrcRecordTy) override;

  bool EmitBadCastCall(CodeGenFunction &CGF) override;

  llvm::Value *
  GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
                            const CXXRecordDecl *ClassDecl,
                            const CXXRecordDecl *BaseClassDecl) override;

  void EmitCXXConstructors(const CXXConstructorDecl *D) override;

  AddedStructorArgCounts
  buildStructorSignature(GlobalDecl GD,
                         SmallVectorImpl<CanQualType> &ArgTys) override;

  bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
                              CXXDtorType DT) const override {
    // Itanium does not emit any destructor variant as an inline thunk.
    // Delegating may occur as an optimization, but all variants are either
    // emitted with external linkage or as linkonce if they are inline and used.
    return false;
  }

  void EmitCXXDestructors(const CXXDestructorDecl *D) override;

  void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
                                 FunctionArgList &Params) override;

  void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;

  AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
                                               const CXXConstructorDecl *D,
                                               CXXCtorType Type,
                                               bool ForVirtualBase,
                                               bool Delegating) override;

  llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
                                             const CXXDestructorDecl *DD,
                                             CXXDtorType Type,
                                             bool ForVirtualBase,
                                             bool Delegating) override;

  void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
                          CXXDtorType Type, bool ForVirtualBase,
                          bool Delegating, Address This,
                          QualType ThisTy) override;

  void emitVTableDefinitions(CodeGenVTables &CGVT,
                             const CXXRecordDecl *RD) override;

  bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
                                           CodeGenFunction::VPtr Vptr) override;

  bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
    return true;
  }

  llvm::Constant *
  getVTableAddressPoint(BaseSubobject Base,
                        const CXXRecordDecl *VTableClass) override;

  llvm::Value *getVTableAddressPointInStructor(
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
      BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;

  llvm::Value *getVTableAddressPointInStructorWithVTT(
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
      BaseSubobject Base, const CXXRecordDecl *NearestVBase);

  llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
                                        CharUnits VPtrOffset) override;

  CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
                                     Address This, llvm::Type *Ty,
                                     SourceLocation Loc) override;

  llvm::Value *
  EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor,
                            CXXDtorType DtorType, Address This,
                            DeleteOrMemberCallExpr E,
                            llvm::CallBase **CallOrInvoke) override;

  void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;

  bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
  bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;

  void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
                       bool ReturnAdjustment) override {
    // Allow inlining of thunks by emitting them with available_externally
    // linkage together with vtables when needed.
    if (ForVTable && !Thunk->hasLocalLinkage())
      Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
    CGM.setGVProperties(Thunk, GD);
  }

  bool exportThunk() override { return true; }

  llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
                                     const CXXRecordDecl *UnadjustedThisClass,
                                     const ThunkInfo &TI) override;

  llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const CXXRecordDecl *UnadjustedRetClass,
                                       const ReturnAdjustment &RA) override;

  size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
                              FunctionArgList &Args) const override {
    assert(!Args.empty() && "expected the arglist to not be empty!");
    return Args.size() - 1;
  }

  StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
  StringRef GetDeletedVirtualCallName() override
    { return "__cxa_deleted_virtual"; }

  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
                                   Address allocPtr,
                                   CharUnits cookieSize) override;

  void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
                       llvm::GlobalVariable *DeclPtr,
                       bool PerformInit) override;
  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::FunctionCallee dtor,
                          llvm::Constant *addr) override;

  llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                                llvm::Value *Val);
  void EmitThreadLocalInitFuncs(
      CodeGenModule &CGM,
      ArrayRef<const VarDecl *> CXXThreadLocals,
      ArrayRef<llvm::Function *> CXXThreadLocalInits,
      ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;

  bool usesThreadWrapperFunction(const VarDecl *VD) const override {
    return !isEmittedWithConstantInitializer(VD) ||
           mayNeedDestruction(VD);
  }
  LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
                                      QualType LValType) override;

  bool NeedsVTTParameter(GlobalDecl GD) override;

  llvm::Constant *
  getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD);

  /**************************** RTTI Uniqueness ******************************/

protected:
  /// Returns true if the ABI requires RTTI type_info objects to be unique
  /// across a program.
  virtual bool shouldRTTIBeUnique() const { return true; }

public:
  /// What sort of unique-RTTI behavior should we use?
  enum RTTIUniquenessKind {
    /// We are guaranteeing, or need to guarantee, that the RTTI string
    /// is unique.
    RUK_Unique,

    /// We are not guaranteeing uniqueness for the RTTI string, so we
    /// can demote to hidden visibility but must use string comparisons.
    RUK_NonUniqueHidden,

    /// We are not guaranteeing uniqueness for the RTTI string, so we
    /// have to use string comparisons, but we also have to emit it with
    /// non-hidden visibility.
    RUK_NonUniqueVisible
  };

  /// Return the required visibility status for the given type and linkage in
  /// the current ABI.
  RTTIUniquenessKind
  classifyRTTIUniqueness(QualType CanTy,
                         llvm::GlobalValue::LinkageTypes Linkage) const;
  friend class ItaniumRTTIBuilder;

  void emitCXXStructor(GlobalDecl GD) override;

  std::pair<llvm::Value *, const CXXRecordDecl *>
  LoadVTablePtr(CodeGenFunction &CGF, Address This,
                const CXXRecordDecl *RD) override;

private:
  llvm::Constant *
  getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD);

  bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
    const auto &VtableLayout =
        CGM.getItaniumVTableContext().getVTableLayout(RD);

    for (const auto &VtableComponent : VtableLayout.vtable_components()) {
      // Skip empty slot.
      if (!VtableComponent.isUsedFunctionPointerKind())
        continue;

      const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
      const FunctionDecl *FD = Method->getDefinition();
      const bool IsInlined =
          Method->getCanonicalDecl()->isInlined() || (FD && FD->isInlined());
      if (!IsInlined)
        continue;

      StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
      auto *Entry = CGM.GetGlobalValue(Name);
      // This checks whether the virtual inline function has already been
      // emitted. Note that it is possible for this inline function to be
      // emitted after we try to emit the vtable speculatively. Because of
      // this we do an extra pass after emitting all deferred vtables to
      // find and emit these vtables opportunistically.
      if (!Entry || Entry->isDeclaration())
        return true;
    }
    return false;
  }

  bool isVTableHidden(const CXXRecordDecl *RD) const {
    const auto &VtableLayout =
        CGM.getItaniumVTableContext().getVTableLayout(RD);

    for (const auto &VtableComponent : VtableLayout.vtable_components()) {
      if (VtableComponent.isRTTIKind()) {
        const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
        if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
          return true;
      } else if (VtableComponent.isUsedFunctionPointerKind()) {
        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
        if (Method->getVisibility() == Visibility::HiddenVisibility &&
            !Method->isDefined())
          return true;
      }
    }
    return false;
  }
};

class ARMCXXABI : public ItaniumCXXABI {
public:
  ARMCXXABI(CodeGen::CodeGenModule &CGM) :
    ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                  /*UseARMGuardVarABI=*/true) {}

  bool constructorsAndDestructorsReturnThis() const override { return true; }

  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
                           QualType ResTy) override;

  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
                                   CharUnits cookieSize) override;
};

class AppleARM64CXXABI : public ARMCXXABI {
public:
  AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
    Use32BitVTableOffsetABI = true;
  }

  // ARM64 libraries are prepared for non-unique RTTI.
  bool shouldRTTIBeUnique() const override { return false; }
};

class FuchsiaCXXABI final : public ItaniumCXXABI {
public:
  explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

private:
  bool constructorsAndDestructorsReturnThis() const override { return true; }
};

class WebAssemblyCXXABI final : public ItaniumCXXABI {
public:
  explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                      /*UseARMGuardVarABI=*/true) {}
  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
  llvm::CallInst *
  emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                      llvm::Value *Exn) override;

private:
  bool constructorsAndDestructorsReturnThis() const override { return true; }
  bool canCallMismatchedFunctionType() const override { return false; }
};

class XLCXXABI final : public ItaniumCXXABI {
public:
  explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::FunctionCallee dtor,
                          llvm::Constant *addr) override;

  bool useSinitAndSterm() const override { return true; }

private:
  void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                             llvm::Constant *addr);
};
}

CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
  switch (CGM.getContext().getCXXABIKind()) {
  // For IR-generation purposes, there's no significant difference
  // between the ARM and iOS ABIs.
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
    return new ARMCXXABI(CGM);

  case TargetCXXABI::AppleARM64:
    return new AppleARM64CXXABI(CGM);

  case TargetCXXABI::Fuchsia:
    return new FuchsiaCXXABI(CGM);

  // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
  // include the other 32-bit ARM oddities: constructor/destructor return values
  // and array cookies.
  case TargetCXXABI::GenericAArch64:
    return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                             /*UseARMGuardVarABI=*/true);

  case TargetCXXABI::GenericMIPS:
    return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);

  case TargetCXXABI::WebAssembly:
    return new WebAssemblyCXXABI(CGM);

  case TargetCXXABI::XL:
    return new XLCXXABI(CGM);

  case TargetCXXABI::GenericItanium:
    return new ItaniumCXXABI(CGM);

  case TargetCXXABI::Microsoft:
    llvm_unreachable("Microsoft ABI is not Itanium-based");
  }
  llvm_unreachable("bad ABI kind");
}

llvm::Type *
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
  if (MPT->isMemberDataPointer())
    return CGM.PtrDiffTy;
  return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
}

/// In the Itanium and ARM ABIs, method pointers have the form:
///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
///
/// In the Itanium ABI:
///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
///  - the this-adjustment is (memptr.adj)
///  - the virtual offset is (memptr.ptr - 1)
///
/// In the ARM ABI:
///  - method pointers are virtual if (memptr.adj & 1) is nonzero
///  - the this-adjustment is (memptr.adj >> 1)
///  - the virtual offset is (memptr.ptr)
/// ARM uses 'adj' for the virtual flag because Thumb functions
/// may be only single-byte aligned.
///
/// If the member is virtual, the adjusted 'this' pointer points
/// to a vtable pointer from which the virtual offset is applied.
///
/// If the member is non-virtual, memptr.ptr is the address of
/// the function to call.
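///
/// For example (illustrative, assuming 8-byte pointers and no
/// this-adjustment): a non-virtual member function F is encoded as
/// { (ptrdiff_t)&F, 0 } under both ABIs, while a virtual function at
/// vtable byte offset 16 is { 17, 0 } under Itanium and { 16, 1 }
/// under ARM.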
CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
    CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
    llvm::Value *&ThisPtrForCall,
    llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  const FunctionProtoType *FPT =
      MPT->getPointeeType()->castAs<FunctionProtoType>();
  auto *RD = MPT->getMostRecentCXXRecordDecl();

  llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);

  llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
  llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
  llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");

  // Extract memptr.adj, which is in the second field.
  llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");

  // Compute the true adjustment.
  llvm::Value *Adj = RawAdj;
  if (UseARMMethodPtrABI)
    Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");

  // Apply the adjustment and cast back to the original struct type
  // for consistency.
  llvm::Value *This = ThisAddr.emitRawPointer(CGF);
  This = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), This, Adj);
  ThisPtrForCall = This;

  // Load the function pointer.
  llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");

  // If the LSB in the function pointer is 1, the function pointer points to
  // a virtual function.
  llvm::Value *IsVirtual;
  if (UseARMMethodPtrABI)
    IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
  else
    IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
  IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
  Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);

  // In the virtual path, the adjustment left 'This' pointing to the
  // vtable of the correct base subobject. The "function pointer" is an
  // offset within the vtable (+1 for the virtual flag on non-ARM).
  CGF.EmitBlock(FnVirtual);

  // Cast the adjusted this to a pointer to vtable pointer and load.
  llvm::Type *VTableTy = CGF.CGM.GlobalsInt8PtrTy;
  CharUnits VTablePtrAlign =
      CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
                                        CGF.getPointerAlign());
  llvm::Value *VTable = CGF.GetVTablePtr(
      Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);

  // Apply the offset.
  // On ARM64, to reserve extra space in virtual member function pointers,
  // we only pay attention to the low 32 bits of the offset.
  llvm::Value *VTableOffset = FnAsInt;
  if (!UseARMMethodPtrABI)
    VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
  if (Use32BitVTableOffsetABI) {
    VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
    VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
  }

  // Check the address of the function pointer if CFI on member function
  // pointers is enabled.
  llvm::Constant *CheckSourceLocation;
  llvm::Constant *CheckTypeDesc;
  bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
                            CGM.HasHiddenLTOVisibility(RD);

  if (ShouldEmitCFICheck) {
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      if (BinOp->isPtrMemOp() &&
          BinOp->getRHS()
              ->getType()
              ->hasPointeeToToCFIUncheckedCalleeFunctionType())
        ShouldEmitCFICheck = false;
    }
  }

  bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
                           CGM.HasHiddenLTOVisibility(RD);
  bool ShouldEmitWPDInfo =
      CGM.getCodeGenOpts().WholeProgramVTables &&
      // Don't insert type tests if we are forcing public visibility.
      !CGM.AlwaysHasLTOVisibilityPublic(RD);
  llvm::Value *VirtualFn = nullptr;

  {
    auto CheckOrdinal = SanitizerKind::SO_CFIMFCall;
    auto CheckHandler = SanitizerHandler::CFICheckFail;
    SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);

    llvm::Value *TypeId = nullptr;
    llvm::Value *CheckResult = nullptr;

    if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
      // If doing CFI, VFE or WPD, we will need the metadata node to check
      // against.
      llvm::Metadata *MD =
          CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
      TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
    }

    if (ShouldEmitVFEInfo) {
      llvm::Value *VFPAddr =
          Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);

      // If doing VFE, load from the vtable with a type.checked.load intrinsic
      // call. Note that we use the GEP to calculate the address to load from
      // and pass 0 as the offset to the intrinsic. This is because every
      // vtable slot of the correct type is marked with matching metadata, and
      // we know that the load must be from one of these slots.
      llvm::Value *CheckedLoad = Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
          {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
      CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
      VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
    } else {
      // When not doing VFE, emit a normal load, as it allows more
      // optimisations than type.checked.load.
      if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
        llvm::Value *VFPAddr =
            Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
        llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
                                      ? llvm::Intrinsic::type_test
                                      : llvm::Intrinsic::public_type_test;

        CheckResult =
            Builder.CreateCall(CGM.getIntrinsic(IID), {VFPAddr, TypeId});
      }

      if (CGM.getItaniumVTableContext().isRelativeLayout()) {
        VirtualFn = CGF.Builder.CreateCall(
            CGM.getIntrinsic(llvm::Intrinsic::load_relative,
                             {VTableOffset->getType()}),
            {VTable, VTableOffset});
      } else {
        llvm::Value *VFPAddr =
            CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
        VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.UnqualPtrTy, VFPAddr,
                                                  CGF.getPointerAlign(),
                                                  "memptr.virtualfn");
      }
    }
    assert(VirtualFn && "Virtual function pointer not created!");
    assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
            CheckResult) &&
           "Check result required but not created!");

    if (ShouldEmitCFICheck) {
      // If doing CFI, emit the check.
      CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
      CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
      llvm::Constant *StaticData[] = {
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
          CheckSourceLocation,
          CheckTypeDesc,
      };

      if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
        CGF.EmitTrapCheck(CheckResult, CheckHandler);
      } else {
        llvm::Value *AllVtables = llvm::MetadataAsValue::get(
            CGM.getLLVMContext(),
            llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
        llvm::Value *ValidVtable = Builder.CreateCall(
            CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
        CGF.EmitCheck(std::make_pair(CheckResult, CheckOrdinal), CheckHandler,
                      StaticData, {VTable, ValidVtable});
      }

      FnVirtual = Builder.GetInsertBlock();
    }
  } // End of sanitizer scope

  CGF.EmitBranch(FnEnd);
  // In the non-virtual path, the 'function pointer' field really is the
  // address of the function to call.
  CGF.EmitBlock(FnNonVirtual);
  llvm::Value *NonVirtualFn =
      Builder.CreateIntToPtr(FnAsInt, CGF.UnqualPtrTy, "memptr.nonvirtualfn");

  // Check the function pointer if CFI on member function pointers is enabled.
  if (ShouldEmitCFICheck) {
    CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
    if (RD->hasDefinition()) {
      auto CheckOrdinal = SanitizerKind::SO_CFIMFCall;
      auto CheckHandler = SanitizerHandler::CFICheckFail;
      SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);

      llvm::Constant *StaticData[] = {
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
          CheckSourceLocation,
          CheckTypeDesc,
      };

      llvm::Value *Bit = Builder.getFalse();
      for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
        llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
            getContext().getMemberPointerType(MPT->getPointeeType(),
                                              /*Qualifier=*/nullptr,
                                              Base->getCanonicalDecl()));
        llvm::Value *TypeId =
            llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);

        llvm::Value *TypeTest =
            Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
                               {NonVirtualFn, TypeId});
        Bit = Builder.CreateOr(Bit, TypeTest);
      }

      CGF.EmitCheck(std::make_pair(Bit, CheckOrdinal), CheckHandler, StaticData,
                    {NonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});

      FnNonVirtual = Builder.GetInsertBlock();
    }
  }

  // We're done.
  CGF.EmitBlock(FnEnd);
  llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.UnqualPtrTy, 2);
  CalleePtr->addIncoming(VirtualFn, FnVirtual);
  CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);

  CGPointerAuthInfo PointerAuth;

  if (const auto &Schema =
          CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers) {
    llvm::PHINode *DiscriminatorPHI = Builder.CreatePHI(CGF.IntPtrTy, 2);
    DiscriminatorPHI->addIncoming(llvm::ConstantInt::get(CGF.IntPtrTy, 0),
                                  FnVirtual);
    const auto &AuthInfo =
        CGM.getMemberFunctionPointerAuthInfo(QualType(MPT, 0));
    assert(Schema.getKey() == AuthInfo.getKey() &&
           "Keys for virtual and non-virtual member functions must match");
    auto *NonVirtualDiscriminator = AuthInfo.getDiscriminator();
    DiscriminatorPHI->addIncoming(NonVirtualDiscriminator, FnNonVirtual);
    PointerAuth = CGPointerAuthInfo(
        Schema.getKey(), Schema.getAuthenticationMode(), Schema.isIsaPointer(),
        Schema.authenticatesNullValues(), DiscriminatorPHI);
  }

  CGCallee Callee(FPT, CalleePtr, PointerAuth);
  return Callee;
}

/// Compute an l-value by applying the given pointer-to-member to a
/// base object.
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
    CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
    const MemberPointerType *MPT, bool IsInBounds) {
  assert(MemPtr->getType() == CGM.PtrDiffTy);

  CGBuilderTy &Builder = CGF.Builder;

  // Apply the offset.
  llvm::Value *BaseAddr = Base.emitRawPointer(CGF);
  return Builder.CreateGEP(CGF.Int8Ty, BaseAddr, MemPtr, "memptr.offset",
                           IsInBounds ? llvm::GEPNoWrapFlags::inBounds()
                                      : llvm::GEPNoWrapFlags::none());
}

// See if it's possible to return a constant signed pointer.
static llvm::Constant *pointerAuthResignConstant(
    llvm::Value *Ptr, const CGPointerAuthInfo &CurAuthInfo,
    const CGPointerAuthInfo &NewAuthInfo, CodeGenModule &CGM) {
  const auto *CPA = dyn_cast<llvm::ConstantPtrAuth>(Ptr);

  if (!CPA)
    return nullptr;

  assert(CPA->getKey()->getZExtValue() == CurAuthInfo.getKey() &&
         CPA->getAddrDiscriminator()->isZeroValue() &&
         CPA->getDiscriminator() == CurAuthInfo.getDiscriminator() &&
         "unexpected key or discriminators");

  return CGM.getConstantSignedPointer(
      CPA->getPointer(), NewAuthInfo.getKey(), nullptr,
      cast<llvm::ConstantInt>(NewAuthInfo.getDiscriminator()));
}

/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
///          <-- offset -->          <-- adjustment -->
///   |--------------------------|----------------------|--------------------|
///   ^Derived address point     ^Base address point    ^Member address point
///
/// So when converting a base member pointer to a derived member pointer,
/// we add the offset to the adjustment because the address point has
/// decreased; and conversely, when converting a derived MP to a base MP
/// we subtract the offset from the adjustment because the address point
/// has increased.
///
/// The standard forbids (at compile time) conversion to and from
/// virtual bases, which is why we don't have to consider them here.
///
/// The standard forbids (at run time) casting a derived MP to a base
/// MP when the derived MP does not point to a member of the base.
/// This is why -1 is a reasonable choice for null data member
/// pointers.
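///
/// For example (illustrative): if base B lives at offset 8 within derived D,
/// converting an 'int B::*' holding offset 4 to 'int D::*' yields 12, and
/// converting it back subtracts 8 again; null (encoded as -1) is left
/// unchanged by the select below.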
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  // Use constant emission if we can.
  if (isa<llvm::Constant>(src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));

  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  CGBuilderTy &Builder = CGF.Builder;
  QualType DstType = E->getType();

  if (DstType->isMemberFunctionPointerType()) {
    if (const auto &NewAuthInfo =
            CGM.getMemberFunctionPointerAuthInfo(DstType)) {
      QualType SrcType = E->getSubExpr()->getType();
      assert(SrcType->isMemberFunctionPointerType());
      const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(SrcType);
      llvm::Value *MemFnPtr = Builder.CreateExtractValue(src, 0, "memptr.ptr");
      llvm::Type *OrigTy = MemFnPtr->getType();

      llvm::BasicBlock *StartBB = Builder.GetInsertBlock();
      llvm::BasicBlock *ResignBB = CGF.createBasicBlock("resign");
      llvm::BasicBlock *MergeBB = CGF.createBasicBlock("merge");

      // Check whether we have a virtual offset or a pointer to a function.
      assert(UseARMMethodPtrABI && "ARM ABI expected");
      llvm::Value *Adj = Builder.CreateExtractValue(src, 1, "memptr.adj");
      llvm::Constant *Ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
      llvm::Value *AndVal = Builder.CreateAnd(Adj, Ptrdiff_1);
      llvm::Value *IsVirtualOffset =
          Builder.CreateIsNotNull(AndVal, "is.virtual.offset");
      Builder.CreateCondBr(IsVirtualOffset, MergeBB, ResignBB);

      CGF.EmitBlock(ResignBB);
      llvm::Type *PtrTy = llvm::PointerType::getUnqual(CGM.getLLVMContext());
      MemFnPtr = Builder.CreateIntToPtr(MemFnPtr, PtrTy);
      MemFnPtr =
          CGF.emitPointerAuthResign(MemFnPtr, SrcType, CurAuthInfo, NewAuthInfo,
                                    isa<llvm::Constant>(src));
      MemFnPtr = Builder.CreatePtrToInt(MemFnPtr, OrigTy);
      llvm::Value *ResignedVal = Builder.CreateInsertValue(src, MemFnPtr, 0);
      ResignBB = Builder.GetInsertBlock();

      CGF.EmitBlock(MergeBB);
      llvm::PHINode *NewSrc = Builder.CreatePHI(src->getType(), 2);
      NewSrc->addIncoming(src, StartBB);
      NewSrc->addIncoming(ResignedVal, ResignBB);
      src = NewSrc;
    }
  }

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
      E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(src, adj, "adj");
    else
      dst = Builder.CreateNSWAdd(src, adj, "adj");

    // Null check.
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
    return Builder.CreateSelect(isNull, src, dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
  else
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");

  return Builder.CreateInsertValue(src, dstAdj, 1);
}

static llvm::Constant *
pointerAuthResignMemberFunctionPointer(llvm::Constant *Src, QualType DestType,
                                       QualType SrcType, CodeGenModule &CGM) {
  assert(DestType->isMemberFunctionPointerType() &&
         SrcType->isMemberFunctionPointerType() &&
         "member function pointers expected");
  if (DestType == SrcType)
    return Src;

  const auto &NewAuthInfo = CGM.getMemberFunctionPointerAuthInfo(DestType);
  const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(SrcType);

  if (!NewAuthInfo && !CurAuthInfo)
    return Src;

  llvm::Constant *MemFnPtr = Src->getAggregateElement(0u);
  if (MemFnPtr->getNumOperands() == 0) {
    // src must be a pair of null pointers.
    assert(isa<llvm::ConstantInt>(MemFnPtr) && "constant int expected");
    return Src;
  }

  llvm::Constant *ConstPtr = pointerAuthResignConstant(
      cast<llvm::User>(MemFnPtr)->getOperand(0), CurAuthInfo, NewAuthInfo, CGM);
  ConstPtr = llvm::ConstantExpr::getPtrToInt(ConstPtr, MemFnPtr->getType());
  return ConstantFoldInsertValueInstruction(Src, ConstPtr, 0);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
                                           llvm::Constant *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  QualType DstType = E->getType();

  if (DstType->isMemberFunctionPointerType())
    src = pointerAuthResignMemberFunctionPointer(
        src, DstType, E->getSubExpr()->getType(), CGM);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // If the adjustment is trivial, we don't need to do anything.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
      E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    // null maps to null.
    if (src->isAllOnesValue()) return src;

    if (isDerivedToBase)
      return llvm::ConstantExpr::getNSWSub(src, adj);
    else
      return llvm::ConstantExpr::getNSWAdd(src, adj);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Constant *srcAdj = src->getAggregateElement(1);
  llvm::Constant *dstAdj;
  if (isDerivedToBase)
    dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
  else
    dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);

  llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1);
  assert(res != nullptr && "Folding must succeed");
  return res;
}

llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
  // Itanium C++ ABI 2.3:
  //   A NULL pointer is represented as -1.
  if (MPT->isMemberDataPointer())
    return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);

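  // A null member function pointer is { 0, 0 }: a zero ptr field cannot be
  // a real function address, and (on Itanium) cannot be an encoded virtual
  // offset either, since those are represented as offset + 1.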
  llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
  llvm::Constant *Values[2] = { Zero, Zero };
  return llvm::ConstantStruct::getAnon(Values);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
                                     CharUnits offset) {
  // Itanium C++ ABI 2.3:
  //   A pointer to data member is an offset from the base address of
  //   the class object containing it, represented as a ptrdiff_t
  return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
}

llvm::Constant *
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
  return BuildMemberPointer(MD, CharUnits::Zero());
}

llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
    uint64_t VTableOffset;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Multiply by 4-byte relative offsets.
      VTableOffset = Index * 4;
    } else {
      const ASTContext &Context = getContext();
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
          Context.getTargetInfo().getPointerWidth(LangAS::Default));
      VTableOffset = Index * PointerWidth.getQuantity();
    }

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.

      // We cannot use the Itanium ABI's representation for virtual member
      // function pointers under pointer authentication because it would
      // require us to store both the virtual offset and the constant
      // discriminator in the pointer, which would be immediately vulnerable
      // to attack. Instead we introduce a thunk that does the virtual dispatch
      // and store it as if it were a non-virtual member function. This means
      // that virtual function pointers may not compare equal anymore, but
      // fortunately they aren't required to by the standard, and we do make
      // a best-effort attempt to re-use the thunk.
      //
      // To support interoperation with code in which pointer authentication
      // is disabled, dereferencing a member function pointer must still handle
      // the virtual case, but it can use a discriminator which should never
      // be valid.
      const auto &Schema =
          CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers;
      if (Schema)
        MemPtr[0] = llvm::ConstantExpr::getPtrToInt(
            getSignedVirtualMemberFunctionPointer(MD), CGM.PtrDiffTy);
      else
        MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      // Don't set the LSB of adj to 1 if pointer authentication for member
      // function pointers is enabled.
      MemPtr[1] = llvm::ConstantInt::get(
          CGM.PtrDiffTy, 2 * ThisAdjustment.getQuantity() + !Schema);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.getMemberFunctionPointer(MD, Ty);

    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}

llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
                                                 QualType MPType) {
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  if (!MPD)
    return EmitNullMemberPointer(MPT);

  CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) {
    llvm::Constant *Src = BuildMemberPointer(MD, ThisAdjustment);
    QualType SrcType = getContext().getMemberPointerType(
        MD->getType(), /*Qualifier=*/nullptr, MD->getParent());
    return pointerAuthResignMemberFunctionPointer(Src, MPType, SrcType, CGM);
  }

  CharUnits FieldOffset =
      getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
}

/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.
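  //
  // For example, under Itanium two null pointers compare equal through the
  // L.ptr == 0 disjunct even if their adj fields differ, while two copies
  // of the same '&X::f' compare equal because both fields match bitwise.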

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr. This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null. ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj. If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}

llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *MemPtr,
                                          const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  // For member data pointers, this is just a check against -1.
  if (MPT->isMemberDataPointer()) {
    assert(MemPtr->getType() == CGM.PtrDiffTy);
    llvm::Value *NegativeOne =
        llvm::Constant::getAllOnesValue(MemPtr->getType());
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
  }

  // In Itanium, a member function pointer is not null if 'ptr' is not null.
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");

  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");

  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
  // (the virtual bit) is set.
  if (UseARMMethodPtrABI) {
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
                                                  "memptr.isvirtual");
    Result = Builder.CreateOr(Result, IsVirtual);
  }

  return Result;
}

bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
  const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  // If C++ prohibits us from making a copy, return by address.
  if (!RD->canPassInRegisters()) {
    auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
    FI.getReturnInfo() = ABIArgInfo::getIndirect(
        Align, /*AddrSpace=*/CGM.getDataLayout().getAllocaAddrSpace(),
        /*ByVal=*/false);
    return true;
  }
  return false;
}

/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset.
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  return MPT->isMemberFunctionPointer();
}

/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl =
        cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
    llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr,
                                                        CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr = Ptr.emitRawPointer(CGF);
    CompletePtr =
        CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE,
                            /*CallOrInvoke=*/nullptr);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}

void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
  // void __cxa_rethrow();

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);

  llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");

  if (isNoReturn)
    CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, {});
  else
    CGF.EmitRuntimeCallOrInvoke(Fn);
}

static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
  // void *__cxa_allocate_exception(size_t thrown_size);

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
}

static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
  // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
  //                  void (*dest) (void *));

  llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.GlobalsInt8PtrTy, CGM.Int8PtrTy };
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}

1460void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1461 QualType ThrowType = E->getSubExpr()->getType();
1462 // Now allocate the exception object.
1463 llvm::Type *SizeTy = CGF.ConvertType(T: getContext().getSizeType());
1464 uint64_t TypeSize = getContext().getTypeSizeInChars(T: ThrowType).getQuantity();
1465
1466 llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
1467 llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1468 callee: AllocExceptionFn, args: llvm::ConstantInt::get(Ty: SizeTy, V: TypeSize), name: "exception");
1469
1470 CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
1471 CGF.EmitAnyExprToExn(
1472 E: E->getSubExpr(), Addr: Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));
1473
1474 // Now throw the exception.
1475 llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(Ty: ThrowType,
1476 /*ForEH=*/true);
1477
1478 // The address of the destructor. If the exception type has a
1479 // trivial destructor (or isn't a record), we just pass null.
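  // For example, 'throw 42' passes a null destructor, while throwing a
  // std::runtime_error passes its complete-object destructor,
  // _ZNSt13runtime_errorD1Ev.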
1480 llvm::Constant *Dtor = nullptr;
1481 if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1482 CXXRecordDecl *Record = cast<CXXRecordDecl>(Val: RecordTy->getDecl());
1483 if (!Record->hasTrivialDestructor()) {
1484 // __cxa_throw is declared to take its destructor as void (*)(void *). We
1485 // must match that if function pointers can be authenticated with a
1486 // discriminator based on their type.
1487 const ASTContext &Ctx = getContext();
1488 QualType DtorTy = Ctx.getFunctionType(ResultTy: Ctx.VoidTy, Args: {Ctx.VoidPtrTy},
1489 EPI: FunctionProtoType::ExtProtoInfo());
1490
1491 CXXDestructorDecl *DtorD = Record->getDestructor();
1492 Dtor = CGM.getAddrOfCXXStructor(GD: GlobalDecl(DtorD, Dtor_Complete));
1493 Dtor = CGM.getFunctionPointer(Pointer: Dtor, FunctionType: DtorTy);
1494 }
1495 }
1496 if (!Dtor) Dtor = llvm::Constant::getNullValue(Ty: CGM.Int8PtrTy);
1497
1498 llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1499 CGF.EmitNoreturnRuntimeCallOrInvoke(callee: getThrowFn(CGM), args);
1500}
1501
1502static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1503 // void *__dynamic_cast(const void *sub,
1504 // global_as const abi::__class_type_info *src,
1505 // global_as const abi::__class_type_info *dst,
1506 // std::ptrdiff_t src2dst_offset);
1507
1508 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1509 llvm::Type *GlobInt8PtrTy = CGF.GlobalsInt8PtrTy;
1510 llvm::Type *PtrDiffTy =
1511 CGF.ConvertType(T: CGF.getContext().getPointerDiffType());
1512
1513 llvm::Type *Args[4] = { Int8PtrTy, GlobInt8PtrTy, GlobInt8PtrTy, PtrDiffTy };
1514
1515 llvm::FunctionType *FTy = llvm::FunctionType::get(Result: Int8PtrTy, Params: Args, isVarArg: false);
1516
1517 // Mark the function as nounwind willreturn readonly.
1518 llvm::AttrBuilder FuncAttrs(CGF.getLLVMContext());
1519 FuncAttrs.addAttribute(Val: llvm::Attribute::NoUnwind);
1520 FuncAttrs.addAttribute(Val: llvm::Attribute::WillReturn);
1521 FuncAttrs.addMemoryAttr(ME: llvm::MemoryEffects::readOnly());
1522 llvm::AttributeList Attrs = llvm::AttributeList::get(
1523 C&: CGF.getLLVMContext(), Index: llvm::AttributeList::FunctionIndex, B: FuncAttrs);
1524
1525 return CGF.CGM.CreateRuntimeFunction(Ty: FTy, Name: "__dynamic_cast", ExtraAttrs: Attrs);
1526}
1527
1528static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1529 // void __cxa_bad_cast();
1530 llvm::FunctionType *FTy = llvm::FunctionType::get(Result: CGF.VoidTy, isVarArg: false);
1531 return CGF.CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_bad_cast");
1532}
1533
1534/// Compute the src2dst_offset hint as described in the
1535/// Itanium C++ ABI [2.9.7]
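/// The hint is the offset of Src within Dst when Src is a unique public
/// non-virtual base (e.g. 0 for 'struct B : A {}' when casting from A* to
/// B*); otherwise it is one of the special values -1 (no hint), -2 (Src is
/// not a public base), or -3 (Src appears as a public base more than once,
/// never virtually) computed below.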
1536static CharUnits computeOffsetHint(ASTContext &Context,
1537 const CXXRecordDecl *Src,
1538 const CXXRecordDecl *Dst) {
1539 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1540 /*DetectVirtual=*/false);
1541
  // If Dst is not derived from Src, we can skip the whole computation below
  // and return that Src is not a public base of Dst. This check also records
  // all inheritance paths from Src to Dst in Paths.
1544 if (!Dst->isDerivedFrom(Base: Src, Paths))
1545 return CharUnits::fromQuantity(Quantity: -2ULL);
1546
1547 unsigned NumPublicPaths = 0;
1548 CharUnits Offset;
1549
1550 // Now walk all possible inheritance paths.
1551 for (const CXXBasePath &Path : Paths) {
1552 if (Path.Access != AS_public) // Ignore non-public inheritance.
1553 continue;
1554
1555 ++NumPublicPaths;
1556
1557 for (const CXXBasePathElement &PathElement : Path) {
1558 // If the path contains a virtual base class we can't give any hint.
1559 // -1: no hint.
1560 if (PathElement.Base->isVirtual())
1561 return CharUnits::fromQuantity(Quantity: -1ULL);
1562
1563 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1564 continue;
1565
1566 // Accumulate the base class offsets.
1567 const ASTRecordLayout &L = Context.getASTRecordLayout(D: PathElement.Class);
1568 Offset += L.getBaseClassOffset(
1569 Base: PathElement.Base->getType()->getAsCXXRecordDecl());
1570 }
1571 }
1572
1573 // -2: Src is not a public base of Dst.
1574 if (NumPublicPaths == 0)
1575 return CharUnits::fromQuantity(Quantity: -2ULL);
1576
1577 // -3: Src is a multiple public base type but never a virtual base type.
1578 if (NumPublicPaths > 1)
1579 return CharUnits::fromQuantity(Quantity: -3ULL);
1580
1581 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1582 // Return the offset of Src from the origin of Dst.
1583 return Offset;
1584}
1585
1586static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1587 // void __cxa_bad_typeid();
1588 llvm::FunctionType *FTy = llvm::FunctionType::get(Result: CGF.VoidTy, isVarArg: false);
1589
1590 return CGF.CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_bad_typeid");
1591}
1592
1593bool ItaniumCXXABI::shouldTypeidBeNullChecked(QualType SrcRecordTy) {
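  // C++ [expr.typeid]p2: when typeid is applied to a dereferenced null
  // pointer, the result is a thrown std::bad_typeid exception, so the
  // operand must always be null-checked under this ABI.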
1594 return true;
1595}
1596
1597void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1598 llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1599 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(callee: Fn);
1600 Call->setDoesNotReturn();
1601 CGF.Builder.CreateUnreachable();
1602}
1603
1604llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1605 QualType SrcRecordTy,
1606 Address ThisPtr,
1607 llvm::Type *StdTypeInfoPtrTy) {
1608 auto *ClassDecl =
1609 cast<CXXRecordDecl>(Val: SrcRecordTy->castAs<RecordType>()->getDecl());
1610 llvm::Value *Value = CGF.GetVTablePtr(This: ThisPtr, VTableTy: CGM.GlobalsInt8PtrTy,
1611 VTableClass: ClassDecl);
1612
1613 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info, stored as a 32-bit relative pointer at byte -4.
1615 Value = CGF.Builder.CreateCall(
1616 Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::load_relative, Tys: {CGM.Int32Ty}),
1617 Args: {Value, llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: -4)});
1618 } else {
    // Load the type info from entry -1 of the vtable.
1620 Value =
1621 CGF.Builder.CreateConstInBoundsGEP1_64(Ty: StdTypeInfoPtrTy, Ptr: Value, Idx0: -1ULL);
1622 }
1623 return CGF.Builder.CreateAlignedLoad(Ty: StdTypeInfoPtrTy, Addr: Value,
1624 Align: CGF.getPointerAlign());
1625}
1626
1627bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1628 QualType SrcRecordTy) {
1629 return SrcIsPtr;
1630}
1631
1632llvm::Value *ItaniumCXXABI::emitDynamicCastCall(
1633 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1634 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1635 llvm::Type *PtrDiffLTy =
1636 CGF.ConvertType(T: CGF.getContext().getPointerDiffType());
1637
1638 llvm::Value *SrcRTTI =
1639 CGF.CGM.GetAddrOfRTTIDescriptor(Ty: SrcRecordTy.getUnqualifiedType());
1640 llvm::Value *DestRTTI =
1641 CGF.CGM.GetAddrOfRTTIDescriptor(Ty: DestRecordTy.getUnqualifiedType());
1642
1643 // Compute the offset hint.
1644 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1645 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1646 llvm::Value *OffsetHint = llvm::ConstantInt::get(
1647 Ty: PtrDiffLTy,
1648 V: computeOffsetHint(Context&: CGF.getContext(), Src: SrcDecl, Dst: DestDecl).getQuantity());
1649
1650 // Emit the call to __dynamic_cast.
1651 llvm::Value *Value = ThisAddr.emitRawPointer(CGF);
1652 if (CGM.getCodeGenOpts().PointerAuth.CXXVTablePointers) {
    // We perform a no-op load of the vtable pointer here to force an
    // authentication. In environments that do not support pointer
    // authentication this is an actual no-op that will be elided. When
    // pointer authentication is supported and enforced on vtable pointers,
    // this load can trap.
1658 llvm::Value *Vtable =
1659 CGF.GetVTablePtr(This: ThisAddr, VTableTy: CGM.Int8PtrTy, VTableClass: SrcDecl,
1660 AuthMode: CodeGenFunction::VTableAuthMode::MustTrap);
1661 assert(Vtable);
1662 (void)Vtable;
1663 }
1664
1665 llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1666 Value = CGF.EmitNounwindRuntimeCall(callee: getItaniumDynamicCastFn(CGF), args);
1667
1668 /// C++ [expr.dynamic.cast]p9:
1669 /// A failed cast to reference type throws std::bad_cast
1670 if (DestTy->isReferenceType()) {
1671 llvm::BasicBlock *BadCastBlock =
1672 CGF.createBasicBlock(name: "dynamic_cast.bad_cast");
1673
1674 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Arg: Value);
1675 CGF.Builder.CreateCondBr(Cond: IsNull, True: BadCastBlock, False: CastEnd);
1676
1677 CGF.EmitBlock(BB: BadCastBlock);
1678 EmitBadCastCall(CGF);
1679 }
1680
1681 return Value;
1682}
1683
1684llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
1685 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1686 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastSuccess,
1687 llvm::BasicBlock *CastFail) {
1688 ASTContext &Context = getContext();
1689
1690 // Find all the inheritance paths.
1691 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1692 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1693 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1694 /*DetectVirtual=*/false);
1695 (void)DestDecl->isDerivedFrom(Base: SrcDecl, Paths);
1696
1697 // Find an offset within `DestDecl` where a `SrcDecl` instance and its vptr
1698 // might appear.
1699 std::optional<CharUnits> Offset;
1700 for (const CXXBasePath &Path : Paths) {
1701 // dynamic_cast only finds public inheritance paths.
1702 if (Path.Access != AS_public)
1703 continue;
1704
1705 CharUnits PathOffset;
1706 for (const CXXBasePathElement &PathElement : Path) {
1707 // Find the offset along this inheritance step.
1708 const CXXRecordDecl *Base =
1709 PathElement.Base->getType()->getAsCXXRecordDecl();
1710 if (PathElement.Base->isVirtual()) {
1711 // For a virtual base class, we know that the derived class is exactly
1712 // DestDecl, so we can use the vbase offset from its layout.
1713 const ASTRecordLayout &L = Context.getASTRecordLayout(D: DestDecl);
1714 PathOffset = L.getVBaseClassOffset(VBase: Base);
1715 } else {
1716 const ASTRecordLayout &L =
1717 Context.getASTRecordLayout(D: PathElement.Class);
1718 PathOffset += L.getBaseClassOffset(Base);
1719 }
1720 }
1721
1722 if (!Offset)
1723 Offset = PathOffset;
1724 else if (Offset != PathOffset) {
1725 // Base appears in at least two different places. Find the most-derived
1726 // object and see if it's a DestDecl. Note that the most-derived object
1727 // must be at least as aligned as this base class subobject, and must
1728 // have a vptr at offset 0.
1729 ThisAddr = Address(emitDynamicCastToVoid(CGF, Value: ThisAddr, SrcRecordTy),
1730 CGF.VoidPtrTy, ThisAddr.getAlignment());
1731 SrcDecl = DestDecl;
1732 Offset = CharUnits::Zero();
1733 break;
1734 }
1735 }
1736
1737 if (!Offset) {
1738 // If there are no public inheritance paths, the cast always fails.
1739 CGF.EmitBranch(Block: CastFail);
1740 return llvm::PoisonValue::get(T: CGF.VoidPtrTy);
1741 }
1742
  // Compare the vptr against the expected vptr for the destination type at
  // this offset. Note that we do not know what type ThisAddr points to when
  // the derived class multiply inherits from the base class, so we can't use
  // GetVTablePtr; instead we load the vptr directly.
1747 llvm::Instruction *VPtr = CGF.Builder.CreateLoad(
1748 Addr: ThisAddr.withElementType(ElemTy: CGF.VoidPtrPtrTy), Name: "vtable");
1749 CGM.DecorateInstructionWithTBAA(
1750 Inst: VPtr, TBAAInfo: CGM.getTBAAVTablePtrAccessInfo(VTablePtrType: CGF.VoidPtrPtrTy));
1751 llvm::Value *Success = CGF.Builder.CreateICmpEQ(
1752 LHS: VPtr, RHS: getVTableAddressPoint(Base: BaseSubobject(SrcDecl, *Offset), VTableClass: DestDecl));
1753 llvm::Value *Result = ThisAddr.emitRawPointer(CGF);
1754 if (!Offset->isZero())
1755 Result = CGF.Builder.CreateInBoundsGEP(
1756 Ty: CGF.CharTy, Ptr: Result,
1757 IdxList: {llvm::ConstantInt::get(Ty: CGF.PtrDiffTy, V: -Offset->getQuantity())});
1758 CGF.Builder.CreateCondBr(Cond: Success, True: CastSuccess, False: CastFail);
1759 return Result;
1760}
1761
1762llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
1763 Address ThisAddr,
1764 QualType SrcRecordTy) {
1765 auto *ClassDecl =
1766 cast<CXXRecordDecl>(Val: SrcRecordTy->castAs<RecordType>()->getDecl());
1767 llvm::Value *OffsetToTop;
1768 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1769 // Get the vtable pointer.
1770 llvm::Value *VTable =
1771 CGF.GetVTablePtr(This: ThisAddr, VTableTy: CGF.UnqualPtrTy, VTableClass: ClassDecl);
1772
1773 // Get the offset-to-top from the vtable.
1774 OffsetToTop =
1775 CGF.Builder.CreateConstInBoundsGEP1_32(Ty: CGM.Int32Ty, Ptr: VTable, Idx0: -2U);
1776 OffsetToTop = CGF.Builder.CreateAlignedLoad(
1777 Ty: CGM.Int32Ty, Addr: OffsetToTop, Align: CharUnits::fromQuantity(Quantity: 4), Name: "offset.to.top");
1778 } else {
1779 llvm::Type *PtrDiffLTy =
1780 CGF.ConvertType(T: CGF.getContext().getPointerDiffType());
1781
1782 // Get the vtable pointer.
1783 llvm::Value *VTable =
1784 CGF.GetVTablePtr(This: ThisAddr, VTableTy: CGF.UnqualPtrTy, VTableClass: ClassDecl);
1785
1786 // Get the offset-to-top from the vtable.
1787 OffsetToTop =
1788 CGF.Builder.CreateConstInBoundsGEP1_64(Ty: PtrDiffLTy, Ptr: VTable, Idx0: -2ULL);
1789 OffsetToTop = CGF.Builder.CreateAlignedLoad(
1790 Ty: PtrDiffLTy, Addr: OffsetToTop, Align: CGF.getPointerAlign(), Name: "offset.to.top");
1791 }
1792 // Finally, add the offset to the pointer.
1793 return CGF.Builder.CreateInBoundsGEP(Ty: CGF.Int8Ty, Ptr: ThisAddr.emitRawPointer(CGF),
1794 IdxList: OffsetToTop);
1795}
1796
1797bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1798 llvm::FunctionCallee Fn = getBadCastFn(CGF);
1799 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(callee: Fn);
1800 Call->setDoesNotReturn();
1801 CGF.Builder.CreateUnreachable();
1802 return true;
1803}
1804
1805llvm::Value *
1806ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1807 Address This,
1808 const CXXRecordDecl *ClassDecl,
1809 const CXXRecordDecl *BaseClassDecl) {
1810 llvm::Value *VTablePtr = CGF.GetVTablePtr(This, VTableTy: CGM.Int8PtrTy, VTableClass: ClassDecl);
1811 CharUnits VBaseOffsetOffset =
1812 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD: ClassDecl,
1813 VBase: BaseClassDecl);
1814 llvm::Value *VBaseOffsetPtr =
1815 CGF.Builder.CreateConstGEP1_64(
1816 Ty: CGF.Int8Ty, Ptr: VTablePtr, Idx0: VBaseOffsetOffset.getQuantity(),
1817 Name: "vbase.offset.ptr");
1818
1819 llvm::Value *VBaseOffset;
1820 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1821 VBaseOffset = CGF.Builder.CreateAlignedLoad(
1822 Ty: CGF.Int32Ty, Addr: VBaseOffsetPtr, Align: CharUnits::fromQuantity(Quantity: 4),
1823 Name: "vbase.offset");
1824 } else {
1825 VBaseOffset = CGF.Builder.CreateAlignedLoad(
1826 Ty: CGM.PtrDiffTy, Addr: VBaseOffsetPtr, Align: CGF.getPointerAlign(), Name: "vbase.offset");
1827 }
1828 return VBaseOffset;
1829}
1830
1831void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1832 // Just make sure we're in sync with TargetCXXABI.
1833 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1834
1835 // The constructor used for constructing this as a base class;
1836 // ignores virtual bases.
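  // (In the Itanium mangling this is the C2 symbol, e.g. _ZN1SC2Ev for a
  // 'struct S'; the complete-object variant emitted below is C1.)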
1837 CGM.EmitGlobal(D: GlobalDecl(D, Ctor_Base));
1838
1839 // The constructor used for constructing this as a complete class;
1840 // constructs the virtual bases, then calls the base constructor.
1841 if (!D->getParent()->isAbstract()) {
1842 // We don't need to emit the complete ctor if the class is abstract.
1843 CGM.EmitGlobal(D: GlobalDecl(D, Ctor_Complete));
1844 }
1845}
1846
1847CGCXXABI::AddedStructorArgCounts
1848ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1849 SmallVectorImpl<CanQualType> &ArgTys) {
1850 ASTContext &Context = getContext();
1851
1852 // All parameters are already in place except VTT, which goes after 'this'.
1853 // These are Clang types, so we don't need to worry about sret yet.
1854
1855 // Check if we need to add a VTT parameter (which has type global void **).
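  // For example, the base-object constructor of 'struct C : virtual A {}'
  // takes the VTT as its second parameter, roughly 'void(C *this, void **vtt)'.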
1856 if ((isa<CXXConstructorDecl>(Val: GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1857 : GD.getDtorType() == Dtor_Base) &&
1858 cast<CXXMethodDecl>(Val: GD.getDecl())->getParent()->getNumVBases() != 0) {
1859 LangAS AS = CGM.GetGlobalVarAddressSpace(D: nullptr);
1860 QualType Q = Context.getAddrSpaceQualType(T: Context.VoidPtrTy, AddressSpace: AS);
1861 ArgTys.insert(I: ArgTys.begin() + 1,
1862 Elt: Context.getPointerType(T: CanQualType::CreateUnsafe(Other: Q)));
1863 return AddedStructorArgCounts::prefix(N: 1);
1864 }
1865 return AddedStructorArgCounts{};
1866}
1867
1868void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1869 // The destructor used for destructing this as a base class; ignores
1870 // virtual bases.
1871 CGM.EmitGlobal(D: GlobalDecl(D, Dtor_Base));
1872
1873 // The destructor used for destructing this as a most-derived class;
  // calls the base destructor and then destructs any virtual bases.
1875 CGM.EmitGlobal(D: GlobalDecl(D, Dtor_Complete));
1876
1877 // The destructor in a virtual table is always a 'deleting'
1878 // destructor, which calls the complete destructor and then uses the
1879 // appropriate operator delete.
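  // (In the Itanium mangling the base, complete, and deleting variants are
  // the D2, D1, and D0 symbols respectively, e.g. _ZN1SD2Ev, _ZN1SD1Ev, and
  // _ZN1SD0Ev for a 'struct S'.)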
1880 if (D->isVirtual())
1881 CGM.EmitGlobal(D: GlobalDecl(D, Dtor_Deleting));
1882}
1883
1884void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1885 QualType &ResTy,
1886 FunctionArgList &Params) {
1887 const CXXMethodDecl *MD = cast<CXXMethodDecl>(Val: CGF.CurGD.getDecl());
1888 assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1889
1890 // Check if we need a VTT parameter as well.
1891 if (NeedsVTTParameter(GD: CGF.CurGD)) {
1892 ASTContext &Context = getContext();
1893
1894 // FIXME: avoid the fake decl
1895 LangAS AS = CGM.GetGlobalVarAddressSpace(D: nullptr);
1896 QualType Q = Context.getAddrSpaceQualType(T: Context.VoidPtrTy, AddressSpace: AS);
1897 QualType T = Context.getPointerType(T: Q);
1898 auto *VTTDecl = ImplicitParamDecl::Create(
1899 C&: Context, /*DC=*/nullptr, IdLoc: MD->getLocation(), Id: &Context.Idents.get(Name: "vtt"),
1900 T, ParamKind: ImplicitParamKind::CXXVTT);
1901 Params.insert(I: Params.begin() + 1, Elt: VTTDecl);
1902 getStructorImplicitParamDecl(CGF) = VTTDecl;
1903 }
1904}
1905
1906void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1907 // Naked functions have no prolog.
1908 if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1909 return;
1910
1911 /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1912 /// adjustments are required, because they are all handled by thunks.
1913 setCXXABIThisValue(CGF, ThisPtr: loadIncomingCXXThis(CGF));
1914
1915 /// Initialize the 'vtt' slot if needed.
1916 if (getStructorImplicitParamDecl(CGF)) {
1917 getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1918 Addr: CGF.GetAddrOfLocalVar(VD: getStructorImplicitParamDecl(CGF)), Name: "vtt");
1919 }
1920
1921 /// If this is a function that the ABI specifies returns 'this', initialize
1922 /// the return slot to 'this' at the start of the function.
1923 ///
1924 /// Unlike the setting of return types, this is done within the ABI
1925 /// implementation instead of by clients of CGCXXABI because:
1926 /// 1) getThisValue is currently protected
1927 /// 2) in theory, an ABI could implement 'this' returns some other way;
1928 /// HasThisReturn only specifies a contract, not the implementation
1929 if (HasThisReturn(GD: CGF.CurGD))
1930 CGF.Builder.CreateStore(Val: getThisValue(CGF), Addr: CGF.ReturnValue);
1931}
1932
1933CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1934 CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1935 bool ForVirtualBase, bool Delegating) {
1936 if (!NeedsVTTParameter(GD: GlobalDecl(D, Type)))
1937 return AddedStructorArgs{};
1938
1939 // Insert the implicit 'vtt' argument as the second argument. Make sure to
1940 // correctly reflect its address space, which can differ from generic on
1941 // some targets.
1942 llvm::Value *VTT =
1943 CGF.GetVTTParameter(GD: GlobalDecl(D, Type), ForVirtualBase, Delegating);
1944 LangAS AS = CGM.GetGlobalVarAddressSpace(D: nullptr);
1945 QualType Q = getContext().getAddrSpaceQualType(T: getContext().VoidPtrTy, AddressSpace: AS);
1946 QualType VTTTy = getContext().getPointerType(T: Q);
1947 return AddedStructorArgs::prefix(Args: {{.Value: VTT, .Type: VTTTy}});
1948}
1949
1950llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1951 CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1952 bool ForVirtualBase, bool Delegating) {
1953 GlobalDecl GD(DD, Type);
1954 return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1955}
1956
1957void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1958 const CXXDestructorDecl *DD,
1959 CXXDtorType Type, bool ForVirtualBase,
1960 bool Delegating, Address This,
1961 QualType ThisTy) {
1962 GlobalDecl GD(DD, Type);
1963 llvm::Value *VTT =
1964 getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
1965 QualType VTTTy = getContext().getPointerType(T: getContext().VoidPtrTy);
1966
1967 CGCallee Callee;
1968 if (getContext().getLangOpts().AppleKext &&
1969 Type != Dtor_Base && DD->isVirtual())
1970 Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, RD: DD->getParent());
1971 else
1972 Callee = CGCallee::forDirect(functionPtr: CGM.getAddrOfCXXStructor(GD), abstractInfo: GD);
1973
1974 CGF.EmitCXXDestructorCall(Dtor: GD, Callee, This: CGF.getAsNaturalPointerTo(Addr: This, PointeeType: ThisTy),
1975 ThisTy, ImplicitParam: VTT, ImplicitParamTy: VTTTy, E: nullptr);
1976}
1977
1978// Check if any non-inline method has the specified attribute.
1979template <typename T>
1980static bool CXXRecordNonInlineHasAttr(const CXXRecordDecl *RD) {
1981 for (const auto *D : RD->noload_decls()) {
1982 if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
1983 if (FD->isInlined() || FD->doesThisDeclarationHaveABody() ||
1984 FD->isPureVirtual())
1985 continue;
1986 if (D->hasAttr<T>())
1987 return true;
1988 }
1989 }
1990
1991 return false;
1992}
1993
1994static void setVTableSelectiveDLLImportExport(CodeGenModule &CGM,
1995 llvm::GlobalVariable *VTable,
1996 const CXXRecordDecl *RD) {
1997 if (VTable->getDLLStorageClass() !=
1998 llvm::GlobalVariable::DefaultStorageClass ||
1999 RD->hasAttr<DLLImportAttr>() || RD->hasAttr<DLLExportAttr>())
2000 return;
2001
2002 if (CGM.getVTables().isVTableExternal(RD)) {
2003 if (CXXRecordNonInlineHasAttr<DLLImportAttr>(RD))
2004 VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
2005 } else if (CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
2006 VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
2007}
2008
2009void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
2010 const CXXRecordDecl *RD) {
2011 llvm::GlobalVariable *VTable = getAddrOfVTable(RD, VPtrOffset: CharUnits());
2012 if (VTable->hasInitializer())
2013 return;
2014
2015 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
2016 const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
2017 llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
2018 llvm::Constant *RTTI =
2019 CGM.GetAddrOfRTTIDescriptor(Ty: CGM.getContext().getTagDeclType(Decl: RD));
2020
2021 // Create and set the initializer.
2022 ConstantInitBuilder builder(CGM);
2023 auto components = builder.beginStruct();
2024 CGVT.createVTableInitializer(builder&: components, layout: VTLayout, rtti: RTTI,
2025 vtableHasLocalLinkage: llvm::GlobalValue::isLocalLinkage(Linkage));
2026 components.finishAndSetAsInitializer(global: VTable);
2027
2028 // Set the correct linkage.
2029 VTable->setLinkage(Linkage);
2030
2031 if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
2032 VTable->setComdat(CGM.getModule().getOrInsertComdat(Name: VTable->getName()));
2033
2034 if (CGM.getTarget().hasPS4DLLImportExport())
2035 setVTableSelectiveDLLImportExport(CGM, VTable, RD);
2036
2037 // Set the right visibility.
2038 CGM.setGVProperties(GV: VTable, D: RD);
2039
2040 // If this is the magic class __cxxabiv1::__fundamental_type_info,
2041 // we will emit the typeinfo for the fundamental types. This is the
2042 // same behaviour as GCC.
2043 const DeclContext *DC = RD->getDeclContext();
2044 if (RD->getIdentifier() &&
2045 RD->getIdentifier()->isStr(Str: "__fundamental_type_info") &&
2046 isa<NamespaceDecl>(Val: DC) && cast<NamespaceDecl>(Val: DC)->getIdentifier() &&
2047 cast<NamespaceDecl>(Val: DC)->getIdentifier()->isStr(Str: "__cxxabiv1") &&
2048 DC->getParent()->isTranslationUnit())
2049 EmitFundamentalRTTIDescriptors(RD);
2050
2051 // Always emit type metadata on non-available_externally definitions, and on
2052 // available_externally definitions if we are performing whole program
2053 // devirtualization. For WPD we need the type metadata on all vtable
2054 // definitions to ensure we associate derived classes with base classes
2055 // defined in headers but with a strong definition only in a shared library.
2056 if (!VTable->isDeclarationForLinker() ||
2057 CGM.getCodeGenOpts().WholeProgramVTables) {
2058 CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
2059 // For available_externally definitions, add the vtable to
2060 // @llvm.compiler.used so that it isn't deleted before whole program
2061 // analysis.
2062 if (VTable->isDeclarationForLinker()) {
2063 assert(CGM.getCodeGenOpts().WholeProgramVTables);
2064 CGM.addCompilerUsedGlobal(GV: VTable);
2065 }
2066 }
2067
2068 if (VTContext.isRelativeLayout()) {
2069 CGVT.RemoveHwasanMetadata(GV: VTable);
2070 if (!VTable->isDSOLocal())
2071 CGVT.GenerateRelativeVTableAlias(VTable, AliasNameRef: VTable->getName());
2072 }
2073
2074 // Emit symbol for debugger only if requested debug info.
2075 if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
2076 DI->emitVTableSymbol(VTable, RD);
2077}
2078
2079bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
2080 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
2081 if (Vptr.NearestVBase == nullptr)
2082 return false;
2083 return NeedsVTTParameter(GD: CGF.CurGD);
2084}
2085
2086llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
2087 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
2088 const CXXRecordDecl *NearestVBase) {
2089
2090 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
2091 NeedsVTTParameter(GD: CGF.CurGD)) {
2092 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
2093 NearestVBase);
2094 }
2095 return getVTableAddressPoint(Base, VTableClass);
2096}
2097
2098llvm::Constant *
2099ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
2100 const CXXRecordDecl *VTableClass) {
2101 llvm::GlobalValue *VTable = getAddrOfVTable(RD: VTableClass, VPtrOffset: CharUnits());
2102
2103 // Find the appropriate vtable within the vtable group, and the address point
2104 // within that vtable.
2105 const VTableLayout &Layout =
2106 CGM.getItaniumVTableContext().getVTableLayout(RD: VTableClass);
2107 VTableLayout::AddressPointLocation AddressPoint =
2108 Layout.getAddressPoint(Base);
2109 llvm::Value *Indices[] = {
2110 llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: 0),
2111 llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: AddressPoint.VTableIndex),
2112 llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: AddressPoint.AddressPointIndex),
2113 };
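  // The result is a constant GEP into the vtable group, e.g. for a simple
  // dynamic class S:
  //   getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV1S, i32 0, i32 0, i32 2)
  // which points just past the offset-to-top and RTTI slots of the selected
  // vtable.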
2114
2115 // Add inrange attribute to indicate that only the VTableIndex can be
2116 // accessed.
2117 unsigned ComponentSize =
2118 CGM.getDataLayout().getTypeAllocSize(Ty: CGM.getVTableComponentType());
2119 unsigned VTableSize =
2120 ComponentSize * Layout.getVTableSize(i: AddressPoint.VTableIndex);
2121 unsigned Offset = ComponentSize * AddressPoint.AddressPointIndex;
2122 llvm::ConstantRange InRange(
2123 llvm::APInt(32, (int)-Offset, true),
2124 llvm::APInt(32, (int)(VTableSize - Offset), true));
2125 return llvm::ConstantExpr::getGetElementPtr(
2126 Ty: VTable->getValueType(), C: VTable, IdxList: Indices, /*InBounds=*/NW: true, InRange);
2127}
2128
2129llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
2130 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
2131 const CXXRecordDecl *NearestVBase) {
2132 assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
2133 NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
2134
2135 // Get the secondary vpointer index.
2136 uint64_t VirtualPointerIndex =
2137 CGM.getVTables().getSecondaryVirtualPointerIndex(RD: VTableClass, Base);
2138
2139 /// Load the VTT.
2140 llvm::Value *VTT = CGF.LoadCXXVTT();
2141 if (VirtualPointerIndex)
2142 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(Ty: CGF.GlobalsVoidPtrTy, Ptr: VTT,
2143 Idx0: VirtualPointerIndex);
2144
2145 // And load the address point from the VTT.
2146 llvm::Value *AP =
2147 CGF.Builder.CreateAlignedLoad(Ty: CGF.GlobalsVoidPtrTy, Addr: VTT,
2148 Align: CGF.getPointerAlign());
2149
2150 if (auto &Schema = CGF.CGM.getCodeGenOpts().PointerAuth.CXXVTTVTablePointers) {
2151 CGPointerAuthInfo PointerAuth = CGF.EmitPointerAuthInfo(Schema, StorageAddress: VTT,
2152 SchemaDecl: GlobalDecl(),
2153 SchemaType: QualType());
2154 AP = CGF.EmitPointerAuthAuth(Info: PointerAuth, Pointer: AP);
2155 }
2156
2157 return AP;
2158}
2159
2160llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
2161 CharUnits VPtrOffset) {
2162 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
2163
2164 llvm::GlobalVariable *&VTable = VTables[RD];
2165 if (VTable)
2166 return VTable;
2167
2168 // Queue up this vtable for possible deferred emission.
2169 CGM.addDeferredVTable(RD);
2170
2171 SmallString<256> Name;
2172 llvm::raw_svector_ostream Out(Name);
2173 getMangleContext().mangleCXXVTable(RD, Out);
2174
2175 const VTableLayout &VTLayout =
2176 CGM.getItaniumVTableContext().getVTableLayout(RD);
2177 llvm::Type *VTableType = CGM.getVTables().getVTableType(layout: VTLayout);
2178
  // Use the pointer-to-global alignment for the vtable. Otherwise we would
  // align it based on the size of the initializer, which doesn't make sense,
  // as only individual entries are ever read.
2182 unsigned PAlign = CGM.getVtableGlobalVarAlignment();
2183
2184 VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
2185 Name, Ty: VTableType, Linkage: llvm::GlobalValue::ExternalLinkage,
2186 Alignment: getContext().toCharUnitsFromBits(BitSize: PAlign).getAsAlign());
2187 VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2188
2189 if (CGM.getTarget().hasPS4DLLImportExport())
2190 setVTableSelectiveDLLImportExport(CGM, VTable, RD);
2191
2192 CGM.setGVProperties(GV: VTable, D: RD);
2193 return VTable;
2194}
2195
2196CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
2197 GlobalDecl GD,
2198 Address This,
2199 llvm::Type *Ty,
2200 SourceLocation Loc) {
2201 llvm::Type *PtrTy = CGM.GlobalsInt8PtrTy;
2202 auto *MethodDecl = cast<CXXMethodDecl>(Val: GD.getDecl());
2203 llvm::Value *VTable = CGF.GetVTablePtr(This, VTableTy: PtrTy, VTableClass: MethodDecl->getParent());
2204
2205 uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
2206 llvm::Value *VFunc, *VTableSlotPtr = nullptr;
2207 auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers;
2208
2209 llvm::Type *ComponentTy = CGM.getVTables().getVTableComponentType();
2210 uint64_t ByteOffset =
2211 VTableIndex * CGM.getDataLayout().getTypeSizeInBits(Ty: ComponentTy) / 8;
2212
2213 if (!Schema && CGF.ShouldEmitVTableTypeCheckedLoad(RD: MethodDecl->getParent())) {
2214 VFunc = CGF.EmitVTableTypeCheckedLoad(RD: MethodDecl->getParent(), VTable,
2215 VTableTy: PtrTy, VTableByteOffset: ByteOffset);
2216 } else {
2217 CGF.EmitTypeMetadataCodeForVCall(RD: MethodDecl->getParent(), VTable, Loc);
2218
2219 llvm::Value *VFuncLoad;
2220 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
2221 VFuncLoad = CGF.Builder.CreateCall(
2222 Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::load_relative, Tys: {CGM.Int32Ty}),
2223 Args: {VTable, llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: ByteOffset)});
2224 } else {
2225 VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
2226 Ty: PtrTy, Ptr: VTable, Idx0: VTableIndex, Name: "vfn");
2227 VFuncLoad = CGF.Builder.CreateAlignedLoad(Ty: PtrTy, Addr: VTableSlotPtr,
2228 Align: CGF.getPointerAlign());
2229 }
2230
    // Add !invariant.load metadata to the virtual function load to indicate
    // that the function didn't change inside the vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help devirtualization, because it only matters when there are two loads
    // of the same virtual function from the same vtable load, which won't
    // happen without devirtualization enabled by -fstrict-vtable-pointers.
2237 if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
2238 CGM.getCodeGenOpts().StrictVTablePointers) {
2239 if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(Val: VFuncLoad)) {
2240 VFuncLoadInstr->setMetadata(
2241 KindID: llvm::LLVMContext::MD_invariant_load,
2242 Node: llvm::MDNode::get(Context&: CGM.getLLVMContext(),
2243 MDs: llvm::ArrayRef<llvm::Metadata *>()));
2244 }
2245 }
2246 VFunc = VFuncLoad;
2247 }
2248
2249 CGPointerAuthInfo PointerAuth;
2250 if (Schema) {
2251 assert(VTableSlotPtr && "virtual function pointer not set");
2252 GD = CGM.getItaniumVTableContext().findOriginalMethod(GD: GD.getCanonicalDecl());
2253 PointerAuth = CGF.EmitPointerAuthInfo(Schema, StorageAddress: VTableSlotPtr, SchemaDecl: GD, SchemaType: QualType());
2254 }
2255 CGCallee Callee(GD, VFunc, PointerAuth);
2256 return Callee;
2257}
2258
2259llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
2260 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
2261 Address This, DeleteOrMemberCallExpr E, llvm::CallBase **CallOrInvoke) {
2262 auto *CE = dyn_cast<const CXXMemberCallExpr *>(Val&: E);
2263 auto *D = dyn_cast<const CXXDeleteExpr *>(Val&: E);
2264 assert((CE != nullptr) ^ (D != nullptr));
2265 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
2266 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
2267
2268 GlobalDecl GD(Dtor, DtorType);
2269 const CGFunctionInfo *FInfo =
2270 &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
2271 llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(Info: *FInfo);
2272 CGCallee Callee = CGCallee::forVirtual(CE, MD: GD, Addr: This, FTy: Ty);
2273
2274 QualType ThisTy;
2275 if (CE) {
2276 ThisTy = CE->getObjectType();
2277 } else {
2278 ThisTy = D->getDestroyedType();
2279 }
2280
2281 CGF.EmitCXXDestructorCall(Dtor: GD, Callee, This: This.emitRawPointer(CGF), ThisTy,
2282 ImplicitParam: nullptr, ImplicitParamTy: QualType(), E: nullptr, CallOrInvoke);
2283 return nullptr;
2284}
2285
2286void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2287 CodeGenVTables &VTables = CGM.getVTables();
2288 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2289 VTables.EmitVTTDefinition(VTT, Linkage: CGM.getVTableLinkage(RD), RD);
2290}
2291
2292bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2293 const CXXRecordDecl *RD) const {
2294 // We don't emit available_externally vtables if we are in -fapple-kext mode
2295 // because kext mode does not permit devirtualization.
2296 if (CGM.getLangOpts().AppleKext)
2297 return false;
2298
  // If the vtable is hidden then it is not safe to emit an
  // available_externally copy of the vtable.
2301 if (isVTableHidden(RD))
2302 return false;
2303
2304 if (CGM.getCodeGenOpts().ForceEmitVTables)
2305 return true;
2306
  // A speculative vtable can only be generated if all virtual inline functions
  // defined by this class are emitted. The vtable in the final program
  // contains, for each virtual inline function not used in the current TU, a
  // function that is equivalent to the unused function. The function in the
  // actual vtable does not have to be declared under the same symbol (e.g., a
  // virtual destructor that can be substituted with its base class's
  // destructor). Since inline functions are emitted lazily, and this emission
  // does not account for speculative emission of a vtable, we might generate a
  // speculative vtable with references to inline functions that are not
  // emitted under that name. This can lead to problems when devirtualizing a
  // call to such a function, resulting in linking errors. Hence, if there are
  // any unused virtual inline functions, we cannot emit the speculative
  // vtable.
2319 // FIXME we can still emit a copy of the vtable if we
2320 // can emit definition of the inline functions.
2321 if (hasAnyUnusedVirtualInlineFunction(RD))
2322 return false;
2323
2324 // For a class with virtual bases, we must also be able to speculatively
2325 // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2326 // the vtable" and "can emit the VTT". For a base subobject, this means we
2327 // need to be able to emit non-virtual base vtables.
2328 if (RD->getNumVBases()) {
2329 for (const auto &B : RD->bases()) {
2330 auto *BRD = B.getType()->getAsCXXRecordDecl();
2331 assert(BRD && "no class for base specifier");
2332 if (B.isVirtual() || !BRD->isDynamicClass())
2333 continue;
2334 if (!canSpeculativelyEmitVTableAsBaseClass(RD: BRD))
2335 return false;
2336 }
2337 }
2338
2339 return true;
2340}
2341
2342bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2343 if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2344 return false;
2345
2346 if (RD->shouldEmitInExternalSource())
2347 return false;
2348
2349 // For a complete-object vtable (or more specifically, for the VTT), we need
2350 // to be able to speculatively emit the vtables of all dynamic virtual bases.
2351 for (const auto &B : RD->vbases()) {
2352 auto *BRD = B.getType()->getAsCXXRecordDecl();
2353 assert(BRD && "no class for base specifier");
2354 if (!BRD->isDynamicClass())
2355 continue;
2356 if (!canSpeculativelyEmitVTableAsBaseClass(RD: BRD))
2357 return false;
2358 }
2359
2360 return true;
2361}

static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
2363 Address InitialPtr,
2364 const CXXRecordDecl *UnadjustedClass,
2365 int64_t NonVirtualAdjustment,
2366 int64_t VirtualAdjustment,
2367 bool IsReturnAdjustment) {
2368 if (!NonVirtualAdjustment && !VirtualAdjustment)
2369 return InitialPtr.emitRawPointer(CGF);
2370
2371 Address V = InitialPtr.withElementType(ElemTy: CGF.Int8Ty);
2372
2373 // In a base-to-derived cast, the non-virtual adjustment is applied first.
2374 if (NonVirtualAdjustment && !IsReturnAdjustment) {
2375 V = CGF.Builder.CreateConstInBoundsByteGEP(Addr: V,
2376 Offset: CharUnits::fromQuantity(Quantity: NonVirtualAdjustment));
2377 }
2378
2379 // Perform the virtual adjustment if we have one.
2380 llvm::Value *ResultPtr;
2381 if (VirtualAdjustment) {
2382 llvm::Value *VTablePtr =
2383 CGF.GetVTablePtr(This: V, VTableTy: CGF.Int8PtrTy, VTableClass: UnadjustedClass);
2384
2385 llvm::Value *Offset;
2386 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
2387 Ty: CGF.Int8Ty, Ptr: VTablePtr, Idx0: VirtualAdjustment);
2388 if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
2389 // Load the adjustment offset from the vtable as a 32-bit int.
2390 Offset =
2391 CGF.Builder.CreateAlignedLoad(Ty: CGF.Int32Ty, Addr: OffsetPtr,
2392 Align: CharUnits::fromQuantity(Quantity: 4));
2393 } else {
2394 llvm::Type *PtrDiffTy =
2395 CGF.ConvertType(T: CGF.getContext().getPointerDiffType());
2396
2397 // Load the adjustment offset from the vtable.
2398 Offset = CGF.Builder.CreateAlignedLoad(Ty: PtrDiffTy, Addr: OffsetPtr,
2399 Align: CGF.getPointerAlign());
2400 }
2401 // Adjust our pointer.
2402 ResultPtr = CGF.Builder.CreateInBoundsGEP(Ty: V.getElementType(),
2403 Ptr: V.emitRawPointer(CGF), IdxList: Offset);
2404 } else {
2405 ResultPtr = V.emitRawPointer(CGF);
2406 }
2407
2408 // In a derived-to-base conversion, the non-virtual adjustment is
2409 // applied second.
2410 if (NonVirtualAdjustment && IsReturnAdjustment) {
2411 ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(Ty: CGF.Int8Ty, Ptr: ResultPtr,
2412 Idx0: NonVirtualAdjustment);
2413 }
2414
2415 return ResultPtr;
2416}
2417
2418llvm::Value *
2419ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF, Address This,
2420 const CXXRecordDecl *UnadjustedClass,
2421 const ThunkInfo &TI) {
2422 return performTypeAdjustment(CGF, InitialPtr: This, UnadjustedClass, NonVirtualAdjustment: TI.This.NonVirtual,
2423 VirtualAdjustment: TI.This.Virtual.Itanium.VCallOffsetOffset,
2424 /*IsReturnAdjustment=*/false);
2425}
2426
2427llvm::Value *
2428ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
2429 const CXXRecordDecl *UnadjustedClass,
2430 const ReturnAdjustment &RA) {
2431 return performTypeAdjustment(CGF, InitialPtr: Ret, UnadjustedClass, NonVirtualAdjustment: RA.NonVirtual,
2432 VirtualAdjustment: RA.Virtual.Itanium.VBaseOffsetOffset,
2433 /*IsReturnAdjustment=*/true);
2434}
2435
2436void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2437 RValue RV, QualType ResultType) {
2438 if (!isa<CXXDestructorDecl>(Val: CGF.CurGD.getDecl()))
2439 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2440
2441 // Destructor thunks in the ARM ABI have indeterminate results.
2442 llvm::Type *T = CGF.ReturnValue.getElementType();
2443 RValue Undef = RValue::get(V: llvm::UndefValue::get(T));
2444 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV: Undef, ResultType);
2445}
2446
2447/************************** Array allocation cookies **************************/
2448
2449CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2450 // The array cookie is a size_t; pad that up to the element alignment.
2451 // The cookie is actually right-justified in that space.
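  // For example, with an 8-byte size_t, 'new long double[n]' on x86-64
  // (16-byte preferred alignment) gets a 16-byte cookie whose last 8 bytes
  // hold the element count.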
2452 return std::max(a: CharUnits::fromQuantity(Quantity: CGM.SizeSizeInBytes),
2453 b: CGM.getContext().getPreferredTypeAlignInChars(T: elementType));
2454}
2455
2456Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2457 Address NewPtr,
2458 llvm::Value *NumElements,
2459 const CXXNewExpr *expr,
2460 QualType ElementType) {
2461 assert(requiresArrayCookie(expr));
2462
2463 unsigned AS = NewPtr.getAddressSpace();
2464
2465 ASTContext &Ctx = getContext();
2466 CharUnits SizeSize = CGF.getSizeSize();
2467
2468 // The size of the cookie.
2469 CharUnits CookieSize =
2470 std::max(a: SizeSize, b: Ctx.getPreferredTypeAlignInChars(T: ElementType));
2471 assert(CookieSize == getArrayCookieSizeImpl(ElementType));
2472
2473 // Compute an offset to the cookie.
2474 Address CookiePtr = NewPtr;
2475 CharUnits CookieOffset = CookieSize - SizeSize;
2476 if (!CookieOffset.isZero())
2477 CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(Addr: CookiePtr, Offset: CookieOffset);
2478
2479 // Write the number of elements into the appropriate slot.
2480 Address NumElementsPtr = CookiePtr.withElementType(ElemTy: CGF.SizeTy);
2481 llvm::Instruction *SI = CGF.Builder.CreateStore(Val: NumElements, Addr: NumElementsPtr);
2482
2483 // Handle the array cookie specially in ASan.
2484 if (CGM.getLangOpts().Sanitize.has(K: SanitizerKind::Address) && AS == 0 &&
2485 (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
2486 CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
2487 // The store to the CookiePtr does not need to be instrumented.
2488 SI->setNoSanitizeMetadata();
2489 llvm::FunctionType *FTy =
2490 llvm::FunctionType::get(Result: CGM.VoidTy, Params: NumElementsPtr.getType(), isVarArg: false);
2491 llvm::FunctionCallee F =
2492 CGM.CreateRuntimeFunction(Ty: FTy, Name: "__asan_poison_cxx_array_cookie");
2493 CGF.Builder.CreateCall(Callee: F, Args: NumElementsPtr.emitRawPointer(CGF));
2494 }
2495
2496 // Finally, compute a pointer to the actual data buffer by skipping
2497 // over the cookie completely.
2498 return CGF.Builder.CreateConstInBoundsByteGEP(Addr: NewPtr, Offset: CookieSize);
2499}
2500
2501llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2502 Address allocPtr,
2503 CharUnits cookieSize) {
  // The number of elements is right-justified in the cookie.
2505 Address numElementsPtr = allocPtr;
2506 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2507 if (!numElementsOffset.isZero())
2508 numElementsPtr =
2509 CGF.Builder.CreateConstInBoundsByteGEP(Addr: numElementsPtr, Offset: numElementsOffset);
2510
2511 unsigned AS = allocPtr.getAddressSpace();
2512 numElementsPtr = numElementsPtr.withElementType(ElemTy: CGF.SizeTy);
2513 if (!CGM.getLangOpts().Sanitize.has(K: SanitizerKind::Address) || AS != 0)
2514 return CGF.Builder.CreateLoad(Addr: numElementsPtr);
2515 // In asan mode emit a function call instead of a regular load and let the
2516 // run-time deal with it: if the shadow is properly poisoned return the
2517 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2518 // We can't simply ignore this load using nosanitize metadata because
2519 // the metadata may be lost.
2520 llvm::FunctionType *FTy =
2521 llvm::FunctionType::get(Result: CGF.SizeTy, Params: CGF.UnqualPtrTy, isVarArg: false);
2522 llvm::FunctionCallee F =
2523 CGM.CreateRuntimeFunction(Ty: FTy, Name: "__asan_load_cxx_array_cookie");
2524 return CGF.Builder.CreateCall(Callee: F, Args: numElementsPtr.emitRawPointer(CGF));
2525}
2526
2527CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2528 // ARM says that the cookie is always:
2529 // struct array_cookie {
2530 // std::size_t element_size; // element_size != 0
2531 // std::size_t element_count;
2532 // };
2533 // But the base ABI doesn't give anything an alignment greater than
2534 // 8, so we can dismiss this as typical ABI-author blindness to
2535 // actual language complexity and round up to the element alignment.
2536 return std::max(a: CharUnits::fromQuantity(Quantity: 2 * CGM.SizeSizeInBytes),
2537 b: CGM.getContext().getTypeAlignInChars(T: elementType));
2538}
2539
2540Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2541 Address newPtr,
2542 llvm::Value *numElements,
2543 const CXXNewExpr *expr,
2544 QualType elementType) {
2545 assert(requiresArrayCookie(expr));
2546
2547 // The cookie is always at the start of the buffer.
2548 Address cookie = newPtr;
2549
2550 // The first element is the element size.
2551 cookie = cookie.withElementType(ElemTy: CGF.SizeTy);
2552 llvm::Value *elementSize = llvm::ConstantInt::get(Ty: CGF.SizeTy,
2553 V: getContext().getTypeSizeInChars(T: elementType).getQuantity());
2554 CGF.Builder.CreateStore(Val: elementSize, Addr: cookie);
2555
2556 // The second element is the element count.
2557 cookie = CGF.Builder.CreateConstInBoundsGEP(Addr: cookie, Index: 1);
2558 CGF.Builder.CreateStore(Val: numElements, Addr: cookie);
2559
2560 // Finally, compute a pointer to the actual data buffer by skipping
2561 // over the cookie completely.
2562 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2563 return CGF.Builder.CreateConstInBoundsByteGEP(Addr: newPtr, Offset: cookieSize);
2564}
2565
2566llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2567 Address allocPtr,
2568 CharUnits cookieSize) {
2569 // The number of elements is at offset sizeof(size_t) relative to
2570 // the allocated pointer.
2571 Address numElementsPtr
2572 = CGF.Builder.CreateConstInBoundsByteGEP(Addr: allocPtr, Offset: CGF.getSizeSize());
2573
2574 numElementsPtr = numElementsPtr.withElementType(ElemTy: CGF.SizeTy);
2575 return CGF.Builder.CreateLoad(Addr: numElementsPtr);
2576}
2577
2578/*********************** Static local initialization **************************/
2579
2580static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2581 llvm::PointerType *GuardPtrTy) {
2582 // int __cxa_guard_acquire(__guard *guard_object);
2583 llvm::FunctionType *FTy =
2584 llvm::FunctionType::get(Result: CGM.getTypes().ConvertType(T: CGM.getContext().IntTy),
2585 Params: GuardPtrTy, /*isVarArg=*/false);
2586 return CGM.CreateRuntimeFunction(
2587 Ty: FTy, Name: "__cxa_guard_acquire",
2588 ExtraAttrs: llvm::AttributeList::get(C&: CGM.getLLVMContext(),
2589 Index: llvm::AttributeList::FunctionIndex,
2590 Kinds: llvm::Attribute::NoUnwind));
2591}
2592
2593static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2594 llvm::PointerType *GuardPtrTy) {
2595 // void __cxa_guard_release(__guard *guard_object);
2596 llvm::FunctionType *FTy =
2597 llvm::FunctionType::get(Result: CGM.VoidTy, Params: GuardPtrTy, /*isVarArg=*/false);
2598 return CGM.CreateRuntimeFunction(
2599 Ty: FTy, Name: "__cxa_guard_release",
2600 ExtraAttrs: llvm::AttributeList::get(C&: CGM.getLLVMContext(),
2601 Index: llvm::AttributeList::FunctionIndex,
2602 Kinds: llvm::Attribute::NoUnwind));
2603}
2604
2605static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2606 llvm::PointerType *GuardPtrTy) {
2607 // void __cxa_guard_abort(__guard *guard_object);
2608 llvm::FunctionType *FTy =
2609 llvm::FunctionType::get(Result: CGM.VoidTy, Params: GuardPtrTy, /*isVarArg=*/false);
2610 return CGM.CreateRuntimeFunction(
2611 Ty: FTy, Name: "__cxa_guard_abort",
2612 ExtraAttrs: llvm::AttributeList::get(C&: CGM.getLLVMContext(),
2613 Index: llvm::AttributeList::FunctionIndex,
2614 Kinds: llvm::Attribute::NoUnwind));
2615}
2616
2617namespace {
2618 struct CallGuardAbort final : EHScopeStack::Cleanup {
2619 llvm::GlobalVariable *Guard;
2620 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2621
2622 void Emit(CodeGenFunction &CGF, Flags flags) override {
2623 CGF.EmitNounwindRuntimeCall(callee: getGuardAbortFn(CGM&: CGF.CGM, GuardPtrTy: Guard->getType()),
2624 args: Guard);
2625 }
2626 };
2627}
2628
2629/// The ARM code here follows the Itanium code closely enough that we
2630/// just special-case it at particular places.
2631void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
2632 const VarDecl &D,
2633 llvm::GlobalVariable *var,
2634 bool shouldPerformInit) {
2635 CGBuilderTy &Builder = CGF.Builder;
2636
2637 // Inline variables that weren't instantiated from variable templates have
2638 // partially-ordered initialization within their translation unit.
2639 bool NonTemplateInline =
2640 D.isInline() &&
2641 !isTemplateInstantiation(Kind: D.getTemplateSpecializationKind());
2642
2643 // We only need to use thread-safe statics for local non-TLS variables and
2644 // inline variables; other global initialization is always single-threaded
2645 // or (through lazy dynamic loading in multiple threads) unsequenced.
2646 bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
2647 (D.isLocalVarDecl() || NonTemplateInline) &&
2648 !D.getTLSKind();
2649
2650 // If we have a global variable with internal linkage and thread-safe statics
2651 // are disabled, we can just let the guard variable be of type i8.
2652 bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
2653
2654 llvm::IntegerType *guardTy;
2655 CharUnits guardAlignment;
2656 if (useInt8GuardVariable) {
2657 guardTy = CGF.Int8Ty;
2658 guardAlignment = CharUnits::One();
2659 } else {
    // Guard variables are 64 bits in the generic ABI and the width of size_t
    // on ARM (i.e. 32-bit on AArch32, 64-bit on AArch64).
2662 if (UseARMGuardVarABI) {
2663 guardTy = CGF.SizeTy;
2664 guardAlignment = CGF.getSizeAlign();
2665 } else {
2666 guardTy = CGF.Int64Ty;
2667 guardAlignment =
2668 CharUnits::fromQuantity(Quantity: CGM.getDataLayout().getABITypeAlign(Ty: guardTy));
2669 }
2670 }
2671 llvm::PointerType *guardPtrTy = llvm::PointerType::get(
2672 C&: CGF.CGM.getLLVMContext(),
2673 AddressSpace: CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());
2674
2675 // Create the guard variable if we don't already have it (as we
2676 // might if we're double-emitting this function body).
2677 llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(D: &D);
2678 if (!guard) {
2679 // Mangle the name for the guard.
2680 SmallString<256> guardName;
2681 {
2682 llvm::raw_svector_ostream out(guardName);
2683 getMangleContext().mangleStaticGuardVariable(D: &D, out);
2684 }
2685
2686 // Create the guard variable with a zero-initializer.
2687 // Just absorb linkage, visibility and dll storage class from the guarded
2688 // variable.
2689 guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2690 false, var->getLinkage(),
2691 llvm::ConstantInt::get(Ty: guardTy, V: 0),
2692 guardName.str());
2693 guard->setDSOLocal(var->isDSOLocal());
2694 guard->setVisibility(var->getVisibility());
2695 guard->setDLLStorageClass(var->getDLLStorageClass());
2696 // If the variable is thread-local, so is its guard variable.
2697 guard->setThreadLocalMode(var->getThreadLocalMode());
2698 guard->setAlignment(guardAlignment.getAsAlign());
2699
2700 // The ABI says: "It is suggested that it be emitted in the same COMDAT
2701 // group as the associated data object." In practice, this doesn't work for
2702 // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2703 llvm::Comdat *C = var->getComdat();
2704 if (!D.isLocalVarDecl() && C &&
2705 (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2706 CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2707 guard->setComdat(C);
2708 } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2709 guard->setComdat(CGM.getModule().getOrInsertComdat(Name: guard->getName()));
2710 }
2711
2712 CGM.setStaticLocalDeclGuardAddress(D: &D, C: guard);
2713 }
2714
2715 Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);
2716
2717 // Test whether the variable has completed initialization.
2718 //
2719 // Itanium C++ ABI 3.3.2:
2720 // The following is pseudo-code showing how these functions can be used:
2721 // if (obj_guard.first_byte == 0) {
2722 // if ( __cxa_guard_acquire (&obj_guard) ) {
2723 // try {
2724 // ... initialize the object ...;
2725 // } catch (...) {
2726 // __cxa_guard_abort (&obj_guard);
2727 // throw;
2728 // }
2729 // ... queue object destructor with __cxa_atexit() ...;
2730 // __cxa_guard_release (&obj_guard);
2731 // }
2732 // }
2733 //
  // If threadsafe statics are enabled but we don't have inline atomics, just
  // call __cxa_guard_acquire unconditionally: the "inline" check wouldn't
  // actually be inline, and the user might not expect calls to __atomic
  // libcalls.
2737
2738 unsigned MaxInlineWidthInBits = CGF.getTarget().getMaxAtomicInlineWidth();
2739 llvm::BasicBlock *EndBlock = CGF.createBasicBlock(name: "init.end");
2740 if (!threadsafe || MaxInlineWidthInBits) {
2741 // Load the first byte of the guard variable.
2742 llvm::LoadInst *LI =
2743 Builder.CreateLoad(Addr: guardAddr.withElementType(ElemTy: CGM.Int8Ty));
2744
2745 // Itanium ABI:
2746 // An implementation supporting thread-safety on multiprocessor
2747 // systems must also guarantee that references to the initialized
2748 // object do not occur before the load of the initialization flag.
2749 //
2750 // In LLVM, we do this by marking the load Acquire.
2751 if (threadsafe)
2752 LI->setAtomic(Ordering: llvm::AtomicOrdering::Acquire);
2753
2754 // For ARM, we should only check the first bit, rather than the entire byte:
2755 //
2756 // ARM C++ ABI 3.2.3.1:
2757 // To support the potential use of initialization guard variables
2758 // as semaphores that are the target of ARM SWP and LDREX/STREX
2759 // synchronizing instructions we define a static initialization
2760 // guard variable to be a 4-byte aligned, 4-byte word with the
2761 // following inline access protocol.
2762 // #define INITIALIZED 1
2763 // if ((obj_guard & INITIALIZED) != INITIALIZED) {
2764 // if (__cxa_guard_acquire(&obj_guard))
2765 // ...
2766 // }
2767 //
2768 // and similarly for ARM64:
2769 //
2770 // ARM64 C++ ABI 3.2.2:
2771 // This ABI instead only specifies the value bit 0 of the static guard
2772 // variable; all other bits are platform defined. Bit 0 shall be 0 when the
2773 // variable is not initialized and 1 when it is.
2774 llvm::Value *V =
2775 (UseARMGuardVarABI && !useInt8GuardVariable)
2776 ? Builder.CreateAnd(LHS: LI, RHS: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: 1))
2777 : LI;
2778 llvm::Value *NeedsInit = Builder.CreateIsNull(Arg: V, Name: "guard.uninitialized");
2779
2780 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock(name: "init.check");
2781
2782 // Check if the first byte of the guard variable is zero.
2783 CGF.EmitCXXGuardedInitBranch(NeedsInit, InitBlock: InitCheckBlock, NoInitBlock: EndBlock,
2784 Kind: CodeGenFunction::GuardKind::VariableGuard, D: &D);
2785
2786 CGF.EmitBlock(BB: InitCheckBlock);
2787 }
2788
2789 // The semantics of dynamic initialization of variables with static or thread
2790 // storage duration depends on whether they are declared at block-scope. The
2791 // initialization of such variables at block-scope can be aborted with an
2792 // exception and later retried (per C++20 [stmt.dcl]p4), and recursive entry
2793 // to their initialization has undefined behavior (also per C++20
2794 // [stmt.dcl]p4). For such variables declared at non-block scope, exceptions
2795 // lead to termination (per C++20 [except.terminate]p1), and recursive
2796 // references to the variables are governed only by the lifetime rules (per
2797 // C++20 [class.cdtor]p2), which means such references are perfectly fine as
2798 // long as they avoid touching memory. As a result, block-scope variables must
2799 // not be marked as initialized until after initialization completes (unless
2800 // the mark is reverted following an exception), but non-block-scope variables
2801 // must be marked prior to initialization so that recursive accesses during
2802 // initialization do not restart initialization.
2803
2804 // Variables used when coping with thread-safe statics and exceptions.
2805 if (threadsafe) {
2806 // Call __cxa_guard_acquire.
2807 llvm::Value *V
2808 = CGF.EmitNounwindRuntimeCall(callee: getGuardAcquireFn(CGM, GuardPtrTy: guardPtrTy), args: guard);
2809
2810 llvm::BasicBlock *InitBlock = CGF.createBasicBlock(name: "init");
2811
2812 Builder.CreateCondBr(Cond: Builder.CreateIsNotNull(Arg: V, Name: "tobool"),
2813 True: InitBlock, False: EndBlock);
2814
2815 // Call __cxa_guard_abort along the exceptional edge.
2816 CGF.EHStack.pushCleanup<CallGuardAbort>(Kind: EHCleanup, A: guard);
2817
2818 CGF.EmitBlock(BB: InitBlock);
2819 } else if (!D.isLocalVarDecl()) {
2820 // For non-local variables, store 1 into the first byte of the guard
2821 // variable before the object initialization begins so that references
2822 // to the variable during initialization don't restart initialization.
2823 Builder.CreateStore(Val: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: 1),
2824 Addr: guardAddr.withElementType(ElemTy: CGM.Int8Ty));
2825 }
2826
2827 // Emit the initializer and add a global destructor if appropriate.
2828 CGF.EmitCXXGlobalVarDeclInit(D, GV: var, PerformInit: shouldPerformInit);
2829
2830 if (threadsafe) {
2831 // Pop the guard-abort cleanup if we pushed one.
2832 CGF.PopCleanupBlock();
2833
2834 // Call __cxa_guard_release. This cannot throw.
2835 CGF.EmitNounwindRuntimeCall(callee: getGuardReleaseFn(CGM, GuardPtrTy: guardPtrTy),
2836 args: guardAddr.emitRawPointer(CGF));
2837 } else if (D.isLocalVarDecl()) {
2838 // For local variables, store 1 into the first byte of the guard variable
2839 // after the object initialization completes so that initialization is
2840 // retried if initialization is interrupted by an exception.
2841 Builder.CreateStore(Val: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: 1),
2842 Addr: guardAddr.withElementType(ElemTy: CGM.Int8Ty));
2843 }
2844
2845 CGF.EmitBlock(BB: EndBlock);
2846}
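
// For reference, a minimal sketch of what the code above emits (the function
// 'compute' and the guard symbol are hypothetical; the real guard test is an
// acquire load of the guard's first byte, or of bit 0 under the ARM ABI):
//
//   int &counter() { static int x = compute(); return x; }
//
// behaves, with thread-safe statics enabled, roughly like:
//
//   if (guard.first_byte == 0 && __cxa_guard_acquire(&guard)) {
//     x = compute();              // __cxa_guard_abort runs if this throws
//     __cxa_guard_release(&guard);
//   }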

/// Register a global destructor using __cxa_atexit.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::FunctionCallee dtor,
                                        llvm::Constant *addr, bool TLS) {
  assert(!CGF.getTarget().getTriple().isOSAIX() &&
         "unexpected call to emitGlobalDtorWithCXAAtExit");
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
         "__cxa_atexit is disabled");
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.
  llvm::Type *dtorTy = CGF.UnqualPtrTy;

  // Preserve address space of addr.
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
  auto AddrPtrTy = AddrAS ? llvm::PointerType::get(CGF.getLLVMContext(), AddrAS)
                          : CGF.Int8PtrTy;

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = {dtorTy, AddrPtrTy, handle->getType()};
  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
  llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
    fn->setDoesNotThrow();

  const auto &Context = CGF.CGM.getContext();
  FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/false));
  QualType fnType =
      Context.getFunctionType(Context.VoidTy, {Context.VoidPtrTy}, EPI);
  llvm::Constant *dtorCallee = cast<llvm::Constant>(dtor.getCallee());
  dtorCallee = CGF.CGM.getFunctionPointer(dtorCallee, fnType);

  if (!addr)
    // addr is null when we are trying to register a dtor annotated with
    // __attribute__((destructor)) in a constructor function. Using null here is
    // okay because this argument is just passed back to the destructor
    // function.
    addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);

  llvm::Value *args[] = {dtorCallee, addr, handle};
  CGF.EmitNounwindRuntimeCall(atexit, args);
}
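
// For reference, a sketch of the registration this function emits (the
// variable 'w' and its destructor stub are hypothetical): for a global
//
//   Widget w;
//
// we emit, at the end of w's initialization, the equivalent of
//
//   __cxa_atexit(&widget_dtor_stub, &w, &__dso_handle);
//
// with __cxa_thread_atexit (or _tlv_atexit on Darwin) substituted when the
// variable is thread_local.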

static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
                                                   StringRef FnName) {
  // Create a function that registers/unregisters destructors that have the
  // same priority.
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
  llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());

  return GlobalInitOrCleanupFn;
}

void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalCleanupFnName =
        std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);

    llvm::Function *GlobalCleanupFn =
        createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Get the destructor function type, void(*)(void).
    llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);

    // Destructor functions are run/unregistered in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    auto itv = Dtors.rbegin();
    while (itv != Dtors.rend()) {
      llvm::Function *Dtor = *itv;

      // We're assuming that the destructor function is something we can
      // reasonably call with the correct CC.
      llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(Dtor);
      llvm::Value *NeedsDestruct =
          CGF.Builder.CreateIsNull(V, "needs_destruct");

      llvm::BasicBlock *DestructCallBlock =
          CGF.createBasicBlock("destruct.call");
      llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
          (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
      // Check if unatexit returns a value of 0. If it does, jump to
      // DestructCallBlock, otherwise jump to EndBlock directly.
      CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

      CGF.EmitBlock(DestructCallBlock);

      // Emit the call to casted Dtor.
      llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, Dtor);
      // Make sure the call and the callee agree on calling convention.
      CI->setCallingConv(Dtor->getCallingConv());

      CGF.EmitBlock(EndBlock);

      itv++;
    }

    CGF.FinishFunction();
    AddGlobalDtor(GlobalCleanupFn, Priority);
  }
}
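
// For reference, assuming two destructor stubs dtor1 and dtor2 registered at
// priority 101 (hypothetical names), the cleanup function generated above is
// shaped like:
//
//   void __GLOBAL_cleanup_101() {
//     if (unatexit(dtor2) == 0) dtor2(); // registered last, unregistered first
//     if (unatexit(dtor1) == 0) dtor1();
//   }
//
// i.e. a destructor only runs here if unatexit() reports that its atexit
// registration was still pending.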

void CodeGenModule::registerGlobalDtorsWithAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalInitFnName =
        std::string("__GLOBAL_init_") + llvm::to_string(Priority);
    llvm::Function *GlobalInitFn =
        createGlobalInitOrCleanupFn(*this, GlobalInitFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Since constructor functions are run in non-descending order of their
    // priorities, destructors are registered in non-descending order of their
    // priorities, and since destructor functions are run in the reverse order
    // of their registration, destructor functions are run in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    for (auto *Dtor : Dtors) {
      // Register the destructor function calling __cxa_atexit if it is
      // available. Otherwise fall back on calling atexit.
      if (getCodeGenOpts().CXAAtExit) {
        emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
      } else {
        // We're assuming that the destructor function is something we can
        // reasonably call with the correct CC.
        CGF.registerGlobalDtorWithAtExit(Dtor);
      }
    }

    CGF.FinishFunction();
    AddGlobalCtor(GlobalInitFn, Priority);
  }

  if (getCXXABI().useSinitAndSterm())
    unregisterGlobalDtorsWithUnAtExit();
}

/// Register a global destructor as best as we know how.
void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                                       llvm::FunctionCallee dtor,
                                       llvm::Constant *addr) {
  if (D.isNoDestroy(CGM.getContext()))
    return;

  // HLSL doesn't support atexit.
  if (CGM.getLangOpts().HLSL)
    return CGM.AddCXXDtorEntry(dtor, addr);

  // OpenMP offloading supports C++ constructors and destructors but we do not
  // always have 'atexit' available. Instead lower these to use the LLVM global
  // destructors which we can handle directly in the runtime. Note that this is
  // not strictly 1-to-1 with using `atexit` because we no longer tear down
  // globals in reverse order of when they were constructed.
  if (!CGM.getLangOpts().hasAtExit() && !D.isStaticLocal())
    return CGF.registerGlobalDtorWithLLVM(D, dtor, addr);

  // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
  // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
  // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
  // We can always use __cxa_thread_atexit.
  if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
    return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());

  // In Apple kexts, we want to add a global destructor entry.
  // FIXME: shouldn't this be guarded by some variable?
  if (CGM.getLangOpts().AppleKext) {
    // Generate a global destructor entry.
    return CGM.AddCXXDtorEntry(dtor, addr);
  }

  CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
}
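
// For reference, the dispatch above reduces (ignoring the HLSL, OpenMP and
// kext special cases) to: thread_local variables use __cxa_thread_atexit,
// -fuse-cxa-atexit globals use __cxa_atexit, and everything else falls back
// to a plain atexit() registration via registerGlobalDtorWithAtExit.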

static bool isThreadWrapperReplaceable(const VarDecl *VD,
                                       CodeGen::CodeGenModule &CGM) {
  assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
  // Darwin prefers to have references to thread local variables go through
  // the thread wrapper instead of directly referencing the backing variable.
  return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
         CGM.getTarget().getTriple().isOSDarwin();
}

/// Get the appropriate linkage for the wrapper function. This is essentially
/// the weak form of the variable's linkage; every translation unit which needs
/// the wrapper emits a copy, and we want the linker to merge them.
static llvm::GlobalValue::LinkageTypes
getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
  llvm::GlobalValue::LinkageTypes VarLinkage =
      CGM.getLLVMLinkageVarDefinition(VD);

  // For internal linkage variables, we don't need an external or weak wrapper.
  if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
    return VarLinkage;

  // If the thread wrapper is replaceable, give it appropriate linkage.
  if (isThreadWrapperReplaceable(VD, CGM))
    if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
        !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
      return VarLinkage;
  return llvm::GlobalValue::WeakODRLinkage;
}

llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
  }

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      getContext().getPointerType(RetQT), FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
                             WrapperName.str(), &CGM.getModule());

  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage())
    if (!isThreadWrapperReplaceable(VD, CGM) ||
        llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
        llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
        VD->getVisibility() == HiddenVisibility)
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
  }

  ThreadWrappers.push_back({VD, Wrapper});
  return Wrapper;
}
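
// For reference (Itanium mangling): given
//
//   thread_local int tls_var = compute();   // hypothetical example
//
// the wrapper created here is _ZTW7tls_var; cross-TU accesses to tls_var call
// the wrapper, whose body is filled in by EmitThreadLocalInitFuncs below.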

void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    if (isTemplateInstantiation(
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(CXXThreadLocalInits[I]);
  }

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
                                                     SourceLocation(),
                                                     /*TLS=*/true);
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
        InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
      getOrCreateThreadLocalWrapper(VD, GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go
    // through the thread wrapper. This means that we cannot attempt to create
    // a thread wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(InitFnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(
          GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      // Don't mark an extern_weak function DSO local on windows.
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
        Init->setDSOLocal(Var->isDSOLocal());
    }

    llvm::LLVMContext &Context = CGM.getModule().getContext();

    // The linker on AIX is not happy with missing weak symbols. However,
    // other TUs will not know whether the initialization routine exists,
    // so create an empty init function to satisfy the linker.
    // This is needed whenever a thread wrapper function is not used, and
    // also when the symbol is weak.
    if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
        isEmittedWithConstantInitializer(VD, /*InspectInitForWeakDef=*/true) &&
        !mayNeedDestruction(VD)) {
      // Init should be null. If it were non-null, then the logic above would
      // either be defining the function to be an alias or declaring the
      // function with the expectation that the definition of the variable
      // is elsewhere.
      assert(Init == nullptr && "Expected Init to be null.");

      llvm::Function *Func = llvm::Function::Create(
          InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
                                    cast<llvm::Function>(Func),
                                    /*IsThunk=*/false);
      // Create a function body that just returns.
      llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
      CGBuilderTy Builder(CGM, Entry);
      Builder.CreateRetVoid();
    }

    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (HasConstantInitialization) {
      // No dynamic initialization to invoke.
    } else if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
        if (isThreadWrapperReplaceable(VD, CGM)) {
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
          llvm::Function *Fn =
              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
        }
      }
    } else if (CGM.getTriple().isOSAIX()) {
      // On AIX, unless the variable is constinit and is neither of class type
      // nor of (possibly multi-dimensional) array-of-class type, thread_local
      // vars will have init routines regardless of whether they are
      // const-initialized. Since the routine is guaranteed to exist, we can
      // unconditionally call it without testing for its existence. This
      // avoids potentially unresolved weak symbols which the AIX linker
      // isn't happy with.
      Builder.CreateCall(InitFnTy, Init);
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(InitFnTy, Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Builder.CreateThreadLocalAddress(Var);

    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Var->getValueType(), Val, Align);
    }
    Val = Builder.CreateAddrSpaceCast(Val, Wrapper->getReturnType());

    Builder.CreateRet(Val);
  }
}
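
// For reference, the wrapper body emitted above has roughly this shape for the
// hypothetical 'thread_local int tls_var' example (sketch only; _ZTH7tls_var
// is the per-variable init function, which may be an extern_weak declaration):
//
//   int *_ZTW7tls_var() {
//     if (&_ZTH7tls_var != nullptr)   // test omitted when this TU defines it
//       _ZTH7tls_var();
//     return &tls_var;
//   }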

LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
                                                   const VarDecl *VD,
                                                   QualType LValType) {
  llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
  llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);

  llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
  CallVal->setCallingConv(Wrapper->getCallingConv());

  LValue LV;
  if (VD->getType()->isReferenceType())
    LV = CGF.MakeNaturalAlignRawAddrLValue(CallVal, LValType);
  else
    LV = CGF.MakeRawAddrLValue(CallVal, LValType,
                               CGF.getContext().getDeclAlign(VD));
  // FIXME: need setObjCGCLValueClass?
  return LV;
}

/// Return whether the given global decl needs a VTT parameter, which it does
/// if it's a base constructor or destructor with virtual bases.
bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());

  // If we don't have any virtual bases, just return early.
  if (!MD->getParent()->getNumVBases())
    return false;

  // Check if we have a base constructor.
  if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
    return true;

  // Check if we have a base destructor.
  if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
    return true;

  return false;
}
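
// For reference: in
//
//   struct A { virtual ~A(); };
//   struct B : virtual A { B(); };
//
// B's base-object constructor (the C2 variant) takes a VTT parameter so it
// can install the correct vtables when B is constructed as a base of a
// further-derived class; the complete-object variant constructs the virtual
// base itself and takes none.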

llvm::Constant *
ItaniumCXXABI::getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD) {
  SmallString<256> MethodName;
  llvm::raw_svector_ostream Out(MethodName);
  getMangleContext().mangleCXXName(MD, Out);
  MethodName += "_vfpthunk_";
  StringRef ThunkName = MethodName.str();
  llvm::Function *ThunkFn;
  if ((ThunkFn = cast_or_null<llvm::Function>(
           CGM.getModule().getNamedValue(ThunkName))))
    return ThunkFn;

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeCXXMethodDeclaration(MD);
  llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo);
  llvm::GlobalValue::LinkageTypes Linkage =
      MD->isExternallyVisible() ? llvm::GlobalValue::LinkOnceODRLinkage
                                : llvm::GlobalValue::InternalLinkage;
  ThunkFn =
      llvm::Function::Create(ThunkTy, Linkage, ThunkName, &CGM.getModule());
  if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
    ThunkFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
  assert(ThunkFn->getName() == ThunkName && "name was uniqued!");

  CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn, /*IsThunk=*/true);
  CGM.SetLLVMFunctionAttributesForDefinition(MD, ThunkFn);

  // Stack protection sometimes gets inserted after the musttail call.
  ThunkFn->removeFnAttr(llvm::Attribute::StackProtect);
  ThunkFn->removeFnAttr(llvm::Attribute::StackProtectStrong);
  ThunkFn->removeFnAttr(llvm::Attribute::StackProtectReq);

  // Start codegen.
  CodeGenFunction CGF(CGM);
  CGF.CurGD = GlobalDecl(MD);
  CGF.CurFuncIsThunk = true;

  // Build FunctionArgs.
  FunctionArgList FunctionArgs;
  CGF.BuildFunctionArgList(CGF.CurGD, FunctionArgs);

  CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo,
                    FunctionArgs, MD->getLocation(), SourceLocation());
  llvm::Value *ThisVal = loadIncomingCXXThis(CGF);
  setCXXABIThisValue(CGF, ThisVal);

  CallArgList CallArgs;
  for (const VarDecl *VD : FunctionArgs)
    CGF.EmitDelegateCallArg(CallArgs, VD, SourceLocation());

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, /*this*/ 1);
  const CGFunctionInfo &CallInfo =
      CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT, Required, 0);
  CGCallee Callee = CGCallee::forVirtual(nullptr, GlobalDecl(MD),
                                         getThisAddress(CGF), ThunkTy);
  llvm::CallBase *CallOrInvoke;
  CGF.EmitCall(CallInfo, Callee, ReturnValueSlot(), CallArgs, &CallOrInvoke,
               /*IsMustTail=*/true, SourceLocation(),
               /*IsVirtualFunctionPointerThunk=*/true);
  auto *Call = cast<llvm::CallInst>(CallOrInvoke);
  Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
  if (Call->getType()->isVoidTy())
    CGF.Builder.CreateRetVoid();
  else
    CGF.Builder.CreateRet(Call);

  // Finish the function to maintain CodeGenFunction invariants.
  // FIXME: Don't emit unreachable code.
  CGF.EmitBlock(CGF.createBasicBlock());
  CGF.FinishFunction();
  return ThunkFn;
}
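
// For reference (illustrative): with
//
//   struct S { virtual void f(); };
//
// taking &S::f under this scheme produces a pointer to a thunk that is
// functionally equivalent to
//
//   void thunk(S *s) { s->f(); }   // emitted as a musttail virtual call
//
// letting the member-pointer representation hold an ordinary function
// pointer rather than a vtable offset.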

namespace {
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;  // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI;  // Per-module state.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty, llvm::Constant *StorageAddress);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags.
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10,

    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
    //PTI_TransactionSafe = 0x20,

    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
    PTI_Noexcept = 0x40,
  };

  // VMI type info flags.
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags.
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
  /// link to an existing RTTI descriptor if one already exists.
  llvm::Constant *BuildTypeInfo(QualType Ty);

  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
  llvm::Constant *BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
};
}

llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
    QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);

  // We know that the mangled name of the type starts at index 4 of the
  // mangled name of the typename, so we can just index into it in order to
  // get the mangled name of the type.
  llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
                                                            Name.substr(4));
  auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);

  llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, Init->getType(), Linkage, Align.getAsAlign());

  GV->setInitializer(Init);

  return GV;
}
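
// For reference: for a type N::X, mangleCXXRTTIName produces the symbol
// _ZTSN1N1XE, and Name.substr(4) strips the fixed four-character "_ZTS"
// prefix so the stored string is exactly the mangled type, "N1N1XE".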

llvm::Constant *
ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
  // Mangle the RTTI name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);

  // Look for an existing global.
  llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);

  if (!GV) {
    // Create a new global variable.
    // Note for the future: If we would ever like to do deferred emission of
    // RTTI, check if emitting vtables opportunistically needs any adjustment.

    GV = new llvm::GlobalVariable(
        CGM.getModule(), CGM.GlobalsInt8PtrTy,
        /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr, Name);
    const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
    CGM.setGVProperties(GV, RD);
    // Import the typeinfo symbol when all non-inline virtual methods are
    // imported.
    if (CGM.getTarget().hasPS4DLLImportExport()) {
      if (RD && CXXRecordNonInlineHasAttr<DLLImportAttr>(RD)) {
        GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
        CGM.setDSOLocal(GV);
      }
    }
  }

  return GV;
}

/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
/// info for that type is defined in the standard library.
static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
  // Itanium C++ ABI 2.9.2:
  //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
  //   the run-time support library. Specifically, the run-time support
  //   library should contain type_info objects for the types X, X* and
  //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
  //   unsigned char, signed char, short, unsigned short, int, unsigned int,
  //   long, unsigned long, long long, unsigned long long, float, double,
  //   long double, char16_t, char32_t, and the IEEE 754r decimal and
  //   half-precision floating point types.
  //
  // GCC also emits RTTI for __int128.
  // FIXME: We do not emit RTTI information for decimal types here.

  // Types added here must also be added to EmitFundamentalRTTIDescriptors.
  switch (Ty->getKind()) {
  case BuiltinType::Void:
  case BuiltinType::NullPtr:
  case BuiltinType::Bool:
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U:
  case BuiltinType::Char_U:
  case BuiltinType::Char_S:
  case BuiltinType::UChar:
  case BuiltinType::SChar:
  case BuiltinType::Short:
  case BuiltinType::UShort:
  case BuiltinType::Int:
  case BuiltinType::UInt:
  case BuiltinType::Long:
  case BuiltinType::ULong:
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
  case BuiltinType::Half:
  case BuiltinType::Float:
  case BuiltinType::Double:
  case BuiltinType::LongDouble:
  case BuiltinType::Float16:
  case BuiltinType::Float128:
  case BuiltinType::Ibm128:
  case BuiltinType::Char8:
  case BuiltinType::Char16:
  case BuiltinType::Char32:
  case BuiltinType::Int128:
  case BuiltinType::UInt128:
    return true;

#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
  case BuiltinType::OCLSampler:
  case BuiltinType::OCLEvent:
  case BuiltinType::OCLClkEvent:
  case BuiltinType::OCLQueue:
  case BuiltinType::OCLReserveID:
#define SVE_TYPE(Name, Id, SingletonId) \
  case BuiltinType::Id:
#include "clang/Basic/AArch64ACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size) \
  case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
  case BuiltinType::ShortAccum:
  case BuiltinType::Accum:
  case BuiltinType::LongAccum:
  case BuiltinType::UShortAccum:
  case BuiltinType::UAccum:
  case BuiltinType::ULongAccum:
  case BuiltinType::ShortFract:
  case BuiltinType::Fract:
  case BuiltinType::LongFract:
  case BuiltinType::UShortFract:
  case BuiltinType::UFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatShortAccum:
  case BuiltinType::SatAccum:
  case BuiltinType::SatLongAccum:
  case BuiltinType::SatUShortAccum:
  case BuiltinType::SatUAccum:
  case BuiltinType::SatULongAccum:
  case BuiltinType::SatShortFract:
  case BuiltinType::SatFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::SatUShortFract:
  case BuiltinType::SatUFract:
  case BuiltinType::SatULongFract:
  case BuiltinType::BFloat16:
    return false;

  case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
  case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
    llvm_unreachable("asking for RTTI for a placeholder type!");

  case BuiltinType::ObjCId:
  case BuiltinType::ObjCClass:
  case BuiltinType::ObjCSel:
    llvm_unreachable("FIXME: Objective-C types are unsupported!");
  }

  llvm_unreachable("Invalid BuiltinType Kind!");
}

static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
  QualType PointeeTy = PointerTy->getPointeeType();
  const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
  if (!BuiltinTy)
    return false;

  // Check the qualifiers.
  Qualifiers Quals = PointeeTy.getQualifiers();
  Quals.removeConst();

  if (!Quals.empty())
    return false;

  return TypeInfoIsInStandardLibrary(BuiltinTy);
}

/// IsStandardLibraryRTTIDescriptor - Returns whether the type
/// information for the given type exists in the standard library.
static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
  // Type info for builtin types is defined in the standard library.
  if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
    return TypeInfoIsInStandardLibrary(BuiltinTy);

  // Type info for some pointer types to builtin types is defined in the
  // standard library.
  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
    return TypeInfoIsInStandardLibrary(PointerTy);

  return false;
}

/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
/// the given type exists somewhere else, meaning that we should not emit the
/// type information in this translation unit. Assumes that it is not a
/// standard-library type.
static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
                                            QualType Ty) {
  ASTContext &Context = CGM.getContext();

  // If RTTI is disabled, assume it might be disabled in the
  // translation unit that defines any potential key function, too.
  if (!Context.getLangOpts().RTTI) return false;

  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!RD->hasDefinition())
      return false;

    if (!RD->isDynamicClass())
      return false;

    // FIXME: this may need to be reconsidered if the key function
    // changes.
    // N.B. We must always emit the RTTI data ourselves if there exists a key
    // function.
    bool IsDLLImport = RD->hasAttr<DLLImportAttr>();

    // Don't import the RTTI but emit it locally.
    if (CGM.getTriple().isOSCygMing())
      return false;

    if (CGM.getVTables().isVTableExternal(RD)) {
      if (CGM.getTarget().hasPS4DLLImportExport())
        return true;

      return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
                 ? false
                 : true;
    }
    if (IsDLLImport)
      return true;
  }

  return false;
}

/// IsIncompleteClassType - Returns whether the given record type is incomplete.
static bool IsIncompleteClassType(const RecordType *RecordTy) {
  return !RecordTy->getDecl()->isCompleteDefinition();
}

/// ContainsIncompleteClassType - Returns whether the given type contains an
/// incomplete class type. This is true if
///
///   * The given type is an incomplete class type.
///   * The given type is a pointer type whose pointee type contains an
///     incomplete class type.
///   * The given type is a member pointer type whose class is an incomplete
///     class type.
///   * The given type is a member pointer type whose pointee type contains an
///     incomplete class type.
static bool ContainsIncompleteClassType(QualType Ty) {
  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
    if (IsIncompleteClassType(RecordTy))
      return true;
  }

  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
    return ContainsIncompleteClassType(PointerTy->getPointeeType());

  if (const MemberPointerType *MemberPointerTy =
          dyn_cast<MemberPointerType>(Ty)) {
    // Check if the class type is incomplete.
    const auto *ClassType = cast<RecordType>(
        MemberPointerTy->getMostRecentCXXRecordDecl()->getTypeForDecl());
    if (IsIncompleteClassType(ClassType))
      return true;

    return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
  }

  return false;
}

// CanUseSingleInheritance - Return whether the given record decl has a "single,
// public, non-virtual base at offset zero (i.e. the derived class is dynamic
// iff the base is)", according to Itanium C++ ABI, 2.9.5p6b.
static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
  // Check the number of bases.
  if (RD->getNumBases() != 1)
    return false;

  // Get the base.
  CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();

  // Check that the base is not virtual.
  if (Base->isVirtual())
    return false;

  // Check that the base is public.
  if (Base->getAccessSpecifier() != AS_public)
    return false;

  // Check that the class is dynamic iff the base is.
  auto *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
  if (!BaseDecl->isEmpty() &&
      BaseDecl->isDynamicClass() != RD->isDynamicClass())
    return false;

  return true;
}
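
// For reference:
//
//   struct B { virtual ~B(); };
//   struct D1 : B { };            // single public non-virtual base: ok
//   struct D2 : virtual B { };    // virtual base: needs __vmi_class_type_info
//   struct D3 : protected B { };  // non-public base: needs __vmi_class_type_info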

void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty,
                                            llvm::Constant *StorageAddress) {
  // abi::__class_type_info.
  static const char * const ClassTypeInfo =
      "_ZTVN10__cxxabiv117__class_type_infoE";
  // abi::__si_class_type_info.
  static const char * const SIClassTypeInfo =
      "_ZTVN10__cxxabiv120__si_class_type_infoE";
  // abi::__vmi_class_type_info.
  static const char * const VMIClassTypeInfo =
      "_ZTVN10__cxxabiv121__vmi_class_type_infoE";

  const char *VTableName = nullptr;

  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    llvm_unreachable("Pipe types shouldn't get here");

  case Type::ArrayParameter:
    llvm_unreachable("Array Parameter types should not get here.");

  case Type::Builtin:
  case Type::BitInt:
  // GCC treats vector and complex types as fundamental types.
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::Atomic:
  // FIXME: GCC treats block pointers as fundamental types?!
  case Type::BlockPointer:
    // abi::__fundamental_type_info.
    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // abi::__array_type_info.
    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // abi::__function_type_info.
    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
    break;

  case Type::Enum:
    // abi::__enum_type_info.
    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
        cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());

    if (!RD->hasDefinition() || !RD->getNumBases()) {
      VTableName = ClassTypeInfo;
    } else if (CanUseSingleInheritance(RD)) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = VMIClassTypeInfo;
    }

    break;
  }

  case Type::ObjCObject:
    // Ignore protocol qualifiers.
    Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();

    // Handle id and Class.
    if (isa<BuiltinType>(Ty)) {
      VTableName = ClassTypeInfo;
      break;
    }

    assert(isa<ObjCInterfaceType>(Ty));
    [[fallthrough]];

  case Type::ObjCInterface:
    if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = ClassTypeInfo;
    }
    break;

  case Type::ObjCObjectPointer:
  case Type::Pointer:
    // abi::__pointer_type_info.
    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
    break;

  case Type::MemberPointer:
    // abi::__pointer_to_member_type_info.
    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
    break;

  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    llvm_unreachable("HLSL doesn't support virtual functions");
  }

  llvm::Constant *VTable = nullptr;

  // Check if the alias exists. If it doesn't, then get or create the global.
  if (CGM.getItaniumVTableContext().isRelativeLayout())
    VTable = CGM.getModule().getNamedAlias(VTableName);
  if (!VTable) {
    llvm::Type *Ty = llvm::ArrayType::get(CGM.GlobalsInt8PtrTy, 0);
    VTable = CGM.getModule().getOrInsertGlobal(VTableName, Ty);
  }

  CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));

  llvm::Type *PtrDiffTy =
      CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());

  // The vtable address point is 2.
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // The vtable address point is 8 bytes after its start:
    // 4 for the offset to top + 4 for the relative offset to rtti.
    llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
    VTable =
        llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
  } else {
    llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
    VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.GlobalsInt8PtrTy,
                                                          VTable, Two);
  }

  if (const auto &Schema =
          CGM.getCodeGenOpts().PointerAuth.CXXTypeInfoVTablePointer)
    VTable = CGM.getConstantSignedPointer(
        VTable, Schema,
        Schema.isAddressDiscriminated() ? StorageAddress : nullptr,
        GlobalDecl(), QualType(Ty, 0));

  Fields.push_back(VTable);
}
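
// For reference: in the classic layout the two ptrdiff_t-sized entries
// skipped above are the offset-to-top and RTTI slots, so with 8-byte
// pointers the address point is vtable-symbol + 16; the relative layout
// stores those as two i32 fields, hence the fixed 8-byte offset.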
4016
4017/// Return the linkage that the type info and type info name constants
4018/// should have for the given type.
4019static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
4020 QualType Ty) {
4021 // Itanium C++ ABI 2.9.5p7:
4022 // In addition, it and all of the intermediate abi::__pointer_type_info
4023 // structs in the chain down to the abi::__class_type_info for the
4024 // incomplete class type must be prevented from resolving to the
4025 // corresponding type_info structs for the complete class type, possibly
4026 // by making them local static objects. Finally, a dummy class RTTI is
4027 // generated for the incomplete type that will not resolve to the final
4028 // complete class RTTI (because the latter need not exist), possibly by
4029 // making it a local static object.
4030 if (ContainsIncompleteClassType(Ty))
4031 return llvm::GlobalValue::InternalLinkage;
4032
4033 switch (Ty->getLinkage()) {
4034 case Linkage::Invalid:
4035 llvm_unreachable("Linkage hasn't been computed!");
4036
4037 case Linkage::None:
4038 case Linkage::Internal:
4039 case Linkage::UniqueExternal:
4040 return llvm::GlobalValue::InternalLinkage;
4041
4042 case Linkage::VisibleNone:
4043 case Linkage::Module:
4044 case Linkage::External:
4045 // RTTI is not enabled, which means that this type info struct is going
4046 // to be used for exception handling. Give it linkonce_odr linkage.
4047 if (!CGM.getLangOpts().RTTI)
4048 return llvm::GlobalValue::LinkOnceODRLinkage;
4049
4050 if (const RecordType *Record = dyn_cast<RecordType>(Val&: Ty)) {
4051 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: Record->getDecl());
4052 if (RD->hasAttr<WeakAttr>())
4053 return llvm::GlobalValue::WeakODRLinkage;
4054 if (CGM.getTriple().isWindowsItaniumEnvironment())
4055 if (RD->hasAttr<DLLImportAttr>() &&
4056 ShouldUseExternalRTTIDescriptor(CGM, Ty))
4057 return llvm::GlobalValue::ExternalLinkage;
4058 // MinGW always uses LinkOnceODRLinkage for type info.
4059 if (RD->isDynamicClass() &&
4060 !CGM.getContext().getTargetInfo().getTriple().isOSCygMing())
4061 return CGM.getVTableLinkage(RD);
4062 }
4063
4064 return llvm::GlobalValue::LinkOnceODRLinkage;
4065 }
4066
4067 llvm_unreachable("Invalid linkage!");
4068}
4069
4070llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
4071 // We want to operate on the canonical type.
4072 Ty = Ty.getCanonicalType();
4073
4074 // Check if we've already emitted an RTTI descriptor for this type.
4075 SmallString<256> Name;
4076 llvm::raw_svector_ostream Out(Name);
4077 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T: Ty, Out);
4078
4079 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
4080 if (OldGV && !OldGV->isDeclaration()) {
4081 assert(!OldGV->hasAvailableExternallyLinkage() &&
4082 "available_externally typeinfos not yet implemented");
4083
4084 return OldGV;
4085 }
4086
4087 // Check if there is already an external RTTI descriptor for this type.
4088 if (IsStandardLibraryRTTIDescriptor(Ty) ||
4089 ShouldUseExternalRTTIDescriptor(CGM, Ty))
4090 return GetAddrOfExternalRTTIDescriptor(Ty);
4091
4092 // Emit the standard library with external linkage.
4093 llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
4094
4095 // Give the type_info object and name the formal visibility of the
4096 // type itself.
4097 llvm::GlobalValue::VisibilityTypes llvmVisibility;
4098 if (llvm::GlobalValue::isLocalLinkage(Linkage))
4099 // If the linkage is local, only default visibility makes sense.
4100 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
4101 else if (CXXABI.classifyRTTIUniqueness(CanTy: Ty, Linkage) ==
4102 ItaniumCXXABI::RUK_NonUniqueHidden)
4103 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
4104 else
4105 llvmVisibility = CodeGenModule::GetLLVMVisibility(V: Ty->getVisibility());
4106
4107 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4108 llvm::GlobalValue::DefaultStorageClass;
4109 if (auto RD = Ty->getAsCXXRecordDecl()) {
4110 if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
4111 RD->hasAttr<DLLExportAttr>()) ||
4112 (CGM.shouldMapVisibilityToDLLExport(D: RD) &&
4113 !llvm::GlobalValue::isLocalLinkage(Linkage) &&
4114 llvmVisibility == llvm::GlobalValue::DefaultVisibility))
4115 DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
4116 }
4117 return BuildTypeInfo(Ty, Linkage, Visibility: llvmVisibility, DLLStorageClass);
4118}
4119
4120llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
4121 QualType Ty,
4122 llvm::GlobalVariable::LinkageTypes Linkage,
4123 llvm::GlobalValue::VisibilityTypes Visibility,
4124 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
4125 SmallString<256> Name;
4126 llvm::raw_svector_ostream Out(Name);
4127 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T: Ty, Out);
4128 llvm::Module &M = CGM.getModule();
4129 llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
  // Int8Ty is an arbitrary placeholder type; the real initializer is
  // installed later via replaceInitializer().
4131 llvm::GlobalVariable *GV =
4132 new llvm::GlobalVariable(M, CGM.Int8Ty, /*isConstant=*/true, Linkage,
4133 /*Initializer=*/nullptr, Name);
4134
4135 // Add the vtable pointer.
4136 BuildVTablePointer(Ty: cast<Type>(Val&: Ty), StorageAddress: GV);
4137
4138 // And the name.
4139 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
4140 llvm::Constant *TypeNameField;
4141
4142 // If we're supposed to demote the visibility, be sure to set a flag
4143 // to use a string comparison for type_info comparisons.
4144 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
4145 CXXABI.classifyRTTIUniqueness(CanTy: Ty, Linkage);
4146 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
4147 // The flag is the sign bit, which on ARM64 is defined to be clear
4148 // for global pointers. This is very ARM64-specific.
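    // Illustrative sketch (assuming the name symbol is @_ZTS1S) of the
    // constant built below:
    //   inttoptr (add (ptrtoint @_ZTS1S to i64), 1 << 63) to ptr
    // Since the name's address has a clear sign bit, the add simply sets it.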
4149 TypeNameField = llvm::ConstantExpr::getPtrToInt(C: TypeName, Ty: CGM.Int64Ty);
4150 llvm::Constant *flag =
4151 llvm::ConstantInt::get(Ty: CGM.Int64Ty, V: ((uint64_t)1) << 63);
4152 TypeNameField = llvm::ConstantExpr::getAdd(C1: TypeNameField, C2: flag);
4153 TypeNameField =
4154 llvm::ConstantExpr::getIntToPtr(C: TypeNameField, Ty: CGM.GlobalsInt8PtrTy);
4155 } else {
4156 TypeNameField = TypeName;
4157 }
4158 Fields.push_back(Elt: TypeNameField);
4159
4160 switch (Ty->getTypeClass()) {
4161#define TYPE(Class, Base)
4162#define ABSTRACT_TYPE(Class, Base)
4163#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
4164#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
4165#define DEPENDENT_TYPE(Class, Base) case Type::Class:
4166#include "clang/AST/TypeNodes.inc"
4167 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
4168
4169 // GCC treats vector types as fundamental types.
4170 case Type::Builtin:
4171 case Type::Vector:
4172 case Type::ExtVector:
4173 case Type::ConstantMatrix:
4174 case Type::Complex:
4175 case Type::BlockPointer:
4176 // Itanium C++ ABI 2.9.5p4:
4177 // abi::__fundamental_type_info adds no data members to std::type_info.
4178 break;
4179
4180 case Type::LValueReference:
4181 case Type::RValueReference:
4182 llvm_unreachable("References shouldn't get here");
4183
4184 case Type::Auto:
4185 case Type::DeducedTemplateSpecialization:
4186 llvm_unreachable("Undeduced type shouldn't get here");
4187
4188 case Type::Pipe:
4189 break;
4190
4191 case Type::BitInt:
4192 break;
4193
4194 case Type::ConstantArray:
4195 case Type::IncompleteArray:
4196 case Type::VariableArray:
4197 case Type::ArrayParameter:
4198 // Itanium C++ ABI 2.9.5p5:
4199 // abi::__array_type_info adds no data members to std::type_info.
4200 break;
4201
4202 case Type::FunctionNoProto:
4203 case Type::FunctionProto:
4204 // Itanium C++ ABI 2.9.5p5:
4205 // abi::__function_type_info adds no data members to std::type_info.
4206 break;
4207
4208 case Type::Enum:
4209 // Itanium C++ ABI 2.9.5p5:
4210 // abi::__enum_type_info adds no data members to std::type_info.
4211 break;
4212
4213 case Type::Record: {
4214 const CXXRecordDecl *RD =
4215 cast<CXXRecordDecl>(Val: cast<RecordType>(Val&: Ty)->getDecl());
4216 if (!RD->hasDefinition() || !RD->getNumBases()) {
4217 // We don't need to emit any fields.
4218 break;
4219 }
4220
4221 if (CanUseSingleInheritance(RD))
4222 BuildSIClassTypeInfo(RD);
4223 else
4224 BuildVMIClassTypeInfo(RD);
4225
4226 break;
4227 }
4228
4229 case Type::ObjCObject:
4230 case Type::ObjCInterface:
4231 BuildObjCObjectTypeInfo(Ty: cast<ObjCObjectType>(Val&: Ty));
4232 break;
4233
4234 case Type::ObjCObjectPointer:
4235 BuildPointerTypeInfo(PointeeTy: cast<ObjCObjectPointerType>(Val&: Ty)->getPointeeType());
4236 break;
4237
4238 case Type::Pointer:
4239 BuildPointerTypeInfo(PointeeTy: cast<PointerType>(Val&: Ty)->getPointeeType());
4240 break;
4241
4242 case Type::MemberPointer:
4243 BuildPointerToMemberTypeInfo(Ty: cast<MemberPointerType>(Val&: Ty));
4244 break;
4245
4246 case Type::Atomic:
4247 // No fields, at least for the moment.
4248 break;
4249
4250 case Type::HLSLAttributedResource:
4251 case Type::HLSLInlineSpirv:
4252 llvm_unreachable("HLSL doesn't support RTTI");
4253 }
4254
4255 GV->replaceInitializer(InitVal: llvm::ConstantStruct::getAnon(V: Fields));
4256
4257 // Export the typeinfo in the same circumstances as the vtable is exported.
4258 auto GVDLLStorageClass = DLLStorageClass;
4259 if (CGM.getTarget().hasPS4DLLImportExport() &&
4260 GVDLLStorageClass != llvm::GlobalVariable::DLLExportStorageClass) {
4261 if (const RecordType *RecordTy = dyn_cast<RecordType>(Val&: Ty)) {
4262 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: RecordTy->getDecl());
4263 if (RD->hasAttr<DLLExportAttr>() ||
4264 CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
4265 GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
4266 }
4267 }
4268
4269 // If there's already an old global variable, replace it with the new one.
4270 if (OldGV) {
4271 GV->takeName(V: OldGV);
4272 OldGV->replaceAllUsesWith(V: GV);
4273 OldGV->eraseFromParent();
4274 }
4275
4276 if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
4277 GV->setComdat(M.getOrInsertComdat(Name: GV->getName()));
4278
4279 CharUnits Align = CGM.getContext().toCharUnitsFromBits(
4280 BitSize: CGM.getTarget().getPointerAlign(AddrSpace: CGM.GetGlobalVarAddressSpace(D: nullptr)));
4281 GV->setAlignment(Align.getAsAlign());
4282
4283 // The Itanium ABI specifies that type_info objects must be globally
4284 // unique, with one exception: if the type is an incomplete class
4285 // type or a (possibly indirect) pointer to one. That exception
4286 // affects the general case of comparing type_info objects produced
4287 // by the typeid operator, which is why the comparison operators on
4288 // std::type_info generally use the type_info name pointers instead
4289 // of the object addresses. However, the language's built-in uses
4290 // of RTTI generally require class types to be complete, even when
4291 // manipulating pointers to those class types. This allows the
4292 // implementation of dynamic_cast to rely on address equality tests,
4293 // which is much faster.
4294
4295 // All of this is to say that it's important that both the type_info
4296 // object and the type_info name be uniqued when weakly emitted.
4297
4298 TypeName->setVisibility(Visibility);
4299 CGM.setDSOLocal(TypeName);
4300
4301 GV->setVisibility(Visibility);
4302 CGM.setDSOLocal(GV);
4303
4304 TypeName->setDLLStorageClass(DLLStorageClass);
4305 GV->setDLLStorageClass(GVDLLStorageClass);
4306
4307 TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
4308 GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
4309
4310 return GV;
4311}
4312
4313/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
4314/// for the given Objective-C object type.
4315void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
4316 // Drop qualifiers.
4317 const Type *T = OT->getBaseType().getTypePtr();
4318 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
4319
4320 // The builtin types are abi::__class_type_infos and don't require
4321 // extra fields.
4322 if (isa<BuiltinType>(Val: T)) return;
4323
4324 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(Val: T)->getDecl();
4325 ObjCInterfaceDecl *Super = Class->getSuperClass();
4326
4327 // Root classes are also __class_type_info.
4328 if (!Super) return;
4329
4330 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Decl: Super);
4331
4332 // Everything else is single inheritance.
4333 llvm::Constant *BaseTypeInfo =
4334 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: SuperTy);
4335 Fields.push_back(Elt: BaseTypeInfo);
4336}
4337
4338/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
4340void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
4341 // Itanium C++ ABI 2.9.5p6b:
4342 // It adds to abi::__class_type_info a single member pointing to the
4343 // type_info structure for the base type,
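  // e.g. (illustrative) for 'struct D : B {};' the complete descriptor ends
  // up as { <__si_class_type_info vtable slot>, &_ZTS1D, &_ZTI1B }; only the
  // last field is added here.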
4344 llvm::Constant *BaseTypeInfo =
4345 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: RD->bases_begin()->getType());
4346 Fields.push_back(Elt: BaseTypeInfo);
4347}
4348
4349namespace {
4350 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
4351 /// a class hierarchy.
4352 struct SeenBases {
4353 llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
4354 llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
4355 };
4356}
4357
/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
/// abi::__vmi_class_type_info.
4361static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
4362 SeenBases &Bases) {
4363
4364 unsigned Flags = 0;
4365
4366 auto *BaseDecl =
4367 cast<CXXRecordDecl>(Val: Base->getType()->castAs<RecordType>()->getDecl());
4368
4369 if (Base->isVirtual()) {
4370 // Mark the virtual base as seen.
4371 if (!Bases.VirtualBases.insert(Ptr: BaseDecl).second) {
4372 // If this virtual base has been seen before, then the class is diamond
4373 // shaped.
4374 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
4375 } else {
4376 if (Bases.NonVirtualBases.count(Ptr: BaseDecl))
4377 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4378 }
4379 } else {
4380 // Mark the non-virtual base as seen.
4381 if (!Bases.NonVirtualBases.insert(Ptr: BaseDecl).second) {
4382 // If this non-virtual base has been seen before, then the class has non-
4383 // diamond shaped repeated inheritance.
4384 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4385 } else {
4386 if (Bases.VirtualBases.count(Ptr: BaseDecl))
4387 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4388 }
4389 }
4390
4391 // Walk all bases.
4392 for (const auto &I : BaseDecl->bases())
4393 Flags |= ComputeVMIClassTypeInfoFlags(Base: &I, Bases);
4394
4395 return Flags;
4396}
4397
4398static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
4399 unsigned Flags = 0;
4400 SeenBases Bases;
4401
4402 // Walk all bases.
4403 for (const auto &I : RD->bases())
4404 Flags |= ComputeVMIClassTypeInfoFlags(Base: &I, Bases);
4405
4406 return Flags;
4407}
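
// An illustrative example (ours, not from the source): given
//   struct A {}; struct B : virtual A {}; struct C : virtual A {};
//   struct D : B, C {};
// the walk reaches A twice as a virtual base, so the flags computed for D
// include VMI_DiamondShaped.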
4408
4409/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
4410/// classes with bases that do not satisfy the abi::__si_class_type_info
/// constraints, according to the Itanium C++ ABI, 2.9.5p6c.
4412void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
4413 llvm::Type *UnsignedIntLTy =
4414 CGM.getTypes().ConvertType(T: CGM.getContext().UnsignedIntTy);
4415
4416 // Itanium C++ ABI 2.9.5p6c:
4417 // __flags is a word with flags describing details about the class
4418 // structure, which may be referenced by using the __flags_masks
4419 // enumeration. These flags refer to both direct and indirect bases.
4420 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
4421 Fields.push_back(Elt: llvm::ConstantInt::get(Ty: UnsignedIntLTy, V: Flags));
4422
4423 // Itanium C++ ABI 2.9.5p6c:
4424 // __base_count is a word with the number of direct proper base class
4425 // descriptions that follow.
4426 Fields.push_back(Elt: llvm::ConstantInt::get(Ty: UnsignedIntLTy, V: RD->getNumBases()));
4427
4428 if (!RD->getNumBases())
4429 return;
4430
4431 // Now add the base class descriptions.
4432
4433 // Itanium C++ ABI 2.9.5p6c:
4434 // __base_info[] is an array of base class descriptions -- one for every
4435 // direct proper base. Each description is of the type:
4436 //
4437 // struct abi::__base_class_type_info {
4438 // public:
4439 // const __class_type_info *__base_type;
4440 // long __offset_flags;
4441 //
4442 // enum __offset_flags_masks {
4443 // __virtual_mask = 0x1,
4444 // __public_mask = 0x2,
4445 // __offset_shift = 8
4446 // };
4447 // };
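  //
  // For example (illustrative): a public non-virtual base at offset 8 is
  // encoded as (8 << __offset_shift) | __public_mask, i.e. 0x802.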
4448
4449 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4450 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4451 // LLP64 platforms.
4452 // FIXME: Consider updating libc++abi to match, and extend this logic to all
4453 // LLP64 platforms.
4454 QualType OffsetFlagsTy = CGM.getContext().LongTy;
4455 const TargetInfo &TI = CGM.getContext().getTargetInfo();
4456 if (TI.getTriple().isOSCygMing() &&
4457 TI.getPointerWidth(AddrSpace: LangAS::Default) > TI.getLongWidth())
4458 OffsetFlagsTy = CGM.getContext().LongLongTy;
4459 llvm::Type *OffsetFlagsLTy =
4460 CGM.getTypes().ConvertType(T: OffsetFlagsTy);
4461
4462 for (const auto &Base : RD->bases()) {
4463 // The __base_type member points to the RTTI for the base type.
4464 Fields.push_back(Elt: ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: Base.getType()));
4465
4466 auto *BaseDecl =
4467 cast<CXXRecordDecl>(Val: Base.getType()->castAs<RecordType>()->getDecl());
4468
4469 int64_t OffsetFlags = 0;
4470
4471 // All but the lower 8 bits of __offset_flags are a signed offset.
4472 // For a non-virtual base, this is the offset in the object of the base
4473 // subobject. For a virtual base, this is the offset in the virtual table of
4474 // the virtual base offset for the virtual base referenced (negative).
4475 CharUnits Offset;
4476 if (Base.isVirtual())
4477 Offset =
4478 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, VBase: BaseDecl);
4479 else {
4480 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(D: RD);
4481 Offset = Layout.getBaseClassOffset(Base: BaseDecl);
    }
4483
4484 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4485
4486 // The low-order byte of __offset_flags contains flags, as given by the
4487 // masks from the enumeration __offset_flags_masks.
4488 if (Base.isVirtual())
4489 OffsetFlags |= BCTI_Virtual;
4490 if (Base.getAccessSpecifier() == AS_public)
4491 OffsetFlags |= BCTI_Public;
4492
4493 Fields.push_back(Elt: llvm::ConstantInt::get(Ty: OffsetFlagsLTy, V: OffsetFlags));
4494 }
4495}
4496
4497/// Compute the flags for a __pbase_type_info, and remove the corresponding
4498/// pieces from \p Type.
4499static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4500 unsigned Flags = 0;
4501
4502 if (Type.isConstQualified())
4503 Flags |= ItaniumRTTIBuilder::PTI_Const;
4504 if (Type.isVolatileQualified())
4505 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4506 if (Type.isRestrictQualified())
4507 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4508 Type = Type.getUnqualifiedType();
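  // e.g. for a pointee of 'const volatile int', Flags now holds
  // PTI_Const | PTI_Volatile and Type has been stripped to 'int'.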
4509
4510 // Itanium C++ ABI 2.9.5p7:
4511 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
4512 // incomplete class type, the incomplete target type flag is set.
4513 if (ContainsIncompleteClassType(Ty: Type))
4514 Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4515
4516 if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4517 if (Proto->isNothrow()) {
4518 Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4519 Type = Ctx.getFunctionTypeWithExceptionSpec(Orig: Type, ESI: EST_None);
4520 }
4521 }
4522
4523 return Flags;
4524}
4525
4526/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4527/// used for pointer types.
4528void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4529 // Itanium C++ ABI 2.9.5p7:
4530 // __flags is a flag word describing the cv-qualification and other
4531 // attributes of the type pointed to
4532 unsigned Flags = extractPBaseFlags(Ctx&: CGM.getContext(), Type&: PointeeTy);
4533
4534 llvm::Type *UnsignedIntLTy =
4535 CGM.getTypes().ConvertType(T: CGM.getContext().UnsignedIntTy);
4536 Fields.push_back(Elt: llvm::ConstantInt::get(Ty: UnsignedIntLTy, V: Flags));
4537
4538 // Itanium C++ ABI 2.9.5p7:
4539 // __pointee is a pointer to the std::type_info derivation for the
4540 // unqualified type being pointed to.
4541 llvm::Constant *PointeeTypeInfo =
4542 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: PointeeTy);
4543 Fields.push_back(Elt: PointeeTypeInfo);
4544}
4545
4546/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4547/// struct, used for member pointer types.
4548void
4549ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4550 QualType PointeeTy = Ty->getPointeeType();
4551
4552 // Itanium C++ ABI 2.9.5p7:
4553 // __flags is a flag word describing the cv-qualification and other
4554 // attributes of the type pointed to.
4555 unsigned Flags = extractPBaseFlags(Ctx&: CGM.getContext(), Type&: PointeeTy);
4556
4557 const auto *ClassType =
4558 cast<RecordType>(Val: Ty->getMostRecentCXXRecordDecl()->getTypeForDecl());
4559 if (IsIncompleteClassType(RecordTy: ClassType))
4560 Flags |= PTI_ContainingClassIncomplete;
4561
4562 llvm::Type *UnsignedIntLTy =
4563 CGM.getTypes().ConvertType(T: CGM.getContext().UnsignedIntTy);
4564 Fields.push_back(Elt: llvm::ConstantInt::get(Ty: UnsignedIntLTy, V: Flags));
4565
4566 // Itanium C++ ABI 2.9.5p7:
4567 // __pointee is a pointer to the std::type_info derivation for the
4568 // unqualified type being pointed to.
4569 llvm::Constant *PointeeTypeInfo =
4570 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: PointeeTy);
4571 Fields.push_back(Elt: PointeeTypeInfo);
4572
4573 // Itanium C++ ABI 2.9.5p9:
4574 // __context is a pointer to an abi::__class_type_info corresponding to the
4575 // class type containing the member pointed to
4576 // (e.g., the "A" in "int A::*").
4577 Fields.push_back(
4578 Elt: ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: QualType(ClassType, 0)));
4579}
4580
4581llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4582 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4583}
4584
4585void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4586 // Types added here must also be added to TypeInfoIsInStandardLibrary.
4587 QualType FundamentalTypes[] = {
4588 getContext().VoidTy, getContext().NullPtrTy,
4589 getContext().BoolTy, getContext().WCharTy,
4590 getContext().CharTy, getContext().UnsignedCharTy,
4591 getContext().SignedCharTy, getContext().ShortTy,
4592 getContext().UnsignedShortTy, getContext().IntTy,
4593 getContext().UnsignedIntTy, getContext().LongTy,
4594 getContext().UnsignedLongTy, getContext().LongLongTy,
4595 getContext().UnsignedLongLongTy, getContext().Int128Ty,
4596 getContext().UnsignedInt128Ty, getContext().HalfTy,
4597 getContext().FloatTy, getContext().DoubleTy,
4598 getContext().LongDoubleTy, getContext().Float128Ty,
4599 getContext().Char8Ty, getContext().Char16Ty,
4600 getContext().Char32Ty
4601 };
4602 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4603 RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(D: RD)
4604 ? llvm::GlobalValue::DLLExportStorageClass
4605 : llvm::GlobalValue::DefaultStorageClass;
4606 llvm::GlobalValue::VisibilityTypes Visibility =
4607 CodeGenModule::GetLLVMVisibility(V: RD->getVisibility());
4608 for (const QualType &FundamentalType : FundamentalTypes) {
4609 QualType PointerType = getContext().getPointerType(T: FundamentalType);
4610 QualType PointerTypeConst = getContext().getPointerType(
4611 T: FundamentalType.withConst());
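    // For each fundamental type T this emits descriptors for T, T*, and
    // const T* (e.g., for 'int': _ZTIi, _ZTIPi, and _ZTIPKi).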
4612 for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4613 ItaniumRTTIBuilder(*this).BuildTypeInfo(
4614 Ty: Type, Linkage: llvm::GlobalValue::ExternalLinkage,
4615 Visibility, DLLStorageClass);
4616 }
4617}
4618
4619/// What sort of uniqueness rules should we use for the RTTI for the
4620/// given type?
4621ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4622 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4623 if (shouldRTTIBeUnique())
4624 return RUK_Unique;
4625
4626 // It's only necessary for linkonce_odr or weak_odr linkage.
4627 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4628 Linkage != llvm::GlobalValue::WeakODRLinkage)
4629 return RUK_Unique;
4630
4631 // It's only necessary with default visibility.
4632 if (CanTy->getVisibility() != DefaultVisibility)
4633 return RUK_Unique;
4634
4635 // If we're not required to publish this symbol, hide it.
4636 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4637 return RUK_NonUniqueHidden;
4638
4639 // If we're required to publish this symbol, as we might be under an
4640 // explicit instantiation, leave it with default visibility but
4641 // enable string-comparisons.
4642 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4643 return RUK_NonUniqueVisible;
4644}
4645
4646// Find out how to codegen the complete destructor and constructor
4647namespace {
4648enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
4649}
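
// A rough gloss on these strategies (descriptive, not normative):
//  - Emit:   emit the complete and base variants as separate functions.
//  - RAUW:   emit only the base variant and replace references to the
//            complete-variant symbol with it.
//  - Alias:  emit the complete variant as a GlobalAlias of the base variant.
//  - COMDAT: emit the structor once and key the comdat group by the C5/D5
//            mangling so both symbols can be deduplicated together.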
4650static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4651 const CXXMethodDecl *MD) {
4652 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4653 return StructorCodegen::Emit;
4654
4655 // The complete and base structors are not equivalent if there are any virtual
4656 // bases, so emit separate functions.
4657 if (MD->getParent()->getNumVBases())
4658 return StructorCodegen::Emit;
4659
4660 GlobalDecl AliasDecl;
4661 if (const auto *DD = dyn_cast<CXXDestructorDecl>(Val: MD)) {
4662 AliasDecl = GlobalDecl(DD, Dtor_Complete);
4663 } else {
4664 const auto *CD = cast<CXXConstructorDecl>(Val: MD);
4665 AliasDecl = GlobalDecl(CD, Ctor_Complete);
4666 }
4667 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(GD: AliasDecl);
4668
4669 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4670 return StructorCodegen::RAUW;
4671
4672 // FIXME: Should we allow available_externally aliases?
4673 if (!llvm::GlobalAlias::isValidLinkage(L: Linkage))
4674 return StructorCodegen::RAUW;
4675
4676 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4677 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4678 if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4679 CGM.getTarget().getTriple().isOSBinFormatWasm())
4680 return StructorCodegen::COMDAT;
4681 return StructorCodegen::Emit;
4682 }
4683
4684 return StructorCodegen::Alias;
4685}
4686
4687static void emitConstructorDestructorAlias(CodeGenModule &CGM,
4688 GlobalDecl AliasDecl,
4689 GlobalDecl TargetDecl) {
4690 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(GD: AliasDecl);
4691
4692 StringRef MangledName = CGM.getMangledName(GD: AliasDecl);
4693 llvm::GlobalValue *Entry = CGM.GetGlobalValue(Ref: MangledName);
4694 if (Entry && !Entry->isDeclaration())
4695 return;
4696
4697 auto *Aliasee = cast<llvm::GlobalValue>(Val: CGM.GetAddrOfGlobal(GD: TargetDecl));
4698
4699 // Create the alias with no name.
4700 auto *Alias = llvm::GlobalAlias::create(Linkage, Name: "", Aliasee);
4701
4702 // Constructors and destructors are always unnamed_addr.
4703 Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4704
4705 // Switch any previous uses to the alias.
4706 if (Entry) {
4707 assert(Entry->getType() == Aliasee->getType() &&
4708 "declaration exists with different type");
4709 Alias->takeName(V: Entry);
4710 Entry->replaceAllUsesWith(V: Alias);
4711 Entry->eraseFromParent();
4712 } else {
4713 Alias->setName(MangledName);
4714 }
4715
4716 // Finally, set up the alias with its proper name and attributes.
4717 CGM.SetCommonAttributes(GD: AliasDecl, GV: Alias);
4718}
4719
4720void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
4721 auto *MD = cast<CXXMethodDecl>(Val: GD.getDecl());
4722 auto *CD = dyn_cast<CXXConstructorDecl>(Val: MD);
4723 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(Val: MD);
4724
4725 StructorCodegen CGType = getCodegenToUse(CGM, MD);
4726
4727 if (CD ? GD.getCtorType() == Ctor_Complete
4728 : GD.getDtorType() == Dtor_Complete) {
4729 GlobalDecl BaseDecl;
4730 if (CD)
4731 BaseDecl = GD.getWithCtorType(Type: Ctor_Base);
4732 else
4733 BaseDecl = GD.getWithDtorType(Type: Dtor_Base);
4734
4735 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
4736 emitConstructorDestructorAlias(CGM, AliasDecl: GD, TargetDecl: BaseDecl);
4737 return;
4738 }
4739
4740 if (CGType == StructorCodegen::RAUW) {
4741 StringRef MangledName = CGM.getMangledName(GD);
4742 auto *Aliasee = CGM.GetAddrOfGlobal(GD: BaseDecl);
4743 CGM.addReplacement(Name: MangledName, C: Aliasee);
4744 return;
4745 }
4746 }
4747
4748 // The base destructor is equivalent to the base destructor of its
4749 // base class if there is exactly one non-virtual base class with a
4750 // non-trivial destructor, there are no fields with a non-trivial
4751 // destructor, and the body of the destructor is trivial.
4752 if (DD && GD.getDtorType() == Dtor_Base &&
4753 CGType != StructorCodegen::COMDAT &&
4754 !CGM.TryEmitBaseDestructorAsAlias(D: DD))
4755 return;
4756
4757 // FIXME: The deleting destructor is equivalent to the selected operator
4758 // delete if:
4759 // * either the delete is a destroying operator delete or the destructor
4760 // would be trivial if it weren't virtual,
4761 // * the conversion from the 'this' parameter to the first parameter of the
4762 // destructor is equivalent to a bitcast,
4763 // * the destructor does not have an implicit "this" return, and
4764 // * the operator delete has the same calling convention and IR function type
4765 // as the destructor.
4766 // In such cases we should try to emit the deleting dtor as an alias to the
4767 // selected 'operator delete'.
4768
4769 llvm::Function *Fn = CGM.codegenCXXStructor(GD);
4770
4771 if (CGType == StructorCodegen::COMDAT) {
4772 SmallString<256> Buffer;
4773 llvm::raw_svector_ostream Out(Buffer);
4774 if (DD)
4775 getMangleContext().mangleCXXDtorComdat(D: DD, Out);
4776 else
4777 getMangleContext().mangleCXXCtorComdat(D: CD, Out);
4778 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Name: Out.str());
4779 Fn->setComdat(C);
4780 } else {
4781 CGM.maybeSetTrivialComdat(D: *MD, GO&: *Fn);
4782 }
4783}
4784
4785static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4786 // void *__cxa_begin_catch(void*);
4787 llvm::FunctionType *FTy = llvm::FunctionType::get(
4788 Result: CGM.Int8PtrTy, Params: CGM.Int8PtrTy, /*isVarArg=*/false);
4789
4790 return CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_begin_catch");
4791}
4792
4793static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4794 // void __cxa_end_catch();
4795 llvm::FunctionType *FTy =
4796 llvm::FunctionType::get(Result: CGM.VoidTy, /*isVarArg=*/false);
4797
4798 return CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_end_catch");
4799}
4800
4801static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4802 // void *__cxa_get_exception_ptr(void*);
4803 llvm::FunctionType *FTy = llvm::FunctionType::get(
4804 Result: CGM.Int8PtrTy, Params: CGM.Int8PtrTy, /*isVarArg=*/false);
4805
4806 return CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_get_exception_ptr");
4807}
4808
4809namespace {
4810 /// A cleanup to call __cxa_end_catch. In many cases, the caught
4811 /// exception type lets us state definitively that the thrown exception
4812 /// type does not have a destructor. In particular:
4813 /// - Catch-alls tell us nothing, so we have to conservatively
4814 /// assume that the thrown exception might have a destructor.
4815 /// - Catches by reference behave according to their base types.
4816 /// - Catches of non-record types will only trigger for exceptions
4817 /// of non-record types, which never have destructors.
4818 /// - Catches of record types can trigger for arbitrary subclasses
4819 /// of the caught type, so we have to assume the actual thrown
4820 /// exception type might have a throwing destructor, even if the
4821 /// caught type's destructor is trivial or nothrow.
4822 struct CallEndCatch final : EHScopeStack::Cleanup {
4823 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4824 bool MightThrow;
4825
4826 void Emit(CodeGenFunction &CGF, Flags flags) override {
4827 if (!MightThrow) {
4828 CGF.EmitNounwindRuntimeCall(callee: getEndCatchFn(CGM&: CGF.CGM));
4829 return;
4830 }
4831
4832 CGF.EmitRuntimeCallOrInvoke(callee: getEndCatchFn(CGM&: CGF.CGM));
4833 }
4834 };
4835}
4836
4837/// Emits a call to __cxa_begin_catch and enters a cleanup to call
4838/// __cxa_end_catch. If -fassume-nothrow-exception-dtor is specified, we assume
4839/// that the exception object's dtor is nothrow, therefore the __cxa_end_catch
4840/// call can be marked as nounwind even if EndMightThrow is true.
4841///
4842/// \param EndMightThrow - true if __cxa_end_catch might throw
4843static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4844 llvm::Value *Exn,
4845 bool EndMightThrow) {
4846 llvm::CallInst *call =
4847 CGF.EmitNounwindRuntimeCall(callee: getBeginCatchFn(CGM&: CGF.CGM), args: Exn);
4848
4849 CGF.EHStack.pushCleanup<CallEndCatch>(
4850 Kind: NormalAndEHCleanup,
4851 A: EndMightThrow && !CGF.CGM.getLangOpts().AssumeNothrowExceptionDtor);
4852
4853 return call;
4854}
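
// For a handler such as 'catch (int x)', the emitted sequence is roughly
// (illustrative IR, not a verbatim dump):
//   %adj = call ptr @__cxa_begin_catch(ptr %exn) nounwind
//   ... initialize 'x' from the exception object ...
//   call void @__cxa_end_catch()   ; run by the pushed cleanup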
4855
4856/// A "special initializer" callback for initializing a catch
4857/// parameter during catch initialization.
4858static void InitCatchParam(CodeGenFunction &CGF,
4859 const VarDecl &CatchParam,
4860 Address ParamAddr,
4861 SourceLocation Loc) {
4862 // Load the exception from where the landing pad saved it.
4863 llvm::Value *Exn = CGF.getExceptionFromSlot();
4864
4865 CanQualType CatchType =
4866 CGF.CGM.getContext().getCanonicalType(T: CatchParam.getType());
4867 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(T: CatchType);
4868
4869 // If we're catching by reference, we can just cast the object
4870 // pointer to the appropriate pointer.
4871 if (isa<ReferenceType>(Val: CatchType)) {
4872 QualType CaughtType = cast<ReferenceType>(Val&: CatchType)->getPointeeType();
4873 bool EndCatchMightThrow = CaughtType->isRecordType();
4874
4875 // __cxa_begin_catch returns the adjusted object pointer.
4876 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndMightThrow: EndCatchMightThrow);
4877
4878 // We have no way to tell the personality function that we're
4879 // catching by reference, so if we're catching a pointer,
4880 // __cxa_begin_catch will actually return that pointer by value.
4881 if (const PointerType *PT = dyn_cast<PointerType>(Val&: CaughtType)) {
4882 QualType PointeeType = PT->getPointeeType();
4883
4884 // When catching by reference, generally we should just ignore
4885 // this by-value pointer and use the exception object instead.
4886 if (!PointeeType->isRecordType()) {
4887
4888 // Exn points to the struct _Unwind_Exception header, which
4889 // we have to skip past in order to reach the exception data.
4890 unsigned HeaderSize =
4891 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
4892 AdjustedExn =
4893 CGF.Builder.CreateConstGEP1_32(Ty: CGF.Int8Ty, Ptr: Exn, Idx0: HeaderSize);
4894
        // However, if we're catching a pointer-to-record type, that won't
        // work, because the personality function might have adjusted
4897 // the pointer. There's actually no way for us to fully satisfy
4898 // the language/ABI contract here: we can't use Exn because it
4899 // might have the wrong adjustment, but we can't use the by-value
4900 // pointer because it's off by a level of abstraction.
4901 //
4902 // The current solution is to dump the adjusted pointer into an
4903 // alloca, which breaks language semantics (because changing the
4904 // pointer doesn't change the exception) but at least works.
4905 // The better solution would be to filter out non-exact matches
4906 // and rethrow them, but this is tricky because the rethrow
4907 // really needs to be catchable by other sites at this landing
4908 // pad. The best solution is to fix the personality function.
4909 } else {
4910 // Pull the pointer for the reference type off.
4911 llvm::Type *PtrTy = CGF.ConvertTypeForMem(T: CaughtType);
4912
4913 // Create the temporary and write the adjusted pointer into it.
4914 Address ExnPtrTmp =
4915 CGF.CreateTempAlloca(Ty: PtrTy, align: CGF.getPointerAlign(), Name: "exn.byref.tmp");
4916 llvm::Value *Casted = CGF.Builder.CreateBitCast(V: AdjustedExn, DestTy: PtrTy);
4917 CGF.Builder.CreateStore(Val: Casted, Addr: ExnPtrTmp);
4918
4919 // Bind the reference to the temporary.
4920 AdjustedExn = ExnPtrTmp.emitRawPointer(CGF);
4921 }
4922 }
4923
4924 llvm::Value *ExnCast =
4925 CGF.Builder.CreateBitCast(V: AdjustedExn, DestTy: LLVMCatchTy, Name: "exn.byref");
4926 CGF.Builder.CreateStore(Val: ExnCast, Addr: ParamAddr);
4927 return;
4928 }
4929
4930 // Scalars and complexes.
4931 TypeEvaluationKind TEK = CGF.getEvaluationKind(T: CatchType);
4932 if (TEK != TEK_Aggregate) {
4933 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndMightThrow: false);
4934
4935 // If the catch type is a pointer type, __cxa_begin_catch returns
4936 // the pointer by value.
4937 if (CatchType->hasPointerRepresentation()) {
4938 llvm::Value *CastExn =
4939 CGF.Builder.CreateBitCast(V: AdjustedExn, DestTy: LLVMCatchTy, Name: "exn.casted");
4940
4941 switch (CatchType.getQualifiers().getObjCLifetime()) {
4942 case Qualifiers::OCL_Strong:
4943 CastExn = CGF.EmitARCRetainNonBlock(value: CastExn);
4944 [[fallthrough]];
4945
4946 case Qualifiers::OCL_None:
4947 case Qualifiers::OCL_ExplicitNone:
4948 case Qualifiers::OCL_Autoreleasing:
4949 CGF.Builder.CreateStore(Val: CastExn, Addr: ParamAddr);
4950 return;
4951
4952 case Qualifiers::OCL_Weak:
4953 CGF.EmitARCInitWeak(addr: ParamAddr, value: CastExn);
4954 return;
4955 }
4956 llvm_unreachable("bad ownership qualifier!");
4957 }
4958
4959 // Otherwise, it returns a pointer into the exception object.
4960
4961 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(V: AdjustedExn, T: CatchType);
4962 LValue destLV = CGF.MakeAddrLValue(Addr: ParamAddr, T: CatchType);
4963 switch (TEK) {
4964 case TEK_Complex:
4965 CGF.EmitStoreOfComplex(V: CGF.EmitLoadOfComplex(src: srcLV, loc: Loc), dest: destLV,
4966 /*init*/ isInit: true);
4967 return;
4968 case TEK_Scalar: {
4969 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(lvalue: srcLV, Loc);
4970 CGF.EmitStoreOfScalar(value: ExnLoad, lvalue: destLV, /*init*/ isInit: true);
4971 return;
4972 }
4973 case TEK_Aggregate:
4974 llvm_unreachable("evaluation kind filtered out!");
4975 }
4976 llvm_unreachable("bad evaluation kind");
4977 }
4978
4979 assert(isa<RecordType>(CatchType) && "unexpected catch type!");
4980 auto catchRD = CatchType->getAsCXXRecordDecl();
4981 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(CD: catchRD);
4982
4983 llvm::Type *PtrTy = CGF.UnqualPtrTy; // addrspace 0 ok
4984
4985 // Check for a copy expression. If we don't have a copy expression,
4986 // that means a trivial copy is okay.
4987 const Expr *copyExpr = CatchParam.getInit();
4988 if (!copyExpr) {
4989 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, EndMightThrow: true);
4990 Address adjustedExn(CGF.Builder.CreateBitCast(V: rawAdjustedExn, DestTy: PtrTy),
4991 LLVMCatchTy, caughtExnAlignment);
4992 LValue Dest = CGF.MakeAddrLValue(Addr: ParamAddr, T: CatchType);
4993 LValue Src = CGF.MakeAddrLValue(Addr: adjustedExn, T: CatchType);
4994 CGF.EmitAggregateCopy(Dest, Src, EltTy: CatchType, MayOverlap: AggValueSlot::DoesNotOverlap);
4995 return;
4996 }
4997
4998 // We have to call __cxa_get_exception_ptr to get the adjusted
4999 // pointer before copying.
5000 llvm::CallInst *rawAdjustedExn =
5001 CGF.EmitNounwindRuntimeCall(callee: getGetExceptionPtrFn(CGM&: CGF.CGM), args: Exn);
5002
5003 // Cast that to the appropriate type.
5004 Address adjustedExn(CGF.Builder.CreateBitCast(V: rawAdjustedExn, DestTy: PtrTy),
5005 LLVMCatchTy, caughtExnAlignment);
5006
5007 // The copy expression is defined in terms of an OpaqueValueExpr.
5008 // Find it and map it to the adjusted expression.
5009 CodeGenFunction::OpaqueValueMapping
5010 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(expr: copyExpr),
5011 CGF.MakeAddrLValue(Addr: adjustedExn, T: CatchParam.getType()));
5012
5013 // Call the copy ctor in a terminate scope.
5014 CGF.EHStack.pushTerminate();
5015
5016 // Perform the copy construction.
5017 CGF.EmitAggExpr(E: copyExpr,
5018 AS: AggValueSlot::forAddr(addr: ParamAddr, quals: Qualifiers(),
5019 isDestructed: AggValueSlot::IsNotDestructed,
5020 needsGC: AggValueSlot::DoesNotNeedGCBarriers,
5021 isAliased: AggValueSlot::IsNotAliased,
5022 mayOverlap: AggValueSlot::DoesNotOverlap));
5023
5024 // Leave the terminate scope.
5025 CGF.EHStack.popTerminate();
5026
5027 // Undo the opaque value mapping.
5028 opaque.pop();
5029
5030 // Finally we can call __cxa_begin_catch.
5031 CallBeginCatch(CGF, Exn, EndMightThrow: true);
5032}
5033
5034/// Begins a catch statement by initializing the catch variable and
5035/// calling __cxa_begin_catch.
5036void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
5037 const CXXCatchStmt *S) {
5038 // We have to be very careful with the ordering of cleanups here:
5039 // C++ [except.throw]p4:
5040 // The destruction [of the exception temporary] occurs
5041 // immediately after the destruction of the object declared in
5042 // the exception-declaration in the handler.
5043 //
5044 // So the precise ordering is:
5045 // 1. Construct catch variable.
5046 // 2. __cxa_begin_catch
5047 // 3. Enter __cxa_end_catch cleanup
5048 // 4. Enter dtor cleanup
5049 //
5050 // We do this by using a slightly abnormal initialization process.
5051 // Delegation sequence:
5052 // - ExitCXXTryStmt opens a RunCleanupsScope
5053 // - EmitAutoVarAlloca creates the variable and debug info
5054 // - InitCatchParam initializes the variable from the exception
5055 // - CallBeginCatch calls __cxa_begin_catch
5056 // - CallBeginCatch enters the __cxa_end_catch cleanup
5057 // - EmitAutoVarCleanups enters the variable destructor cleanup
5058 // - EmitCXXTryStmt emits the code for the catch body
  // - EmitCXXTryStmt closes the RunCleanupsScope
5060
5061 VarDecl *CatchParam = S->getExceptionDecl();
5062 if (!CatchParam) {
5063 llvm::Value *Exn = CGF.getExceptionFromSlot();
5064 CallBeginCatch(CGF, Exn, EndMightThrow: true);
5065 return;
5066 }
5067
5068 // Emit the local.
5069 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(var: *CatchParam);
5070 {
5071 ApplyAtomGroup Grp(CGF.getDebugInfo());
5072 InitCatchParam(CGF, CatchParam: *CatchParam, ParamAddr: var.getObjectAddress(CGF),
5073 Loc: S->getBeginLoc());
5074 }
5075 CGF.EmitAutoVarCleanups(emission: var);
5076}
5077
5078/// Get or define the following function:
5079/// void @__clang_call_terminate(i8* %exn) nounwind noreturn
5080/// This code is used only in C++.
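///
/// A sketch of the body emitted below (in C++ terms, for orientation):
///   void __clang_call_terminate(void *exn) {
///     __cxa_begin_catch(exn);
///     std::terminate();
///   }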
5081static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
5082 ASTContext &C = CGM.getContext();
5083 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
5084 resultType: C.VoidTy, argTypes: {C.getPointerType(T: C.CharTy)});
5085 llvm::FunctionType *fnTy = CGM.getTypes().GetFunctionType(Info: FI);
5086 llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
5087 Ty: fnTy, Name: "__clang_call_terminate", ExtraAttrs: llvm::AttributeList(), /*Local=*/true);
5088 llvm::Function *fn =
5089 cast<llvm::Function>(Val: fnRef.getCallee()->stripPointerCasts());
5090 if (fn->empty()) {
5091 CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI, F: fn, /*IsThunk=*/false);
5092 CGM.SetLLVMFunctionAttributesForDefinition(D: nullptr, F: fn);
5093 fn->setDoesNotThrow();
5094 fn->setDoesNotReturn();
5095
5096 // What we really want is to massively penalize inlining without
5097 // forbidding it completely. The difference between that and
5098 // 'noinline' is negligible.
5099 fn->addFnAttr(Kind: llvm::Attribute::NoInline);
5100
5101 // Allow this function to be shared across translation units, but
5102 // we don't want it to turn into an exported symbol.
5103 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
5104 fn->setVisibility(llvm::Function::HiddenVisibility);
5105 if (CGM.supportsCOMDAT())
5106 fn->setComdat(CGM.getModule().getOrInsertComdat(Name: fn->getName()));
5107
5108 // Set up the function.
5109 llvm::BasicBlock *entry =
5110 llvm::BasicBlock::Create(Context&: CGM.getLLVMContext(), Name: "", Parent: fn);
5111 CGBuilderTy builder(CGM, entry);
5112
5113 // Pull the exception pointer out of the parameter list.
5114 llvm::Value *exn = &*fn->arg_begin();
5115
5116 // Call __cxa_begin_catch(exn).
5117 llvm::CallInst *catchCall = builder.CreateCall(Callee: getBeginCatchFn(CGM), Args: exn);
5118 catchCall->setDoesNotThrow();
5119 catchCall->setCallingConv(CGM.getRuntimeCC());
5120
5121 // Call std::terminate().
5122 llvm::CallInst *termCall = builder.CreateCall(Callee: CGM.getTerminateFn());
5123 termCall->setDoesNotThrow();
5124 termCall->setDoesNotReturn();
5125 termCall->setCallingConv(CGM.getRuntimeCC());
5126
5127 // std::terminate cannot return.
5128 builder.CreateUnreachable();
5129 }
5130 return fnRef;
5131}
5132
5133llvm::CallInst *
5134ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
5135 llvm::Value *Exn) {
5136 // In C++, we want to call __cxa_begin_catch() before terminating.
5137 if (Exn) {
5138 assert(CGF.CGM.getLangOpts().CPlusPlus);
5139 return CGF.EmitNounwindRuntimeCall(callee: getClangCallTerminateFn(CGM&: CGF.CGM), args: Exn);
5140 }
5141 return CGF.EmitNounwindRuntimeCall(callee: CGF.CGM.getTerminateFn());
5142}
5143
5144std::pair<llvm::Value *, const CXXRecordDecl *>
5145ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
5146 const CXXRecordDecl *RD) {
5147 return {CGF.GetVTablePtr(This, VTableTy: CGM.Int8PtrTy, VTableClass: RD), RD};
5148}
5149
5150llvm::Constant *
5151ItaniumCXXABI::getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD) {
5152 const CXXMethodDecl *origMD =
5153 cast<CXXMethodDecl>(Val: CGM.getItaniumVTableContext()
5154 .findOriginalMethod(GD: MD->getCanonicalDecl())
5155 .getDecl());
5156 llvm::Constant *thunk = getOrCreateVirtualFunctionPointerThunk(MD: origMD);
5157 QualType funcType = CGM.getContext().getMemberPointerType(
5158 T: MD->getType(), /*Qualifier=*/nullptr, Cls: MD->getParent());
5159 return CGM.getMemberFunctionPointer(Pointer: thunk, FT: funcType);
5160}
5161
5162void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
5163 const CXXCatchStmt *C) {
5164 if (CGF.getTarget().hasFeature(Feature: "exception-handling"))
5165 CGF.EHStack.pushCleanup<CatchRetScope>(
5166 Kind: NormalCleanup, A: cast<llvm::CatchPadInst>(Val: CGF.CurrentFuncletPad));
5167 ItaniumCXXABI::emitBeginCatch(CGF, S: C);
5168}
5169
5170llvm::CallInst *
5171WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
5172 llvm::Value *Exn) {
  // The Itanium ABI calls __clang_call_terminate(), which calls
  // __cxa_begin_catch() on the violating exception to mark it handled.
  // That is currently hard to do with the Wasm EH instruction structure
  // (catch/catch_all), so in Wasm EH we simply call std::terminate and
  // ignore the violating exception, as CGCXXABI does, and call
  // __clang_call_terminate only in Emscripten EH.
  // TODO Consider a code transformation that makes calling
  // __clang_call_terminate possible in Wasm EH.
5180 if (Exn && !EHPersonality::get(CGF).isWasmPersonality()) {
5181 assert(CGF.CGM.getLangOpts().CPlusPlus);
5182 return CGF.EmitNounwindRuntimeCall(callee: getClangCallTerminateFn(CGM&: CGF.CGM), args: Exn);
5183 }
5184 return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
5185}
5186
5187/// Register a global destructor as best as we know how.
5188void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
5189 llvm::FunctionCallee Dtor,
5190 llvm::Constant *Addr) {
5191 if (D.getTLSKind() != VarDecl::TLS_None) {
5192 llvm::PointerType *PtrTy = CGF.UnqualPtrTy;
5193
5194 // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
5195 llvm::FunctionType *AtExitTy =
5196 llvm::FunctionType::get(Result: CGM.IntTy, Params: {CGM.IntTy, PtrTy}, isVarArg: true);
5197
5198 // Fetch the actual function.
5199 llvm::FunctionCallee AtExit =
5200 CGM.CreateRuntimeFunction(Ty: AtExitTy, Name: "__pt_atexit_np");
5201
5202 // Create __dtor function for the var decl.
5203 llvm::Function *DtorStub = CGF.createTLSAtExitStub(VD: D, Dtor, Addr, AtExit);
5204
    // Register the __dtor above via __pt_atexit_np.
    // The first param is flags and must be 0; the second is the function ptr.
5207 llvm::Value *NV = llvm::Constant::getNullValue(Ty: CGM.IntTy);
5208 CGF.EmitNounwindRuntimeCall(callee: AtExit, args: {NV, DtorStub});
5209
    // A TLS __dtor cannot be unregistered, so we are done.
5211 return;
5212 }
5213
5214 // Create __dtor function for the var decl.
5215 llvm::Function *DtorStub =
5216 cast<llvm::Function>(Val: CGF.createAtExitStub(VD: D, Dtor, Addr));
5217
5218 // Register above __dtor with atexit().
5219 CGF.registerGlobalDtorWithAtExit(dtorStub: DtorStub);
5220
5221 // Emit __finalize function to unregister __dtor and (as appropriate) call
5222 // __dtor.
5223 emitCXXStermFinalizer(D, dtorStub: DtorStub, addr: Addr);
5224}
5225
5226void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
5227 llvm::Constant *addr) {
5228 llvm::FunctionType *FTy = llvm::FunctionType::get(Result: CGM.VoidTy, isVarArg: false);
5229 SmallString<256> FnName;
5230 {
5231 llvm::raw_svector_ostream Out(FnName);
5232 getMangleContext().mangleDynamicStermFinalizer(D: &D, Out);
5233 }
5234
5235 // Create the finalization action associated with a variable.
5236 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
5237 llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
5238 ty: FTy, name: FnName.str(), FI, Loc: D.getLocation());
5239
5240 CodeGenFunction CGF(CGM);
5241
5242 CGF.StartFunction(GD: GlobalDecl(), RetTy: CGM.getContext().VoidTy, Fn: StermFinalizer, FnInfo: FI,
5243 Args: FunctionArgList(), Loc: D.getLocation(),
5244 StartLoc: D.getInit()->getExprLoc());
5245
  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // unatexit returns 0, meaning that the cleanup is still pending (and we
  // should call the __dtor function).
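  //
  // The finalizer built here is therefore roughly equivalent to:
  //   void <sterm_finalizer>() {
  //     if (unatexit(__dtor) == 0)
  //       __dtor();
  //   }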
5250 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);
5251
5252 llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(Arg: V, Name: "needs_destruct");
5253
5254 llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock(name: "destruct.call");
5255 llvm::BasicBlock *EndBlock = CGF.createBasicBlock(name: "destruct.end");
5256
5257 // Check if unatexit returns a value of 0. If it does, jump to
5258 // DestructCallBlock, otherwise jump to EndBlock directly.
5259 CGF.Builder.CreateCondBr(Cond: NeedsDestruct, True: DestructCallBlock, False: EndBlock);
5260
5261 CGF.EmitBlock(BB: DestructCallBlock);
5262
5263 // Emit the call to dtorStub.
5264 llvm::CallInst *CI = CGF.Builder.CreateCall(Callee: dtorStub);
5265
5266 // Make sure the call and the callee agree on calling convention.
5267 CI->setCallingConv(dtorStub->getCallingConv());
5268
5269 CGF.EmitBlock(BB: EndBlock);
5270
5271 CGF.FinishFunction();
5272
5273 if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
5274 CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
5275 Priority: IPA->getPriority());
5276 } else if (isTemplateInstantiation(Kind: D.getTemplateSpecializationKind()) ||
5277 getContext().GetGVALinkageForVariable(VD: &D) == GVA_DiscardableODR) {
5278 // According to C++ [basic.start.init]p2, class template static data
5279 // members (i.e., implicitly or explicitly instantiated specializations)
5280 // have unordered initialization. As a consequence, we can put them into
5281 // their own llvm.global_dtors entry.
5282 CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, Priority: 65535);
5283 } else {
5284 CGM.AddCXXStermFinalizerEntry(DtorFn: StermFinalizer);
5285 }
5286}
5287