1//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This provides C++ code generation targeting the Itanium C++ ABI. The class
10// in this file generates structures that follow the Itanium C++ ABI, which is
11// documented at:
12// https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13// https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14//
15// It also supports the closely-related ARM ABI, documented at:
16// https://developer.arm.com/documentation/ihi0041/g/
17//
18//===----------------------------------------------------------------------===//
19
20#include "CGCXXABI.h"
21#include "CGCleanup.h"
22#include "CGDebugInfo.h"
23#include "CGRecordLayout.h"
24#include "CGVTables.h"
25#include "CodeGenFunction.h"
26#include "CodeGenModule.h"
27#include "TargetInfo.h"
28#include "clang/AST/Attr.h"
29#include "clang/AST/Mangle.h"
30#include "clang/AST/StmtCXX.h"
31#include "clang/AST/Type.h"
32#include "clang/CodeGen/ConstantInitBuilder.h"
33#include "llvm/IR/DataLayout.h"
34#include "llvm/IR/GlobalValue.h"
35#include "llvm/IR/Instructions.h"
36#include "llvm/IR/Intrinsics.h"
37#include "llvm/IR/Value.h"
38#include "llvm/Support/ConvertEBCDIC.h"
39#include "llvm/Support/ScopedPrinter.h"
40
41#include <optional>
42
43using namespace clang;
44using namespace CodeGen;
45
46namespace {
47class ItaniumCXXABI : public CodeGen::CGCXXABI {
48 /// VTables - All the vtables which have been defined.
49 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
50
51 /// All the thread wrapper functions that have been used.
52 llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
53 ThreadWrappers;
54
55protected:
56 bool UseARMMethodPtrABI;
57 bool UseARMGuardVarABI;
58 bool Use32BitVTableOffsetABI;
59
60 ItaniumMangleContext &getMangleContext() {
61 return cast<ItaniumMangleContext>(Val&: CodeGen::CGCXXABI::getMangleContext());
62 }
63
64public:
65 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
66 bool UseARMMethodPtrABI = false,
67 bool UseARMGuardVarABI = false) :
68 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
69 UseARMGuardVarABI(UseARMGuardVarABI),
70 Use32BitVTableOffsetABI(false) { }
71
72 bool classifyReturnType(CGFunctionInfo &FI) const override;
73
74 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
75 // If C++ prohibits us from making a copy, pass by address.
76 if (!RD->canPassInRegisters())
77 return RAA_Indirect;
78 return RAA_Default;
79 }
80
81 bool isThisCompleteObject(GlobalDecl GD) const override {
82 // The Itanium ABI has separate complete-object vs. base-object
83 // variants of both constructors and destructors.
84 if (isa<CXXDestructorDecl>(Val: GD.getDecl())) {
85 switch (GD.getDtorType()) {
86 case Dtor_Complete:
87 case Dtor_Deleting:
88 return true;
89
90 case Dtor_Base:
91 return false;
92
93 case Dtor_Comdat:
94 llvm_unreachable("emitting dtor comdat as function?");
95 case Dtor_Unified:
96 llvm_unreachable("emitting unified dtor as function?");
97 case Dtor_VectorDeleting:
98 llvm_unreachable("unexpected dtor kind for this ABI");
99 }
100 llvm_unreachable("bad dtor kind");
101 }
102 if (isa<CXXConstructorDecl>(Val: GD.getDecl())) {
103 switch (GD.getCtorType()) {
104 case Ctor_Complete:
105 return true;
106
107 case Ctor_Base:
108 return false;
109
110 case Ctor_CopyingClosure:
111 case Ctor_DefaultClosure:
112 llvm_unreachable("closure ctors in Itanium ABI?");
113
114 case Ctor_Comdat:
115 llvm_unreachable("emitting ctor comdat as function?");
116
117 case Ctor_Unified:
118 llvm_unreachable("emitting unified ctor as function?");
119 }
120 llvm_unreachable("bad dtor kind");
121 }
122
123 // No other kinds.
124 return false;
125 }
126
127 bool isZeroInitializable(const MemberPointerType *MPT) override;
128
129 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
130
131 CGCallee
132 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
133 const Expr *E,
134 Address This,
135 llvm::Value *&ThisPtrForCall,
136 llvm::Value *MemFnPtr,
137 const MemberPointerType *MPT) override;
138
139 llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
140 Address Base, llvm::Value *MemPtr,
141 const MemberPointerType *MPT,
142 bool IsInBounds) override;
143
144 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
145 const CastExpr *E,
146 llvm::Value *Src) override;
147 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
148 llvm::Constant *Src) override;
149
150 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
151
152 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
153 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
154 CharUnits offset) override;
155 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
156 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
157 CharUnits ThisAdjustment);
158
159 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
160 llvm::Value *L, llvm::Value *R,
161 const MemberPointerType *MPT,
162 bool Inequality) override;
163
164 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
165 llvm::Value *Addr,
166 const MemberPointerType *MPT) override;
167
168 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
169 Address Ptr, QualType ElementType,
170 const CXXDestructorDecl *Dtor) override;
171
172 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
173 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
174
175 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
176
177 llvm::CallInst *
178 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
179 llvm::Value *Exn) override;
180
181 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
182 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
183 CatchTypeInfo
184 getAddrOfCXXCatchHandlerType(QualType Ty,
185 QualType CatchHandlerType) override {
186 return CatchTypeInfo{.RTTI: getAddrOfRTTIDescriptor(Ty), .Flags: 0};
187 }
188
189 bool shouldTypeidBeNullChecked(QualType SrcRecordTy) override;
190 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
191 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
192 Address ThisPtr,
193 llvm::Type *StdTypeInfoPtrTy) override;
194
195 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
196 QualType SrcRecordTy) override;
197
198 /// Determine whether we know that all instances of type RecordTy will have
199 /// the same vtable pointer values, that is distinct from all other vtable
200 /// pointers. While this is required by the Itanium ABI, it doesn't happen in
201 /// practice in some cases due to language extensions.
202 bool hasUniqueVTablePointer(QualType RecordTy) {
203 const CXXRecordDecl *RD = RecordTy->getAsCXXRecordDecl();
204
205 // Under -fapple-kext, multiple definitions of the same vtable may be
206 // emitted.
207 if (!CGM.getCodeGenOpts().AssumeUniqueVTables ||
208 getContext().getLangOpts().AppleKext)
209 return false;
210
211 // If the type_info* would be null, the vtable might be merged with that of
212 // another type.
213 if (!CGM.shouldEmitRTTI())
214 return false;
215
216 // If there's only one definition of the vtable in the program, it has a
217 // unique address.
218 if (!llvm::GlobalValue::isWeakForLinker(Linkage: CGM.getVTableLinkage(RD)))
219 return true;
220
221 // Even if there are multiple definitions of the vtable, they are required
222 // by the ABI to use the same symbol name, so should be merged at load
223 // time. However, if the class has hidden visibility, there can be
224 // different versions of the class in different modules, and the ABI
225 // library might treat them as being the same.
226 if (CGM.GetLLVMVisibility(V: RD->getVisibility()) !=
227 llvm::GlobalValue::DefaultVisibility)
228 return false;
229
230 return true;
231 }
232
233 bool shouldEmitExactDynamicCast(QualType DestRecordTy) override {
234 return hasUniqueVTablePointer(RecordTy: DestRecordTy);
235 }
236
237 std::optional<ExactDynamicCastInfo>
238 getExactDynamicCastInfo(QualType SrcRecordTy, QualType DestTy,
239 QualType DestRecordTy) override;
240
241 llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address Value,
242 QualType SrcRecordTy, QualType DestTy,
243 QualType DestRecordTy,
244 llvm::BasicBlock *CastEnd) override;
245
246 llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address ThisAddr,
247 QualType SrcRecordTy, QualType DestTy,
248 QualType DestRecordTy,
249 const ExactDynamicCastInfo &CastInfo,
250 llvm::BasicBlock *CastSuccess,
251 llvm::BasicBlock *CastFail) override;
252
253 llvm::Value *emitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
254 QualType SrcRecordTy) override;
255
256 bool EmitBadCastCall(CodeGenFunction &CGF) override;
257
258 llvm::Value *
259 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
260 const CXXRecordDecl *ClassDecl,
261 const CXXRecordDecl *BaseClassDecl) override;
262
263 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
264
265 AddedStructorArgCounts
266 buildStructorSignature(GlobalDecl GD,
267 SmallVectorImpl<CanQualType> &ArgTys) override;
268
269 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
270 CXXDtorType DT) const override {
271 // Itanium does not emit any destructor variant as an inline thunk.
272 // Delegating may occur as an optimization, but all variants are either
273 // emitted with external linkage or as linkonce if they are inline and used.
274 return false;
275 }
276
277 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
278
279 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
280 FunctionArgList &Params) override;
281
282 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
283
284 AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
285 const CXXConstructorDecl *D,
286 CXXCtorType Type,
287 bool ForVirtualBase,
288 bool Delegating) override;
289
290 llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
291 const CXXDestructorDecl *DD,
292 CXXDtorType Type,
293 bool ForVirtualBase,
294 bool Delegating) override;
295
296 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
297 CXXDtorType Type, bool ForVirtualBase,
298 bool Delegating, Address This,
299 QualType ThisTy) override;
300
301 void emitVTableDefinitions(CodeGenVTables &CGVT,
302 const CXXRecordDecl *RD) override;
303
304 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
305 CodeGenFunction::VPtr Vptr) override;
306
307 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
308 return true;
309 }
310
311 llvm::Constant *
312 getVTableAddressPoint(BaseSubobject Base,
313 const CXXRecordDecl *VTableClass) override;
314
315 llvm::Value *getVTableAddressPointInStructor(
316 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
317 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
318
319 llvm::Value *getVTableAddressPointInStructorWithVTT(
320 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
321 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
322
323 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
324 CharUnits VPtrOffset) override;
325
326 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
327 Address This, llvm::Type *Ty,
328 SourceLocation Loc) override;
329
330 llvm::Value *
331 EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor,
332 CXXDtorType DtorType, Address This,
333 DeleteOrMemberCallExpr E,
334 llvm::CallBase **CallOrInvoke) override;
335
336 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
337
338 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
339 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
340
341 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
342 bool ReturnAdjustment) override {
343 // Allow inlining of thunks by emitting them with available_externally
344 // linkage together with vtables when needed.
345 if (ForVTable && !Thunk->hasLocalLinkage())
346 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
347 CGM.setGVProperties(GV: Thunk, GD);
348 }
349
350 bool exportThunk() override { return true; }
351
352 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
353 const CXXRecordDecl *UnadjustedThisClass,
354 const ThunkInfo &TI) override;
355
356 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
357 const CXXRecordDecl *UnadjustedRetClass,
358 const ReturnAdjustment &RA) override;
359
360 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
361 FunctionArgList &Args) const override {
362 assert(!Args.empty() && "expected the arglist to not be empty!");
363 return Args.size() - 1;
364 }
365
366 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
367 StringRef GetDeletedVirtualCallName() override
368 { return "__cxa_deleted_virtual"; }
369
370 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
371 Address InitializeArrayCookie(CodeGenFunction &CGF,
372 Address NewPtr,
373 llvm::Value *NumElements,
374 const CXXNewExpr *expr,
375 QualType ElementType) override;
376 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
377 Address allocPtr,
378 CharUnits cookieSize) override;
379
380 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
381 llvm::GlobalVariable *DeclPtr,
382 bool PerformInit) override;
383 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
384 llvm::FunctionCallee dtor,
385 llvm::Constant *addr) override;
386
387 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
388 llvm::Value *Val);
389 void EmitThreadLocalInitFuncs(
390 CodeGenModule &CGM,
391 ArrayRef<const VarDecl *> CXXThreadLocals,
392 ArrayRef<llvm::Function *> CXXThreadLocalInits,
393 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
394
395 bool usesThreadWrapperFunction(const VarDecl *VD) const override {
396 return !isEmittedWithConstantInitializer(VD) ||
397 mayNeedDestruction(VD);
398 }
399 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
400 QualType LValType) override;
401
402 bool NeedsVTTParameter(GlobalDecl GD) override;
403
404 llvm::Constant *
405 getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD);
406
407 /**************************** RTTI Uniqueness ******************************/
408
409protected:
410 /// Returns true if the ABI requires RTTI type_info objects to be unique
411 /// across a program.
412 virtual bool shouldRTTIBeUnique() const { return true; }
413
414public:
415 /// What sort of unique-RTTI behavior should we use?
416 enum RTTIUniquenessKind {
417 /// We are guaranteeing, or need to guarantee, that the RTTI string
418 /// is unique.
419 RUK_Unique,
420
421 /// We are not guaranteeing uniqueness for the RTTI string, so we
422 /// can demote to hidden visibility but must use string comparisons.
423 RUK_NonUniqueHidden,
424
425 /// We are not guaranteeing uniqueness for the RTTI string, so we
426 /// have to use string comparisons, but we also have to emit it with
427 /// non-hidden visibility.
428 RUK_NonUniqueVisible
429 };
430
431 /// Return the required visibility status for the given type and linkage in
432 /// the current ABI.
433 RTTIUniquenessKind
434 classifyRTTIUniqueness(QualType CanTy,
435 llvm::GlobalValue::LinkageTypes Linkage) const;
436 friend class ItaniumRTTIBuilder;
437
438 void emitCXXStructor(GlobalDecl GD) override;
439
440 std::pair<llvm::Value *, const CXXRecordDecl *>
441 LoadVTablePtr(CodeGenFunction &CGF, Address This,
442 const CXXRecordDecl *RD) override;
443
444 private:
445 llvm::Constant *
446 getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD);
447
448 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
449 const auto &VtableLayout =
450 CGM.getItaniumVTableContext().getVTableLayout(RD);
451
452 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
453 // Skip empty slot.
454 if (!VtableComponent.isUsedFunctionPointerKind())
455 continue;
456
457 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
458 const FunctionDecl *FD = Method->getDefinition();
459 const bool IsInlined =
460 Method->getCanonicalDecl()->isInlined() || (FD && FD->isInlined());
461 if (!IsInlined)
462 continue;
463
464 StringRef Name = CGM.getMangledName(
465 GD: VtableComponent.getGlobalDecl(/*HasVectorDeletingDtors=*/false));
466 auto *Entry = CGM.GetGlobalValue(Ref: Name);
467 // This checks if virtual inline function has already been emitted.
468 // Note that it is possible that this inline function would be emitted
469 // after trying to emit vtable speculatively. Because of this we do
470 // an extra pass after emitting all deferred vtables to find and emit
471 // these vtables opportunistically.
472 if (!Entry || Entry->isDeclaration())
473 return true;
474 }
475 return false;
476 }
477
478 bool isVTableHidden(const CXXRecordDecl *RD) const {
479 const auto &VtableLayout =
480 CGM.getItaniumVTableContext().getVTableLayout(RD);
481
482 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
483 if (VtableComponent.isRTTIKind()) {
484 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
485 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
486 return true;
487 } else if (VtableComponent.isUsedFunctionPointerKind()) {
488 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
489 if (Method->getVisibility() == Visibility::HiddenVisibility &&
490 !Method->isDefined())
491 return true;
492 }
493 }
494 return false;
495 }
496};
497
/// ARMCXXABI - The 32-bit ARM flavour of the Itanium ABI. It enables the
/// ARM member-pointer and guard-variable encodings, makes constructors and
/// destructors return 'this', customizes thunk returns, and overrides the
/// array-cookie hooks with ARM-specific versions.
class ARMCXXABI : public ItaniumCXXABI {
public:
  ARMCXXABI(CodeGen::CodeGenModule &CGM) :
    ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                  /*UseARMGuardVarABI=*/true) {}

  // Under the ARM ABI, constructors and destructors return 'this'.
  bool constructorsAndDestructorsReturnThis() const override { return true; }

  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
                           QualType ResTy) override;

  // ARM-specific array cookie layout; replaces the generic Itanium versions.
  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
                                   CharUnits cookieSize) override;
};
518
/// AppleARM64CXXABI - The arm64 Apple-platform variant of the ARM ABI.
class AppleARM64CXXABI : public ARMCXXABI {
public:
  AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
    // Only the low 32 bits of a virtual member function pointer's vtable
    // offset are significant on this target (the rest is reserved).
    Use32BitVTableOffsetABI = true;
  }

  // ARM64 libraries are prepared for non-unique RTTI.
  bool shouldRTTIBeUnique() const override { return false; }
};
528
/// FuchsiaCXXABI - Generic Itanium ABI, except that constructors and
/// destructors return 'this' (as under the ARM ABI).
class FuchsiaCXXABI final : public ItaniumCXXABI {
public:
  explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

private:
  bool constructorsAndDestructorsReturnThis() const override { return true; }
};
537
/// WebAssemblyCXXABI - Itanium ABI for WebAssembly. Uses the ARM
/// member-pointer and guard-variable encodings, returns 'this' from
/// constructors/destructors, disallows calling through mismatched function
/// types, and customizes catch-begin and unexpected-exception emission.
class WebAssemblyCXXABI final : public ItaniumCXXABI {
public:
  explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                      /*UseARMGuardVarABI=*/true) {}
  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
  llvm::CallInst *
  emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                      llvm::Value *Exn) override;

private:
  bool constructorsAndDestructorsReturnThis() const override { return true; }
  // Wasm traps on indirect calls through a wrong-typed pointer.
  bool canCallMismatchedFunctionType() const override { return false; }
};
552
/// XLCXXABI - IBM XL-compatible flavour of the Itanium ABI (AIX). Uses the
/// sinit/sterm initialization scheme and registers global destructors
/// through sterm finalizer functions rather than __cxa_atexit.
class XLCXXABI final : public ItaniumCXXABI {
public:
  explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::FunctionCallee dtor,
                          llvm::Constant *addr) override;

  bool useSinitAndSterm() const override { return true; }

private:
  // Emits the sterm finalizer that runs dtorStub for D at program teardown.
  void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                             llvm::Constant *addr);
};
568}
569
570CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
571 switch (CGM.getContext().getCXXABIKind()) {
572 // For IR-generation purposes, there's no significant difference
573 // between the ARM and iOS ABIs.
574 case TargetCXXABI::GenericARM:
575 case TargetCXXABI::iOS:
576 case TargetCXXABI::WatchOS:
577 return new ARMCXXABI(CGM);
578
579 case TargetCXXABI::AppleARM64:
580 return new AppleARM64CXXABI(CGM);
581
582 case TargetCXXABI::Fuchsia:
583 return new FuchsiaCXXABI(CGM);
584
585 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
586 // include the other 32-bit ARM oddities: constructor/destructor return values
587 // and array cookies.
588 case TargetCXXABI::GenericAArch64:
589 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
590 /*UseARMGuardVarABI=*/true);
591
592 case TargetCXXABI::GenericMIPS:
593 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
594
595 case TargetCXXABI::WebAssembly:
596 return new WebAssemblyCXXABI(CGM);
597
598 case TargetCXXABI::XL:
599 return new XLCXXABI(CGM);
600
601 case TargetCXXABI::GenericItanium:
602 return new ItaniumCXXABI(CGM);
603
604 case TargetCXXABI::Microsoft:
605 llvm_unreachable("Microsoft ABI is not Itanium-based");
606 }
607 llvm_unreachable("bad ABI kind");
608}
609
610llvm::Type *
611ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
612 if (MPT->isMemberDataPointer())
613 return CGM.PtrDiffTy;
614 return llvm::StructType::get(elt1: CGM.PtrDiffTy, elts: CGM.PtrDiffTy);
615}
616
617/// In the Itanium and ARM ABIs, method pointers have the form:
618/// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
619///
620/// In the Itanium ABI:
621/// - method pointers are virtual if (memptr.ptr & 1) is nonzero
622/// - the this-adjustment is (memptr.adj)
623/// - the virtual offset is (memptr.ptr - 1)
624///
625/// In the ARM ABI:
626/// - method pointers are virtual if (memptr.adj & 1) is nonzero
627/// - the this-adjustment is (memptr.adj >> 1)
628/// - the virtual offset is (memptr.ptr)
629/// ARM uses 'adj' for the virtual flag because Thumb functions
630/// may be only single-byte aligned.
631///
632/// If the member is virtual, the adjusted 'this' pointer points
633/// to a vtable pointer from which the virtual offset is applied.
634///
635/// If the member is non-virtual, memptr.ptr is the address of
636/// the function to call.
637CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
638 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
639 llvm::Value *&ThisPtrForCall,
640 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
641 CGBuilderTy &Builder = CGF.Builder;
642
643 const FunctionProtoType *FPT =
644 MPT->getPointeeType()->castAs<FunctionProtoType>();
645 auto *RD = MPT->getMostRecentCXXRecordDecl();
646
647 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(Ty: CGM.PtrDiffTy, V: 1);
648
649 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock(name: "memptr.virtual");
650 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock(name: "memptr.nonvirtual");
651 llvm::BasicBlock *FnEnd = CGF.createBasicBlock(name: "memptr.end");
652
653 // Extract memptr.adj, which is in the second field.
654 llvm::Value *RawAdj = Builder.CreateExtractValue(Agg: MemFnPtr, Idxs: 1, Name: "memptr.adj");
655
656 // Compute the true adjustment.
657 llvm::Value *Adj = RawAdj;
658 if (UseARMMethodPtrABI)
659 Adj = Builder.CreateAShr(LHS: Adj, RHS: ptrdiff_1, Name: "memptr.adj.shifted");
660
661 // Apply the adjustment and cast back to the original struct type
662 // for consistency.
663 llvm::Value *This = ThisAddr.emitRawPointer(CGF);
664 This = Builder.CreateInBoundsGEP(Ty: Builder.getInt8Ty(), Ptr: This, IdxList: Adj);
665 ThisPtrForCall = This;
666
667 // Load the function pointer.
668 llvm::Value *FnAsInt = Builder.CreateExtractValue(Agg: MemFnPtr, Idxs: 0, Name: "memptr.ptr");
669
670 // If the LSB in the function pointer is 1, the function pointer points to
671 // a virtual function.
672 llvm::Value *IsVirtual;
673 if (UseARMMethodPtrABI)
674 IsVirtual = Builder.CreateAnd(LHS: RawAdj, RHS: ptrdiff_1);
675 else
676 IsVirtual = Builder.CreateAnd(LHS: FnAsInt, RHS: ptrdiff_1);
677 IsVirtual = Builder.CreateIsNotNull(Arg: IsVirtual, Name: "memptr.isvirtual");
678 Builder.CreateCondBr(Cond: IsVirtual, True: FnVirtual, False: FnNonVirtual);
679
680 // In the virtual path, the adjustment left 'This' pointing to the
681 // vtable of the correct base subobject. The "function pointer" is an
682 // offset within the vtable (+1 for the virtual flag on non-ARM).
683 CGF.EmitBlock(BB: FnVirtual);
684
685 // Cast the adjusted this to a pointer to vtable pointer and load.
686 llvm::Type *VTableTy = CGF.CGM.GlobalsInt8PtrTy;
687 CharUnits VTablePtrAlign =
688 CGF.CGM.getDynamicOffsetAlignment(ActualAlign: ThisAddr.getAlignment(), Class: RD,
689 ExpectedTargetAlign: CGF.getPointerAlign());
690 llvm::Value *VTable = CGF.GetVTablePtr(
691 This: Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, VTableClass: RD);
692
693 // Apply the offset.
694 // On ARM64, to reserve extra space in virtual member function pointers,
695 // we only pay attention to the low 32 bits of the offset.
696 llvm::Value *VTableOffset = FnAsInt;
697 if (!UseARMMethodPtrABI)
698 VTableOffset = Builder.CreateSub(LHS: VTableOffset, RHS: ptrdiff_1);
699 if (Use32BitVTableOffsetABI) {
700 VTableOffset = Builder.CreateTrunc(V: VTableOffset, DestTy: CGF.Int32Ty);
701 VTableOffset = Builder.CreateZExt(V: VTableOffset, DestTy: CGM.PtrDiffTy);
702 }
703
704 // Check the address of the function pointer if CFI on member function
705 // pointers is enabled.
706 llvm::Constant *CheckSourceLocation;
707 llvm::Constant *CheckTypeDesc;
708 bool ShouldEmitCFICheck = CGF.SanOpts.has(K: SanitizerKind::CFIMFCall) &&
709 CGM.HasHiddenLTOVisibility(RD);
710
711 if (ShouldEmitCFICheck) {
712 if (const auto *BinOp = dyn_cast<BinaryOperator>(Val: E)) {
713 if (BinOp->isPtrMemOp() &&
714 BinOp->getRHS()
715 ->getType()
716 ->hasPointeeToCFIUncheckedCalleeFunctionType())
717 ShouldEmitCFICheck = false;
718 }
719 }
720
721 bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
722 CGM.HasHiddenLTOVisibility(RD);
723 // TODO: Update this name not to be restricted to WPD only
724 // as we now emit the vtable info info for speculative devirtualization as
725 // well.
726 bool ShouldEmitWPDInfo =
727 (CGM.getCodeGenOpts().WholeProgramVTables &&
728 // Don't insert type tests if we are forcing public visibility.
729 !CGM.AlwaysHasLTOVisibilityPublic(RD)) ||
730 CGM.getCodeGenOpts().DevirtualizeSpeculatively;
731 llvm::Value *VirtualFn = nullptr;
732
733 {
734 auto CheckOrdinal = SanitizerKind::SO_CFIMFCall;
735 auto CheckHandler = SanitizerHandler::CFICheckFail;
736 SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
737
738 llvm::Value *TypeId = nullptr;
739 llvm::Value *CheckResult = nullptr;
740
741 if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
742 // If doing CFI, VFE or WPD, we will need the metadata node to check
743 // against.
744 llvm::Metadata *MD =
745 CGM.CreateMetadataIdentifierForVirtualMemPtrType(T: QualType(MPT, 0));
746 TypeId = llvm::MetadataAsValue::get(Context&: CGF.getLLVMContext(), MD);
747 }
748
749 if (ShouldEmitVFEInfo) {
750 llvm::Value *VFPAddr =
751 Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: VTable, IdxList: VTableOffset);
752
753 // If doing VFE, load from the vtable with a type.checked.load intrinsic
754 // call. Note that we use the GEP to calculate the address to load from
755 // and pass 0 as the offset to the intrinsic. This is because every
756 // vtable slot of the correct type is marked with matching metadata, and
757 // we know that the load must be from one of these slots.
758 llvm::Value *CheckedLoad = Builder.CreateCall(
759 Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::type_checked_load),
760 Args: {VFPAddr, llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: 0), TypeId});
761 CheckResult = Builder.CreateExtractValue(Agg: CheckedLoad, Idxs: 1);
762 VirtualFn = Builder.CreateExtractValue(Agg: CheckedLoad, Idxs: 0);
763 } else {
764 // When not doing VFE, emit a normal load, as it allows more
765 // optimisations than type.checked.load.
766 if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
767 llvm::Value *VFPAddr =
768 Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: VTable, IdxList: VTableOffset);
769 llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
770 ? llvm::Intrinsic::type_test
771 : llvm::Intrinsic::public_type_test;
772
773 CheckResult =
774 Builder.CreateCall(Callee: CGM.getIntrinsic(IID), Args: {VFPAddr, TypeId});
775 }
776
777 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
778 VirtualFn = CGF.Builder.CreateCall(
779 Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::load_relative,
780 Tys: {VTableOffset->getType()}),
781 Args: {VTable, VTableOffset});
782 } else {
783 llvm::Value *VFPAddr =
784 CGF.Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: VTable, IdxList: VTableOffset);
785 VirtualFn = CGF.Builder.CreateAlignedLoad(Ty: CGF.DefaultPtrTy, Addr: VFPAddr,
786 Align: CGF.getPointerAlign(),
787 Name: "memptr.virtualfn");
788 }
789 }
790 assert(VirtualFn && "Virtual fuction pointer not created!");
791 assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
792 CheckResult) &&
793 "Check result required but not created!");
794
795 if (ShouldEmitCFICheck) {
796 // If doing CFI, emit the check.
797 CheckSourceLocation = CGF.EmitCheckSourceLocation(Loc: E->getBeginLoc());
798 CheckTypeDesc = CGF.EmitCheckTypeDescriptor(T: QualType(MPT, 0));
799 llvm::Constant *StaticData[] = {
800 llvm::ConstantInt::get(Ty: CGF.Int8Ty, V: CodeGenFunction::CFITCK_VMFCall),
801 CheckSourceLocation,
802 CheckTypeDesc,
803 };
804
805 if (CGM.getCodeGenOpts().SanitizeTrap.has(K: SanitizerKind::CFIMFCall)) {
806 CGF.EmitTrapCheck(Checked: CheckResult, CheckHandlerID: CheckHandler);
807 } else {
808 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
809 Context&: CGM.getLLVMContext(),
810 MD: llvm::MDString::get(Context&: CGM.getLLVMContext(), Str: "all-vtables"));
811 llvm::Value *ValidVtable = Builder.CreateCall(
812 Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::type_test), Args: {VTable, AllVtables});
813 CGF.EmitCheck(Checked: std::make_pair(x&: CheckResult, y&: CheckOrdinal), Check: CheckHandler,
814 StaticArgs: StaticData, DynamicArgs: {VTable, ValidVtable});
815 }
816
817 FnVirtual = Builder.GetInsertBlock();
818 }
819 } // End of sanitizer scope
820
821 CGF.EmitBranch(Block: FnEnd);
822
823 // In the non-virtual path, the function pointer is actually a
824 // function pointer.
825 CGF.EmitBlock(BB: FnNonVirtual);
826 llvm::Value *NonVirtualFn =
827 Builder.CreateIntToPtr(V: FnAsInt, DestTy: CGF.DefaultPtrTy, Name: "memptr.nonvirtualfn");
828
829 // Check the function pointer if CFI on member function pointers is enabled.
830 if (ShouldEmitCFICheck) {
831 CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
832 if (RD->hasDefinition()) {
833 auto CheckOrdinal = SanitizerKind::SO_CFIMFCall;
834 auto CheckHandler = SanitizerHandler::CFICheckFail;
835 SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
836
837 llvm::Constant *StaticData[] = {
838 llvm::ConstantInt::get(Ty: CGF.Int8Ty, V: CodeGenFunction::CFITCK_NVMFCall),
839 CheckSourceLocation,
840 CheckTypeDesc,
841 };
842
843 llvm::Value *Bit = Builder.getFalse();
844 for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
845 llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
846 T: getContext().getMemberPointerType(T: MPT->getPointeeType(),
847 /*Qualifier=*/std::nullopt,
848 Cls: Base->getCanonicalDecl()));
849 llvm::Value *TypeId =
850 llvm::MetadataAsValue::get(Context&: CGF.getLLVMContext(), MD);
851
852 llvm::Value *TypeTest =
853 Builder.CreateCall(Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::type_test),
854 Args: {NonVirtualFn, TypeId});
855 Bit = Builder.CreateOr(LHS: Bit, RHS: TypeTest);
856 }
857
858 CGF.EmitCheck(Checked: std::make_pair(x&: Bit, y&: CheckOrdinal), Check: CheckHandler, StaticArgs: StaticData,
859 DynamicArgs: {NonVirtualFn, llvm::UndefValue::get(T: CGF.IntPtrTy)});
860
861 FnNonVirtual = Builder.GetInsertBlock();
862 }
863 }
864
865 // We're done.
866 CGF.EmitBlock(BB: FnEnd);
867 llvm::PHINode *CalleePtr = Builder.CreatePHI(Ty: CGF.DefaultPtrTy, NumReservedValues: 2);
868 CalleePtr->addIncoming(V: VirtualFn, BB: FnVirtual);
869 CalleePtr->addIncoming(V: NonVirtualFn, BB: FnNonVirtual);
870
871 CGPointerAuthInfo PointerAuth;
872
873 if (const auto &Schema =
874 CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers) {
875 llvm::PHINode *DiscriminatorPHI = Builder.CreatePHI(Ty: CGF.IntPtrTy, NumReservedValues: 2);
876 DiscriminatorPHI->addIncoming(V: llvm::ConstantInt::get(Ty: CGF.IntPtrTy, V: 0),
877 BB: FnVirtual);
878 const auto &AuthInfo =
879 CGM.getMemberFunctionPointerAuthInfo(FT: QualType(MPT, 0));
880 assert(Schema.getKey() == AuthInfo.getKey() &&
881 "Keys for virtual and non-virtual member functions must match");
882 auto *NonVirtualDiscriminator = AuthInfo.getDiscriminator();
883 DiscriminatorPHI->addIncoming(V: NonVirtualDiscriminator, BB: FnNonVirtual);
884 PointerAuth = CGPointerAuthInfo(
885 Schema.getKey(), Schema.getAuthenticationMode(), Schema.isIsaPointer(),
886 Schema.authenticatesNullValues(), DiscriminatorPHI);
887 }
888
889 CGCallee Callee(FPT, CalleePtr, PointerAuth);
890 return Callee;
891}
892
893/// Compute an l-value by applying the given pointer-to-member to a
894/// base object.
895llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
896 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
897 const MemberPointerType *MPT, bool IsInBounds) {
898 assert(MemPtr->getType() == CGM.PtrDiffTy);
899
900 CGBuilderTy &Builder = CGF.Builder;
901
902 // Apply the offset.
903 llvm::Value *BaseAddr = Base.emitRawPointer(CGF);
904 return Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: BaseAddr, IdxList: MemPtr, Name: "memptr.offset",
905 NW: IsInBounds ? llvm::GEPNoWrapFlags::inBounds()
906 : llvm::GEPNoWrapFlags::none());
907}
908
909// See if it's possible to return a constant signed pointer.
910static llvm::Constant *pointerAuthResignConstant(
911 llvm::Value *Ptr, const CGPointerAuthInfo &CurAuthInfo,
912 const CGPointerAuthInfo &NewAuthInfo, CodeGenModule &CGM) {
913 const auto *CPA = dyn_cast<llvm::ConstantPtrAuth>(Val: Ptr);
914
915 if (!CPA)
916 return nullptr;
917
918 assert(CPA->getKey()->getZExtValue() == CurAuthInfo.getKey() &&
919 CPA->getAddrDiscriminator()->isNullValue() &&
920 CPA->getDiscriminator() == CurAuthInfo.getDiscriminator() &&
921 "unexpected key or discriminators");
922
923 return CGM.getConstantSignedPointer(
924 Pointer: CPA->getPointer(), Key: NewAuthInfo.getKey(), StorageAddress: nullptr,
925 OtherDiscriminator: cast<llvm::ConstantInt>(Val: NewAuthInfo.getDiscriminator()));
926}
927
/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
/// <-- offset --> <-- adjustment -->
/// |--------------------------|----------------------|--------------------|
/// ^Derived address point ^Base address point ^Member address point
///
/// So when converting a base member pointer to a derived member pointer,
/// we add the offset to the adjustment because the address point has
/// decreased; and conversely, when converting a derived MP to a base MP
/// we subtract the offset from the adjustment because the address point
/// has increased.
///
/// The standard forbids (at compile time) conversion to and from
/// virtual bases, which is why we don't have to consider them here.
///
/// The standard forbids (at run time) casting a derived MP to a base
/// MP when the derived MP does not point to a member of the base.
/// This is why -1 is a reasonable choice for null data member
/// pointers.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  // Use constant emission if we can.
  if (isa<llvm::Constant>(Val: src))
    return EmitMemberPointerConversion(E, Src: cast<llvm::Constant>(Val: src));

  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  CGBuilderTy &Builder = CGF.Builder;
  QualType DstType = E->getType();

  if (DstType->isMemberFunctionPointerType()) {
    // Under pointer authentication the 'ptr' field of a member function
    // pointer is signed with a type-derived discriminator, so a conversion
    // that changes the member pointer type may require re-signing it.
    if (const auto &NewAuthInfo =
            CGM.getMemberFunctionPointerAuthInfo(FT: DstType)) {
      QualType SrcType = E->getSubExpr()->getType();
      assert(SrcType->isMemberFunctionPointerType());
      const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(FT: SrcType);
      llvm::Value *MemFnPtr = Builder.CreateExtractValue(Agg: src, Idxs: 0, Name: "memptr.ptr");
      llvm::Type *OrigTy = MemFnPtr->getType();

      llvm::BasicBlock *StartBB = Builder.GetInsertBlock();
      llvm::BasicBlock *ResignBB = CGF.createBasicBlock(name: "resign");
      llvm::BasicBlock *MergeBB = CGF.createBasicBlock(name: "merge");

      // Check whether we have a virtual offset or a pointer to a function.
      assert(UseARMMethodPtrABI && "ARM ABI expected");
      llvm::Value *Adj = Builder.CreateExtractValue(Agg: src, Idxs: 1, Name: "memptr.adj");
      llvm::Constant *Ptrdiff_1 = llvm::ConstantInt::get(Ty: CGM.PtrDiffTy, V: 1);
      llvm::Value *AndVal = Builder.CreateAnd(LHS: Adj, RHS: Ptrdiff_1);
      llvm::Value *IsVirtualOffset =
          Builder.CreateIsNotNull(Arg: AndVal, Name: "is.virtual.offset");
      // A virtual member pointer carries a vtable offset rather than a signed
      // function pointer, so it bypasses the resign block entirely.
      Builder.CreateCondBr(Cond: IsVirtualOffset, True: MergeBB, False: ResignBB);

      CGF.EmitBlock(BB: ResignBB);
      llvm::Type *PtrTy = llvm::PointerType::getUnqual(C&: CGM.getLLVMContext());
      MemFnPtr = Builder.CreateIntToPtr(V: MemFnPtr, DestTy: PtrTy);
      // NOTE(review): constant 'src' was already handled by the early return
      // above, so this IsKnownNonNull argument is presumably always false
      // here — confirm before relying on it.
      MemFnPtr =
          CGF.emitPointerAuthResign(Pointer: MemFnPtr, PointerType: SrcType, CurAuthInfo, NewAuthInfo,
                                    IsKnownNonNull: isa<llvm::Constant>(Val: src));
      MemFnPtr = Builder.CreatePtrToInt(V: MemFnPtr, DestTy: OrigTy);
      llvm::Value *ResignedVal = Builder.CreateInsertValue(Agg: src, Val: MemFnPtr, Idxs: 0);
      // Re-read the insertion block: emitPointerAuthResign may have emitted
      // additional basic blocks, and the PHI below needs the real predecessor.
      ResignBB = Builder.GetInsertBlock();

      CGF.EmitBlock(BB: MergeBB);
      llvm::PHINode *NewSrc = Builder.CreatePHI(Ty: src->getType(), NumReservedValues: 2);
      NewSrc->addIncoming(V: src, BB: StartBB);
      NewSrc->addIncoming(V: ResignedVal, BB: ResignBB);
      src = NewSrc;
    }
  }

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // A trivial (zero) adjustment means the representation is unchanged.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(LHS: src, RHS: adj, Name: "adj");
    else
      dst = Builder.CreateNSWAdd(LHS: src, RHS: adj, Name: "adj");

    // Null check: the null data member pointer is -1 and must be preserved.
    llvm::Value *null = llvm::Constant::getAllOnesValue(Ty: src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(LHS: src, RHS: null, Name: "memptr.isnull");
    return Builder.CreateSelect(C: isNull, True: src, False: dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(Val: adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(Ty: adj->getType(), V: offset);
  }

  // Member function pointers adjust the 'adj' field of the pair.
  llvm::Value *srcAdj = Builder.CreateExtractValue(Agg: src, Idxs: 1, Name: "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(LHS: srcAdj, RHS: adj, Name: "adj");
  else
    dstAdj = Builder.CreateNSWAdd(LHS: srcAdj, RHS: adj, Name: "adj");

  return Builder.CreateInsertValue(Agg: src, Val: dstAdj, Idxs: 1);
}
1048
/// Re-sign the function pointer field of a constant member function pointer
/// when a conversion changes the member pointer type under pointer
/// authentication.
///
/// Returns \p Src unchanged when no re-signing is needed: same type, no
/// authentication on either side, or a null member pointer.
static llvm::Constant *
pointerAuthResignMemberFunctionPointer(llvm::Constant *Src, QualType DestType,
                                       QualType SrcType, CodeGenModule &CGM) {
  assert(DestType->isMemberFunctionPointerType() &&
         SrcType->isMemberFunctionPointerType() &&
         "member function pointers expected");
  if (DestType == SrcType)
    return Src;

  const auto &NewAuthInfo = CGM.getMemberFunctionPointerAuthInfo(FT: DestType);
  const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(FT: SrcType);

  // Neither side is signed; nothing to do.
  if (!NewAuthInfo && !CurAuthInfo)
    return Src;

  // Extract the 'ptr' field of the { ptr, adj } pair.
  llvm::Constant *MemFnPtr = Src->getAggregateElement(Elt: 0u);
  if (MemFnPtr->getNumOperands() == 0) {
    // src must be a pair of null pointers.
    assert(isa<llvm::ConstantInt>(MemFnPtr) && "constant int expected");
    return Src;
  }

  // Re-sign the underlying pointer constant and splice it back into the
  // aggregate as an integer.
  llvm::Constant *ConstPtr = pointerAuthResignConstant(
      Ptr: cast<llvm::User>(Val: MemFnPtr)->getOperand(i: 0), CurAuthInfo, NewAuthInfo, CGM);
  ConstPtr = llvm::ConstantExpr::getPtrToInt(C: ConstPtr, Ty: MemFnPtr->getType());
  return ConstantFoldInsertValueInstruction(Agg: Src, Val: ConstPtr, Idxs: 0);
}
1076
/// Constant-folding counterpart of the IR-emitting overload above: performs
/// the same bitcast / derived-to-base / base-to-derived adjustment entirely
/// on constants.
llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
                                           llvm::Constant *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  QualType DstType = E->getType();

  // Re-sign the constant function pointer field if pointer authentication
  // discriminates on the member pointer type.
  if (DstType->isMemberFunctionPointerType())
    src = pointerAuthResignMemberFunctionPointer(
        Src: src, DestType: DstType, SrcType: E->getSubExpr()->getType(), CGM);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // If the adjustment is trivial, we don't need to do anything.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    // null maps to null.
    if (src->isAllOnesValue()) return src;

    if (isDerivedToBase)
      return llvm::ConstantExpr::getNSWSub(C1: src, C2: adj);
    else
      return llvm::ConstantExpr::getNSWAdd(C1: src, C2: adj);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(Val: adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(Ty: adj->getType(), V: offset);
  }

  // Member function pointers adjust the 'adj' field of the pair.
  llvm::Constant *srcAdj = src->getAggregateElement(Elt: 1);
  llvm::Constant *dstAdj;
  if (isDerivedToBase)
    dstAdj = llvm::ConstantExpr::getNSWSub(C1: srcAdj, C2: adj);
  else
    dstAdj = llvm::ConstantExpr::getNSWAdd(C1: srcAdj, C2: adj);

  llvm::Constant *res = ConstantFoldInsertValueInstruction(Agg: src, Val: dstAdj, Idxs: 1);
  assert(res != nullptr && "Folding must succeed");
  return res;
}
1132
1133llvm::Constant *
1134ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
1135 // Itanium C++ ABI 2.3:
1136 // A NULL pointer is represented as -1.
1137 if (MPT->isMemberDataPointer())
1138 return llvm::ConstantInt::get(Ty: CGM.PtrDiffTy, V: -1ULL, /*isSigned=*/IsSigned: true);
1139
1140 llvm::Constant *Zero = llvm::ConstantInt::get(Ty: CGM.PtrDiffTy, V: 0);
1141 llvm::Constant *Values[2] = { Zero, Zero };
1142 return llvm::ConstantStruct::getAnon(V: Values);
1143}
1144
/// Emit the constant representation of a pointer to data member at the
/// given byte offset (including any this-adjustment already folded in).
llvm::Constant *
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
                                     CharUnits offset) {
  // Itanium C++ ABI 2.3:
  //   A pointer to data member is an offset from the base address of
  //   the class object containing it, represented as a ptrdiff_t
  return llvm::ConstantInt::get(Ty: CGM.PtrDiffTy, V: offset.getQuantity());
}
1153
llvm::Constant *
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
  // A plain &Class::method expression carries no this-adjustment.
  return BuildMemberPointer(MD, ThisAdjustment: CharUnits::Zero());
}
1158
/// Build the { ptr, adj } constant pair representing a pointer to the member
/// function \p MD, applying the given this-adjustment.
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(GD: MD);
    uint64_t VTableOffset;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Multiply by 4-byte relative offsets.
      VTableOffset = Index * 4;
    } else {
      // Classic layout: slots are pointer-sized.
      const ASTContext &Context = getContext();
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
          BitSize: Context.getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default));
      VTableOffset = Index * PointerWidth.getQuantity();
    }

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.

      // We cannot use the Itanium ABI's representation for virtual member
      // function pointers under pointer authentication because it would
      // require us to store both the virtual offset and the constant
      // discriminator in the pointer, which would be immediately vulnerable
      // to attack. Instead we introduce a thunk that does the virtual dispatch
      // and store it as if it were a non-virtual member function. This means
      // that virtual function pointers may not compare equal anymore, but
      // fortunately they aren't required to by the standard, and we do make
      // a best-effort attempt to re-use the thunk.
      //
      // To support interoperation with code in which pointer authentication
      // is disabled, derefencing a member function pointer must still handle
      // the virtual case, but it can use a discriminator which should never
      // be valid.
      const auto &Schema =
          CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers;
      if (Schema)
        // Signed dispatch thunk, stored like a non-virtual member function.
        MemPtr[0] = llvm::ConstantExpr::getPtrToInt(
            C: getSignedVirtualMemberFunctionPointer(MD), Ty: CGM.PtrDiffTy);
      else
        MemPtr[0] = llvm::ConstantInt::get(Ty: CGM.PtrDiffTy, V: VTableOffset);
      // Don't set the LSB of adj to 1 if pointer authentication for member
      // function pointers is enabled.
      MemPtr[1] = llvm::ConstantInt::get(
          Ty: CGM.PtrDiffTy, V: 2 * ThisAdjustment.getQuantity() + !Schema);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(Ty: CGM.PtrDiffTy, V: VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(Ty: CGM.PtrDiffTy,
                                         V: ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FT: FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Info: Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.getMemberFunctionPointer(FD: MD, Ty);

    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(C: addr, Ty: CGM.PtrDiffTy);
    // ARM stores the adjustment doubled (LSB reserved for the virtual flag);
    // Itanium stores it directly.
    MemPtr[1] = llvm::ConstantInt::get(Ty: CGM.PtrDiffTy,
                                       V: (UseARMMethodPtrABI ? 2 : 1) *
                                          ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(V: MemPtr);
}
1244
1245llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1246 QualType MPType) {
1247 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1248 const ValueDecl *MPD = MP.getMemberPointerDecl();
1249 if (!MPD)
1250 return EmitNullMemberPointer(MPT);
1251
1252 CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
1253
1254 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: MPD)) {
1255 llvm::Constant *Src = BuildMemberPointer(MD, ThisAdjustment);
1256 QualType SrcType = getContext().getMemberPointerType(
1257 T: MD->getType(), /*Qualifier=*/std::nullopt, Cls: MD->getParent());
1258 return pointerAuthResignMemberFunctionPointer(Src, DestType: MPType, SrcType, CGM);
1259 }
1260
1261 getContext().recordMemberDataPointerEvaluation(VD: MPD);
1262 CharUnits FieldOffset =
1263 getContext().toCharUnitsFromBits(BitSize: getContext().getFieldOffset(FD: MPD));
1264 return EmitMemberDataPointer(MPT, offset: ThisAdjustment + FieldOffset);
1265}
1266
/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
///
/// \p Inequality selects '!=' semantics; the same emission structure is
/// reused with the predicate and connectives flipped per De Morgan.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  // Pick the predicate and the (possibly swapped) connectives up front so the
  // rest of the emission is shared between == and !=.
  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(P: Eq, LHS: L, RHS: R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(Agg: L, Idxs: 0, Name: "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(Agg: R, Idxs: 0, Name: "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr. This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(P: Eq, LHS: LPtr, RHS: RPtr, Name: "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null. ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(Ty: LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(P: Eq, LHS: LPtr, RHS: Zero, Name: "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj. If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(Agg: L, Idxs: 1, Name: "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(Agg: R, Idxs: 1, Name: "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(P: Eq, LHS: LAdj, RHS: RAdj, Name: "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(Ty: LPtr->getType(), V: 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LHS: LAdj, RHS: RAdj, Name: "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(LHS: OrAdj, RHS: One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(P: Eq, LHS: OrAdjAnd1, RHS: Zero,
                                                      Name: "cmp.or.adj");
    EqZero = Builder.CreateBinOp(Opc: And, LHS: EqZero, RHS: OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Opc: Or, LHS: EqZero, RHS: AdjEq);
  Result = Builder.CreateBinOp(Opc: And, LHS: PtrEq, RHS: Result,
                               Name: Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}
1344
1345llvm::Value *
1346ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1347 llvm::Value *MemPtr,
1348 const MemberPointerType *MPT) {
1349 CGBuilderTy &Builder = CGF.Builder;
1350
1351 /// For member data pointers, this is just a check against -1.
1352 if (MPT->isMemberDataPointer()) {
1353 assert(MemPtr->getType() == CGM.PtrDiffTy);
1354 llvm::Value *NegativeOne =
1355 llvm::Constant::getAllOnesValue(Ty: MemPtr->getType());
1356 return Builder.CreateICmpNE(LHS: MemPtr, RHS: NegativeOne, Name: "memptr.tobool");
1357 }
1358
1359 // In Itanium, a member function pointer is not null if 'ptr' is not null.
1360 llvm::Value *Ptr = Builder.CreateExtractValue(Agg: MemPtr, Idxs: 0, Name: "memptr.ptr");
1361
1362 llvm::Constant *Zero = llvm::ConstantInt::get(Ty: Ptr->getType(), V: 0);
1363 llvm::Value *Result = Builder.CreateICmpNE(LHS: Ptr, RHS: Zero, Name: "memptr.tobool");
1364
1365 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1366 // (the virtual bit) is set.
1367 if (UseARMMethodPtrABI) {
1368 llvm::Constant *One = llvm::ConstantInt::get(Ty: Ptr->getType(), V: 1);
1369 llvm::Value *Adj = Builder.CreateExtractValue(Agg: MemPtr, Idxs: 1, Name: "memptr.adj");
1370 llvm::Value *VirtualBit = Builder.CreateAnd(LHS: Adj, RHS: One, Name: "memptr.virtualbit");
1371 llvm::Value *IsVirtual = Builder.CreateICmpNE(LHS: VirtualBit, RHS: Zero,
1372 Name: "memptr.isvirtual");
1373 Result = Builder.CreateOr(LHS: Result, RHS: IsVirtual);
1374 }
1375
1376 return Result;
1377}
1378
1379bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1380 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1381 if (!RD)
1382 return false;
1383
1384 // If C++ prohibits us from making a copy, return by address.
1385 if (!RD->canPassInRegisters()) {
1386 auto Align = CGM.getContext().getTypeAlignInChars(T: FI.getReturnType());
1387 FI.getReturnInfo() = ABIArgInfo::getIndirect(
1388 Alignment: Align, /*AddrSpace=*/CGM.getDataLayout().getAllocaAddrSpace(),
1389 /*ByVal=*/false);
1390 return true;
1391 }
1392 return false;
1393}
1394
/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset.
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  // Null member function pointers are { 0, 0 }, so an all-zero bit pattern
  // is already correct; null data member pointers are -1 and are not.
  return MPT->isMemberFunctionPointer();
}
1400
/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl = ElementType->castAsCXXRecordDecl();
    llvm::Value *VTable = CGF.GetVTablePtr(This: Ptr, VTableTy: CGF.DefaultPtrTy, VTableClass: ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        Ty: CGF.IntPtrTy, Ptr: VTable, Idx0: -2, Name: "complete-offset.ptr");
    llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(Ty: CGF.IntPtrTy, Addr: OffsetPtr,
                                                        Align: CGF.getPointerAlign());

    // Apply the offset to get the address of the most-derived object.
    llvm::Value *CompletePtr = Ptr.emitRawPointer(CGF);
    CompletePtr =
        CGF.Builder.CreateInBoundsGEP(Ty: CGF.Int8Ty, Ptr: CompletePtr, IdxList: Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws. The cleanup is popped below after the
    // destructor call.
    CGF.pushCallObjectDeleteCleanup(OperatorDelete: DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // When deleting via the class's operator delete, the deleting destructor
  // performs the deallocation itself; otherwise run the complete destructor
  // and let the cleanup pushed above call the global delete.
  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, This: Ptr, E: DE,
                            /*CallOrInvoke=*/nullptr);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}
1443
1444void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1445 // void __cxa_rethrow();
1446
1447 llvm::FunctionType *FTy =
1448 llvm::FunctionType::get(Result: CGM.VoidTy, /*isVarArg=*/false);
1449
1450 llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_rethrow");
1451
1452 if (isNoReturn)
1453 CGF.EmitNoreturnRuntimeCallOrInvoke(callee: Fn, args: {});
1454 else
1455 CGF.EmitRuntimeCallOrInvoke(callee: Fn);
1456}
1457
1458static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1459 // void *__cxa_allocate_exception(size_t thrown_size);
1460
1461 llvm::FunctionType *FTy =
1462 llvm::FunctionType::get(Result: CGM.Int8PtrTy, Params: CGM.SizeTy, /*isVarArg=*/false);
1463
1464 return CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_allocate_exception");
1465}
1466
1467static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1468 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1469 // void (*dest) (void *));
1470
1471 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.GlobalsInt8PtrTy, CGM.Int8PtrTy };
1472 llvm::FunctionType *FTy =
1473 llvm::FunctionType::get(Result: CGM.VoidTy, Params: Args, /*isVarArg=*/false);
1474
1475 return CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_throw");
1476}
1477
/// Emit a 'throw' expression: allocate the exception object with
/// __cxa_allocate_exception, construct the thrown value into it, and hand it
/// to __cxa_throw together with its type_info and destructor.
void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object.
  llvm::Type *SizeTy = CGF.ConvertType(T: getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(T: ThrowType).getQuantity();

  llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      callee: AllocExceptionFn, args: llvm::ConstantInt::get(Ty: SizeTy, V: TypeSize), name: "exception");

  // Construct the thrown value into the freshly allocated buffer.
  CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
  CGF.EmitAnyExprToExn(
      E: E->getSubExpr(), Addr: Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(Ty: ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor. If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const auto *Record = ThrowType->getAsCXXRecordDecl();
      Record && !Record->hasTrivialDestructor()) {
    // __cxa_throw is declared to take its destructor as void (*)(void *). We
    // must match that if function pointers can be authenticated with a
    // discriminator based on their type.
    const ASTContext &Ctx = getContext();
    QualType DtorTy = Ctx.getFunctionType(ResultTy: Ctx.VoidTy, Args: {Ctx.VoidPtrTy},
                                          EPI: FunctionProtoType::ExtProtoInfo());

    CXXDestructorDecl *DtorD = Record->getDestructor();
    Dtor = CGM.getAddrOfCXXStructor(GD: GlobalDecl(DtorD, Dtor_Complete));
    Dtor = CGM.getFunctionPointer(Pointer: Dtor, FunctionType: DtorTy);
  }
  // Trivial/no destructor: pass a null pointer to __cxa_throw.
  if (!Dtor) Dtor = llvm::Constant::getNullValue(Ty: CGM.Int8PtrTy);

  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(callee: getThrowFn(CGM), args);
}
1517
1518static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1519 // void *__dynamic_cast(const void *sub,
1520 // global_as const abi::__class_type_info *src,
1521 // global_as const abi::__class_type_info *dst,
1522 // std::ptrdiff_t src2dst_offset);
1523
1524 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1525 llvm::Type *GlobInt8PtrTy = CGF.GlobalsInt8PtrTy;
1526 llvm::Type *PtrDiffTy =
1527 CGF.ConvertType(T: CGF.getContext().getPointerDiffType());
1528
1529 llvm::Type *Args[4] = { Int8PtrTy, GlobInt8PtrTy, GlobInt8PtrTy, PtrDiffTy };
1530
1531 llvm::FunctionType *FTy = llvm::FunctionType::get(Result: Int8PtrTy, Params: Args, isVarArg: false);
1532
1533 // Mark the function as nounwind willreturn readonly.
1534 llvm::AttrBuilder FuncAttrs(CGF.getLLVMContext());
1535 FuncAttrs.addAttribute(Val: llvm::Attribute::NoUnwind);
1536 FuncAttrs.addAttribute(Val: llvm::Attribute::WillReturn);
1537 FuncAttrs.addMemoryAttr(ME: llvm::MemoryEffects::readOnly());
1538 llvm::AttributeList Attrs = llvm::AttributeList::get(
1539 C&: CGF.getLLVMContext(), Index: llvm::AttributeList::FunctionIndex, B: FuncAttrs);
1540
1541 return CGF.CGM.CreateRuntimeFunction(Ty: FTy, Name: "__dynamic_cast", ExtraAttrs: Attrs);
1542}
1543
1544static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1545 // void __cxa_bad_cast();
1546 llvm::FunctionType *FTy = llvm::FunctionType::get(Result: CGF.VoidTy, isVarArg: false);
1547 return CGF.CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_bad_cast");
1548}
1549
1550/// Compute the src2dst_offset hint as described in the
1551/// Itanium C++ ABI [2.9.7]
1552static CharUnits computeOffsetHint(ASTContext &Context,
1553 const CXXRecordDecl *Src,
1554 const CXXRecordDecl *Dst) {
1555 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1556 /*DetectVirtual=*/false);
1557
1558 // If Dst is not derived from Src we can skip the whole computation below and
1559 // return that Src is not a public base of Dst. Record all inheritance paths.
1560 if (!Dst->isDerivedFrom(Base: Src, Paths))
1561 return CharUnits::fromQuantity(Quantity: -2ULL);
1562
1563 unsigned NumPublicPaths = 0;
1564 CharUnits Offset;
1565
1566 // Now walk all possible inheritance paths.
1567 for (const CXXBasePath &Path : Paths) {
1568 if (Path.Access != AS_public) // Ignore non-public inheritance.
1569 continue;
1570
1571 ++NumPublicPaths;
1572
1573 for (const CXXBasePathElement &PathElement : Path) {
1574 // If the path contains a virtual base class we can't give any hint.
1575 // -1: no hint.
1576 if (PathElement.Base->isVirtual())
1577 return CharUnits::fromQuantity(Quantity: -1ULL);
1578
1579 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1580 continue;
1581
1582 // Accumulate the base class offsets.
1583 const ASTRecordLayout &L = Context.getASTRecordLayout(D: PathElement.Class);
1584 Offset += L.getBaseClassOffset(
1585 Base: PathElement.Base->getType()->getAsCXXRecordDecl());
1586 }
1587 }
1588
1589 // -2: Src is not a public base of Dst.
1590 if (NumPublicPaths == 0)
1591 return CharUnits::fromQuantity(Quantity: -2ULL);
1592
1593 // -3: Src is a multiple public base type but never a virtual base type.
1594 if (NumPublicPaths > 1)
1595 return CharUnits::fromQuantity(Quantity: -3ULL);
1596
1597 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1598 // Return the offset of Src from the origin of Dst.
1599 return Offset;
1600}
1601
1602static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1603 // void __cxa_bad_typeid();
1604 llvm::FunctionType *FTy = llvm::FunctionType::get(Result: CGF.VoidTy, isVarArg: false);
1605
1606 return CGF.CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_bad_typeid");
1607}
1608
// In the Itanium ABI, typeid applied through a pointer always requires a
// null check: the caller must branch to EmitBadTypeidCall instead of
// loading the vptr from a null object.
bool ItaniumCXXABI::shouldTypeidBeNullChecked(QualType SrcRecordTy) {
  return true;
}
1612
1613void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1614 llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1615 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(callee: Fn);
1616 Call->setDoesNotReturn();
1617 CGF.Builder.CreateUnreachable();
1618}
1619
// Emit the value of a polymorphic typeid expression: load the object's vptr
// and read the std::type_info pointer stored immediately before the vtable's
// address point.
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  auto *ClassDecl = SrcRecordTy->castAsCXXRecordDecl();
  llvm::Value *Value = CGF.GetVTablePtr(This: ThisPtr, VTableTy: CGM.GlobalsInt8PtrTy,
                                        VTableClass: ClassDecl);

  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info.
    // Relative vtable layout stores a 32-bit self-relative offset 4 bytes
    // before the address point; llvm.load.relative resolves it.
    Value = CGF.Builder.CreateCall(
        Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::load_relative, Tys: {CGM.Int32Ty}),
        Args: {Value, llvm::ConstantInt::getSigned(Ty: CGM.Int32Ty, V: -4)});
  } else {
    // Load the type info.
    // Classic layout stores a full pointer one slot (index -1) before the
    // address point.
    Value =
        CGF.Builder.CreateConstInBoundsGEP1_64(Ty: StdTypeInfoPtrTy, Ptr: Value, Idx0: -1ULL);
  }
  return CGF.Builder.CreateAlignedLoad(Ty: StdTypeInfoPtrTy, Addr: Value,
                                       Align: CGF.getPointerAlign());
}
1641
// A dynamic_cast of a null pointer yields null without entering the runtime,
// so pointer-to-pointer casts need an explicit null check first; reference
// casts operate on glvalues that cannot be null.
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                                       QualType SrcRecordTy) {
  return SrcIsPtr;
}
1646
// Emit a call to the generic __dynamic_cast runtime entry point:
//   __dynamic_cast(void *sub, abi::__class_type_info *src,
//                  abi::__class_type_info *dst, std::ptrdiff_t src2dst_offset)
// For casts to reference type, a failed (null) result branches to a block
// that throws std::bad_cast; otherwise the (possibly null) adjusted pointer
// is returned and control continues to CastEnd.
llvm::Value *ItaniumCXXABI::emitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(T: CGF.getContext().getPointerDiffType());

  // RTTI descriptors identifying the static source and destination classes.
  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(Ty: SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(Ty: DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  // (See computeOffsetHint: >= 0 is an offset, -1/-2/-3 are sentinels that
  // let the runtime skip or shortcut the hierarchy walk.)
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::getSigned(
      Ty: PtrDiffLTy,
      V: computeOffsetHint(Context&: CGF.getContext(), Src: SrcDecl, Dst: DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.emitRawPointer(CGF);
  if (CGM.getCodeGenOpts().PointerAuth.CXXVTablePointers) {
    // We perform a no-op load of the vtable pointer here to force an
    // authentication. In environments that do not support pointer
    // authentication this is a an actual no-op that will be elided. When
    // pointer authentication is supported and enforced on vtable pointers this
    // load can trap.
    llvm::Value *Vtable =
        CGF.GetVTablePtr(This: ThisAddr, VTableTy: CGM.Int8PtrTy, VTableClass: SrcDecl,
                         AuthMode: CodeGenFunction::VTableAuthMode::MustTrap);
    assert(Vtable);
    (void)Vtable;
  }

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(callee: getItaniumDynamicCastFn(CGF), args);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock(name: "dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Arg: Value);
    CGF.Builder.CreateCondBr(Cond: IsNull, True: BadCastBlock, False: CastEnd);

    CGF.EmitBlock(BB: BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
1698
// Precompute the information needed to lower a dynamic_cast to a final
// ("exact") destination class as a vptr comparison instead of a runtime call.
// Returns std::nullopt if Dest has no public Src base (the cast always
// fails); otherwise returns either the unique offset of the Src subobject
// within Dest, or a flag indicating Src appears at multiple offsets so the
// cast must first adjust to the most-derived object.
std::optional<CGCXXABI::ExactDynamicCastInfo>
ItaniumCXXABI::getExactDynamicCastInfo(QualType SrcRecordTy, QualType DestTy,
                                       QualType DestRecordTy) {
  assert(shouldEmitExactDynamicCast(DestRecordTy));

  ASTContext &Context = getContext();

  // Find all the inheritance paths.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);
  (void)DestDecl->isDerivedFrom(Base: SrcDecl, Paths);

  // Find an offset within `DestDecl` where a `SrcDecl` instance and its vptr
  // might appear.
  std::optional<CharUnits> Offset;
  for (const CXXBasePath &Path : Paths) {
    // dynamic_cast only finds public inheritance paths.
    if (Path.Access != AS_public)
      continue;

    CharUnits PathOffset;
    for (const CXXBasePathElement &PathElement : Path) {
      // Find the offset along this inheritance step.
      const CXXRecordDecl *Base =
          PathElement.Base->getType()->getAsCXXRecordDecl();
      if (PathElement.Base->isVirtual()) {
        // For a virtual base class, we know that the derived class is exactly
        // DestDecl, so we can use the vbase offset from its layout.
        // Note: assignment (not +=) is deliberate — the vbase offset is
        // already relative to the complete DestDecl object.
        const ASTRecordLayout &L = Context.getASTRecordLayout(D: DestDecl);
        PathOffset = L.getVBaseClassOffset(VBase: Base);
      } else {
        // Non-virtual step: accumulate the offset of Base within its
        // immediate enclosing class.
        const ASTRecordLayout &L =
            Context.getASTRecordLayout(D: PathElement.Class);
        PathOffset += L.getBaseClassOffset(Base);
      }
    }

    if (!Offset)
      Offset = PathOffset;
    else if (Offset != PathOffset) {
      // Base appears in at least two different places.
      return ExactDynamicCastInfo{/*RequiresCastToPrimaryBase=*/true,
                                  .Offset: CharUnits::Zero()};
    }
  }
  // No public path at all: the cast can never succeed.
  if (!Offset)
    return std::nullopt;
  return ExactDynamicCastInfo{/*RequiresCastToPrimaryBase=*/false, .Offset: *Offset};
}
1750
// Lower a dynamic_cast to a final class as a direct vptr comparison: the
// cast succeeds iff the object's vtable pointer equals the address point the
// destination class would install at the relevant subobject offset. Branches
// to CastSuccess/CastFail and returns the (possibly adjusted) object pointer.
llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy,
    const ExactDynamicCastInfo &ExactCastInfo, llvm::BasicBlock *CastSuccess,
    llvm::BasicBlock *CastFail) {
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  // Helper: under pointer auth, force an authenticating (trap-on-failure)
  // load of the vtable pointer at the given address.
  auto AuthenticateVTable = [&](Address ThisAddr, const CXXRecordDecl *Decl) {
    if (!CGF.getLangOpts().PointerAuthCalls)
      return;
    (void)CGF.GetVTablePtr(This: ThisAddr, VTableTy: CGF.DefaultPtrTy, VTableClass: Decl,
                           AuthMode: CodeGenFunction::VTableAuthMode::MustTrap);
  };

  bool PerformPostCastAuthentication = false;
  llvm::Value *VTable = nullptr;
  if (ExactCastInfo.RequiresCastToPrimaryBase) {
    // Base appears in at least two different places. Find the most-derived
    // object and see if it's a DestDecl. Note that the most-derived object
    // must be at least as aligned as this base class subobject, and must
    // have a vptr at offset 0.
    llvm::Value *PrimaryBase =
        emitDynamicCastToVoid(CGF, Value: ThisAddr, SrcRecordTy);
    ThisAddr = Address(PrimaryBase, CGF.VoidPtrTy, ThisAddr.getAlignment());
    SrcDecl = DestDecl;
    // This unauthenticated load is unavoidable, so we're relying on the
    // authenticated load in the dynamic cast to void, and we'll manually
    // authenticate the resulting v-table at the end of the cast check.
    PerformPostCastAuthentication = CGF.getLangOpts().PointerAuthCalls;
    CGPointerAuthInfo StrippingAuthInfo(0, PointerAuthenticationMode::Strip,
                                        false, false, nullptr);
    Address VTablePtrPtr = ThisAddr.withElementType(ElemTy: CGF.VoidPtrPtrTy);
    VTable = CGF.Builder.CreateLoad(Addr: VTablePtrPtr, Name: "vtable");
    // Strip the signature so the raw pointer can be compared against the
    // (unsigned) expected vtable constant below.
    if (PerformPostCastAuthentication)
      VTable = CGF.EmitPointerAuthAuth(Info: StrippingAuthInfo, Pointer: VTable);
  } else
    VTable = CGF.GetVTablePtr(This: ThisAddr, VTableTy: CGF.DefaultPtrTy, VTableClass: SrcDecl);

  // Compare the vptr against the expected vptr for the destination type at
  // this offset.
  llvm::Constant *ExpectedVTable = getVTableAddressPoint(
      Base: BaseSubobject(SrcDecl, ExactCastInfo.Offset), VTableClass: DestDecl);
  llvm::Value *Success = CGF.Builder.CreateICmpEQ(LHS: VTable, RHS: ExpectedVTable);
  llvm::Value *AdjustedThisPtr = ThisAddr.emitRawPointer(CGF);

  if (!ExactCastInfo.Offset.isZero()) {
    // Downcast: step backwards from the base subobject to the start of the
    // destination object.
    CharUnits::QuantityType Offset = ExactCastInfo.Offset.getQuantity();
    llvm::Constant *OffsetConstant =
        llvm::ConstantInt::get(Ty: CGF.PtrDiffTy, V: -Offset);
    AdjustedThisPtr = CGF.Builder.CreateInBoundsGEP(Ty: CGF.CharTy, Ptr: AdjustedThisPtr,
                                                    IdxList: OffsetConstant);
    PerformPostCastAuthentication = CGF.getLangOpts().PointerAuthCalls;
  }

  if (PerformPostCastAuthentication) {
    // If we've changed the object pointer we authenticate the vtable pointer
    // of the resulting object. Only the success path authenticates; the PHI
    // below feeds null for the failure path.
    llvm::BasicBlock *NonNullBlock = CGF.Builder.GetInsertBlock();
    llvm::BasicBlock *PostCastAuthSuccess =
        CGF.createBasicBlock(name: "dynamic_cast.postauth.success");
    llvm::BasicBlock *PostCastAuthComplete =
        CGF.createBasicBlock(name: "dynamic_cast.postauth.complete");
    CGF.Builder.CreateCondBr(Cond: Success, True: PostCastAuthSuccess,
                             False: PostCastAuthComplete);
    CGF.EmitBlock(BB: PostCastAuthSuccess);
    Address AdjustedThisAddr =
        Address(AdjustedThisPtr, CGF.IntPtrTy, CGF.getPointerAlign());
    AuthenticateVTable(AdjustedThisAddr, DestDecl);
    CGF.EmitBranch(Block: PostCastAuthComplete);
    CGF.EmitBlock(BB: PostCastAuthComplete);
    llvm::PHINode *PHI = CGF.Builder.CreatePHI(Ty: AdjustedThisPtr->getType(), NumReservedValues: 2);
    PHI->addIncoming(V: AdjustedThisPtr, BB: PostCastAuthSuccess);
    llvm::Value *NullValue =
        llvm::Constant::getNullValue(Ty: AdjustedThisPtr->getType());
    PHI->addIncoming(V: NullValue, BB: NonNullBlock);
    AdjustedThisPtr = PHI;
  }
  CGF.Builder.CreateCondBr(Cond: Success, True: CastSuccess, False: CastFail);
  return AdjustedThisPtr;
}
1831
// Lower `dynamic_cast<void*>`: read the offset-to-top field stored in the
// vtable (two slots before the address point) and add it to the object
// pointer to reach the most-derived object. No runtime call is needed.
llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy) {
  auto *ClassDecl = SrcRecordTy->castAsCXXRecordDecl();
  llvm::Value *OffsetToTop;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(This: ThisAddr, VTableTy: CGF.DefaultPtrTy, VTableClass: ClassDecl);

    // Get the offset-to-top from the vtable.
    // In the relative layout this slot is a 32-bit integer.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_32(Ty: CGM.Int32Ty, Ptr: VTable, Idx0: -2U);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        Ty: CGM.Int32Ty, Addr: OffsetToTop, Align: CharUnits::fromQuantity(Quantity: 4), Name: "offset.to.top");
  } else {
    llvm::Type *PtrDiffLTy =
        CGF.ConvertType(T: CGF.getContext().getPointerDiffType());

    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(This: ThisAddr, VTableTy: CGF.DefaultPtrTy, VTableClass: ClassDecl);

    // Get the offset-to-top from the vtable.
    // In the classic layout this slot is a ptrdiff_t.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(Ty: PtrDiffLTy, Ptr: VTable, Idx0: -2ULL);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        Ty: PtrDiffLTy, Addr: OffsetToTop, Align: CGF.getPointerAlign(), Name: "offset.to.top");
  }
  // Finally, add the offset to the pointer.
  return CGF.Builder.CreateInBoundsGEP(Ty: CGF.Int8Ty, Ptr: ThisAddr.emitRawPointer(CGF),
                                       IdxList: OffsetToTop);
}
1865
1866bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1867 llvm::FunctionCallee Fn = getBadCastFn(CGF);
1868 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(callee: Fn);
1869 Call->setDoesNotReturn();
1870 CGF.Builder.CreateUnreachable();
1871 return true;
1872}
1873
// Compute the dynamic offset of a virtual base within an object: read the
// vbase-offset slot from the vtable at the statically-known
// vbase-offset-offset for (ClassDecl, BaseClassDecl).
llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, VTableTy: CGM.Int8PtrTy, VTableClass: ClassDecl);
  // Position of the vbase-offset entry relative to the address point.
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD: ClassDecl,
                                                               VBase: BaseClassDecl);
  llvm::Value *VBaseOffsetPtr =
      CGF.Builder.CreateConstGEP1_64(
          Ty: CGF.Int8Ty, Ptr: VTablePtr, Idx0: VBaseOffsetOffset.getQuantity(),
          Name: "vbase.offset.ptr");

  llvm::Value *VBaseOffset;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Relative layout stores vbase offsets as 32-bit integers.
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        Ty: CGF.Int32Ty, Addr: VBaseOffsetPtr, Align: CharUnits::fromQuantity(Quantity: 4),
        Name: "vbase.offset");
  } else {
    // Classic layout stores vbase offsets as ptrdiff_t.
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        Ty: CGM.PtrDiffTy, Addr: VBaseOffsetPtr, Align: CGF.getPointerAlign(), Name: "vbase.offset");
  }
  return VBaseOffset;
}
1899
// Emit the Itanium constructor variants for D: the base-object constructor
// always, and the complete-object constructor unless the class is abstract
// (a complete object of an abstract class can never be created).
void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
  // Just make sure we're in sync with TargetCXXABI.
  assert(CGM.getTarget().getCXXABI().hasConstructorVariants());

  // The constructor used for constructing this as a base class;
  // ignores virtual bases.
  CGM.EmitGlobal(D: GlobalDecl(D, Ctor_Base));

  // The constructor used for constructing this as a complete class;
  // constructs the virtual bases, then calls the base constructor.
  if (!D->getParent()->isAbstract()) {
    // We don't need to emit the complete ctor if the class is abstract.
    CGM.EmitGlobal(D: GlobalDecl(D, Ctor_Complete));
  }
}
1915
// Add ABI-specific implicit arguments to a constructor/destructor signature.
// In the Itanium ABI the only such argument is the VTT (virtual table
// table), passed to base-object variants of structors of classes with
// virtual bases.
CGCXXABI::AddedStructorArgCounts
ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
                                      SmallVectorImpl<CanQualType> &ArgTys) {
  ASTContext &Context = getContext();

  // All parameters are already in place except VTT, which goes after 'this'.
  // These are Clang types, so we don't need to worry about sret yet.

  // Check if we need to add a VTT parameter (which has type global void **).
  if ((isa<CXXConstructorDecl>(Val: GD.getDecl()) ? GD.getCtorType() == Ctor_Base
                                              : GD.getDtorType() == Dtor_Base) &&
      cast<CXXMethodDecl>(Val: GD.getDecl())->getParent()->getNumVBases() != 0) {
    // The VTT lives in the global variable address space on targets where
    // that differs from the generic address space.
    LangAS AS = CGM.GetGlobalVarAddressSpace(D: nullptr);
    QualType Q = Context.getAddrSpaceQualType(T: Context.VoidPtrTy, AddressSpace: AS);
    ArgTys.insert(I: ArgTys.begin() + 1,
                  Elt: Context.getPointerType(T: CanQualType::CreateUnsafe(Other: Q)));
    return AddedStructorArgCounts::prefix(N: 1);
  }
  return AddedStructorArgCounts{};
}
1936
// Emit the Itanium destructor variants for D: base and complete always,
// plus the deleting destructor when the destructor is virtual (it occupies
// a vtable slot).
void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
  // The destructor used for destructing this as a base class; ignores
  // virtual bases.
  CGM.EmitGlobal(D: GlobalDecl(D, Dtor_Base));

  // The destructor used for destructing this as a most-derived class;
  // call the base destructor and then destructs any virtual bases.
  CGM.EmitGlobal(D: GlobalDecl(D, Dtor_Complete));

  // The destructor in a virtual table is always a 'deleting'
  // destructor, which calls the complete destructor and then uses the
  // appropriate operator delete.
  if (D->isVirtual())
    CGM.EmitGlobal(D: GlobalDecl(D, Dtor_Deleting));
}
1952
// Insert the implicit VTT parameter (immediately after 'this') into the
// FunctionArgList of a structor being emitted, when the current GlobalDecl
// requires one. Mirrors the signature change made by buildStructorSignature.
void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(Val: CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(GD: CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    // The VTT pointer points into the globals address space.
    LangAS AS = CGM.GetGlobalVarAddressSpace(D: nullptr);
    QualType Q = Context.getAddrSpaceQualType(T: Context.VoidPtrTy, AddressSpace: AS);
    QualType T = Context.getPointerType(T: Q);
    auto *VTTDecl = ImplicitParamDecl::Create(
        C&: Context, /*DC=*/nullptr, IdLoc: MD->getLocation(), Id: &Context.Idents.get(Name: "vtt"),
        T, ParamKind: ImplicitParamKind::CXXVTT);
    Params.insert(I: Params.begin() + 1, Elt: VTTDecl);
    // Remember the decl so EmitInstanceFunctionProlog can load it.
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}
1974
// Emit the start-of-function setup for an instance method: bind 'this',
// load the VTT parameter if present, and pre-store 'this' into the return
// slot for this-returning functions.
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
  /// adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, ThisPtr: loadIncomingCXXThis(CGF));

  /// Initialize the 'vtt' slot if needed.
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        Addr: CGF.GetAddrOfLocalVar(VD: getStructorImplicitParamDecl(CGF)), Name: "vtt");
  }

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
  ///
  /// Unlike the setting of return types, this is done within the ABI
  /// implementation instead of by clients of CGCXXABI because:
  /// 1) getThisValue is currently protected
  /// 2) in theory, an ABI could implement 'this' returns some other way;
  ///    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(GD: CGF.CurGD))
    CGF.Builder.CreateStore(Val: getThisValue(CGF), Addr: CGF.ReturnValue);
}
2001
// Produce the implicit arguments to pass when *calling* a constructor
// variant: the VTT pointer, if the callee requires one, inserted right
// after 'this'.
CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
    bool ForVirtualBase, bool Delegating) {
  if (!NeedsVTTParameter(GD: GlobalDecl(D, Type)))
    return AddedStructorArgs{};

  // Insert the implicit 'vtt' argument as the second argument. Make sure to
  // correctly reflect its address space, which can differ from generic on
  // some targets.
  llvm::Value *VTT =
      CGF.GetVTTParameter(GD: GlobalDecl(D, Type), ForVirtualBase, Delegating);
  LangAS AS = CGM.GetGlobalVarAddressSpace(D: nullptr);
  QualType Q = getContext().getAddrSpaceQualType(T: getContext().VoidPtrTy, AddressSpace: AS);
  QualType VTTTy = getContext().getPointerType(T: Q);
  return AddedStructorArgs::prefix(Args: {{.Value: VTT, .Type: VTTTy}});
}
2018
2019llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
2020 CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
2021 bool ForVirtualBase, bool Delegating) {
2022 GlobalDecl GD(DD, Type);
2023 return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
2024}
2025
// Emit a direct (non-virtual-dispatch) call to a destructor variant,
// passing the VTT when required. Apple kexts route virtual complete/deleting
// destructor calls through the vtable instead.
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This,
                                       QualType ThisTy) {
  GlobalDecl GD(DD, Type);
  llvm::Value *VTT =
      getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(T: getContext().VoidPtrTy);

  CGCallee Callee;
  if (getContext().getLangOpts().AppleKext &&
      Type != Dtor_Base && DD->isVirtual())
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, RD: DD->getParent());
  else
    Callee = CGCallee::forDirect(functionPtr: CGM.getAddrOfCXXStructor(GD), abstractInfo: GD);

  CGF.EmitCXXDestructorCall(Dtor: GD, Callee, This: CGF.getAsNaturalPointerTo(Addr: This, PointeeType: ThisTy),
                            ThisTy, ImplicitParam: VTT, ImplicitParamTy: VTTTy, E: nullptr);
}
2046
2047// Check if any non-inline method has the specified attribute.
2048template <typename T>
2049static bool CXXRecordNonInlineHasAttr(const CXXRecordDecl *RD) {
2050 for (const auto *D : RD->noload_decls()) {
2051 if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
2052 if (FD->isInlined() || FD->doesThisDeclarationHaveABody() ||
2053 FD->isPureVirtual())
2054 continue;
2055 if (D->hasAttr<T>())
2056 return true;
2057 }
2058 }
2059
2060 return false;
2061}
2062
2063static void setVTableSelectiveDLLImportExport(CodeGenModule &CGM,
2064 llvm::GlobalVariable *VTable,
2065 const CXXRecordDecl *RD) {
2066 if (VTable->getDLLStorageClass() !=
2067 llvm::GlobalVariable::DefaultStorageClass ||
2068 RD->hasAttr<DLLImportAttr>() || RD->hasAttr<DLLExportAttr>())
2069 return;
2070
2071 if (CGM.getVTables().isVTableExternal(RD)) {
2072 if (CXXRecordNonInlineHasAttr<DLLImportAttr>(RD))
2073 VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
2074 } else if (CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
2075 VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
2076}
2077
// Emit the definition (initializer, linkage, comdat, visibility, type
// metadata, debug info) of the vtable group for RD. Idempotent: returns
// early if the vtable already has an initializer.
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, VPtrOffset: CharUnits());
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(Ty: CGM.getContext().getCanonicalTagType(TD: RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(builder&: components, layout: VTLayout, rtti: RTTI,
                               vtableHasLocalLinkage: llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(global: VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  // Weak vtables go in a comdat so duplicate definitions are deduplicated.
  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(Name: VTable->getName()));

  if (CGM.getTarget().hasPS4DLLImportExport())
    setVTableSelectiveDLLImportExport(CGM, VTable, RD);

  // Set the right visibility.
  CGM.setGVProperties(GV: VTable, D: RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr(Str: "__fundamental_type_info") &&
      isa<NamespaceDecl>(Val: DC) && cast<NamespaceDecl>(Val: DC)->getIdentifier() &&
      cast<NamespaceDecl>(Val: DC)->getIdentifier()->isStr(Str: "__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  // Always emit type metadata on non-available_externally definitions, and on
  // available_externally definitions if we are performing whole program
  // devirtualization or speculative devirtualization. We need the type metadata
  // on all vtable definitions to ensure we associate derived classes with base
  // classes defined in headers but with a strong definition only in a shared
  // library.
  if (!VTable->isDeclarationForLinker() ||
      CGM.getCodeGenOpts().WholeProgramVTables ||
      CGM.getCodeGenOpts().DevirtualizeSpeculatively) {
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
    // For available_externally definitions, add the vtable to
    // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
    if (VTable->isDeclarationForLinker()) {
      assert(CGM.getCodeGenOpts().WholeProgramVTables ||
             CGM.getCodeGenOpts().DevirtualizeSpeculatively);
      CGM.addCompilerUsedGlobal(GV: VTable);
    }
  }

  if (VTContext.isRelativeLayout()) {
    // Relative vtables hold 32-bit offsets, not tagged pointers, so hwasan
    // metadata does not apply; non-dso-local vtables additionally get a
    // local alias so the relative references stay in-range.
    CGVT.RemoveHwasanMetadata(GV: VTable);
    if (!VTable->isDSOLocal())
      CGVT.GenerateRelativeVTableAlias(VTable, AliasNameRef: VTable->getName());
  }

  // Emit symbol for debugger only if requested debug info.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->emitVTableSymbol(VTable, RD);
}
2150
2151bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
2152 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
2153 if (Vptr.NearestVBase == nullptr)
2154 return false;
2155 return NeedsVTTParameter(GD: CGF.CurGD);
2156}
2157
// Return the vtable address point to install while inside a structor. When
// the class involves virtual bases and the structor has a VTT, the address
// point must come from the VTT (the construction vtable); otherwise the
// static address point constant is used.
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {

  if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
      NeedsVTTParameter(GD: CGF.CurGD)) {
    return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
                                                  NearestVBase);
  }
  return getVTableAddressPoint(Base, VTableClass);
}
2169
// Return the constant address point for Base's vptr within VTableClass's
// vtable group: a GEP into the group selecting the right vtable and the
// right address point within it, annotated with an inrange attribute so the
// optimizer knows accesses stay within that single vtable.
llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(RD: VTableClass, VPtrOffset: CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  const VTableLayout &Layout =
      CGM.getItaniumVTableContext().getVTableLayout(RD: VTableClass);
  VTableLayout::AddressPointLocation AddressPoint =
      Layout.getAddressPoint(Base);
  llvm::Value *Indices[] = {
      llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: 0),
      llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: AddressPoint.VTableIndex),
      llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: AddressPoint.AddressPointIndex),
  };

  // Add inrange attribute to indicate that only the VTableIndex can be
  // accessed.
  // The range is expressed in bytes relative to the address point: it spans
  // backwards to the start of the selected vtable and forwards to its end.
  unsigned ComponentSize =
      CGM.getDataLayout().getTypeAllocSize(Ty: CGM.getVTableComponentType());
  unsigned VTableSize =
      ComponentSize * Layout.getVTableSize(i: AddressPoint.VTableIndex);
  unsigned Offset = ComponentSize * AddressPoint.AddressPointIndex;
  llvm::ConstantRange InRange(
      llvm::APInt(32, (int)-Offset, true),
      llvm::APInt(32, (int)(VTableSize - Offset), true));
  return llvm::ConstantExpr::getGetElementPtr(
      Ty: VTable->getValueType(), C: VTable, IdxList: Indices, /*InBounds=*/NW: true, InRange);
}
2200
// Fetch a vtable address point out of the VTT parameter: index the VTT by
// the secondary virtual pointer index for (VTableClass, Base) and load the
// stored address point, authenticating it when VTT entries are signed.
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Get the secondary vpointer index.
  uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(RD: VTableClass, Base);

  /// Load the VTT.
  llvm::Value *VTT = CGF.LoadCXXVTT();
  if (VirtualPointerIndex)
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(Ty: CGF.GlobalsVoidPtrTy, Ptr: VTT,
                                                 Idx0: VirtualPointerIndex);

  // And load the address point from the VTT.
  llvm::Value *AP =
      CGF.Builder.CreateAlignedLoad(Ty: CGF.GlobalsVoidPtrTy, Addr: VTT,
                                    Align: CGF.getPointerAlign());

  // Under pointer authentication, VTT entries are signed; authenticate the
  // loaded address point before use.
  if (auto &Schema = CGF.CGM.getCodeGenOpts().PointerAuth.CXXVTTVTablePointers) {
    CGPointerAuthInfo PointerAuth = CGF.EmitPointerAuthInfo(Schema, StorageAddress: VTT,
                                                            SchemaDecl: GlobalDecl(),
                                                            SchemaType: QualType());
    AP = CGF.EmitPointerAuthAuth(Info: PointerAuth, Pointer: AP);
  }

  return AP;
}
2231
// Get or create the (possibly still uninitialized) global variable for RD's
// vtable group. Results are memoized in the VTables map; creation also
// queues the vtable for deferred emission.
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  // Mangle the vtable symbol name (_ZTV...).
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(layout: VTLayout);

  // Use pointer to global alignment for the vtable. Otherwise we would align
  // them based on the size of the initializer which doesn't make sense as only
  // single values are read.
  unsigned PAlign = CGM.getVtableGlobalVarAlignment();

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, Ty: VTableType, Linkage: llvm::GlobalValue::ExternalLinkage,
      Alignment: getContext().toCharUnitsFromBits(BitSize: PAlign).getAsAlign());
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  if (CGM.getTarget().hasPS4DLLImportExport())
    setVTableSelectiveDLLImportExport(CGM, VTable, RD);

  CGM.setGVProperties(GV: VTable, D: RD);
  return VTable;
}
2267
// Load a virtual function pointer for GD out of the object's vtable and wrap
// it (with pointer-auth info when enabled) in a CGCallee. Handles the
// OpenMP-device vtable translation, CFI vtable-checked loads, and the
// relative vtable layout.
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  llvm::Type *PtrTy = CGM.GlobalsInt8PtrTy;
  auto *MethodDecl = cast<CXXMethodDecl>(Val: GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(This, VTableTy: PtrTy, VTableClass: MethodDecl->getParent());

  // For the translation of virtual functions, we need to map the (potential)
  // host vtable to the device vtable. This is done by calling the runtime
  // function
  // __llvm_omp_indirect_call_lookup.
  if (CGM.getLangOpts().OpenMPIsTargetDevice) {
    auto *NewPtrTy = CGM.VoidPtrTy;
    llvm::Type *RtlFnArgs[] = {NewPtrTy};
    llvm::FunctionCallee DeviceRtlFn = CGM.CreateRuntimeFunction(
        Ty: llvm::FunctionType::get(Result: NewPtrTy, Params: RtlFnArgs, isVarArg: false),
        Name: "__llvm_omp_indirect_call_lookup");
    auto *BackupTy = VTable->getType();
    // Need to convert to generic address space
    VTable = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(V: VTable, DestTy: NewPtrTy);
    VTable = CGF.EmitRuntimeCall(callee: DeviceRtlFn, args: {VTable});
    // convert to original address space
    VTable = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(V: VTable, DestTy: BackupTy);
  }

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc, *VTableSlotPtr = nullptr;
  auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers;

  llvm::Type *ComponentTy = CGM.getVTables().getVTableComponentType();
  uint64_t ByteOffset =
      VTableIndex * CGM.getDataLayout().getTypeSizeInBits(Ty: ComponentTy) / 8;

  // CFI checked loads are incompatible with signed vfunc pointers, so only
  // take that path when no pointer-auth schema applies.
  if (!Schema && CGF.ShouldEmitVTableTypeCheckedLoad(RD: MethodDecl->getParent())) {
    VFunc = CGF.EmitVTableTypeCheckedLoad(RD: MethodDecl->getParent(), VTable,
                                          VTableTy: PtrTy, VTableByteOffset: ByteOffset);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(RD: MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Relative layout: the slot holds a 32-bit self-relative offset.
      VFuncLoad = CGF.Builder.CreateCall(
          Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::load_relative, Tys: {CGM.Int32Ty}),
          Args: {VTable, llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: ByteOffset)});
    } else {
      VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
          Ty: PtrTy, Ptr: VTable, Idx0: VTableIndex, Name: "vfn");
      VFuncLoad = CGF.Builder.CreateAlignedLoad(Ty: PtrTy, Addr: VTableSlotPtr,
                                                Align: CGF.getPointerAlign());
    }

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(Val: VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            KindID: llvm::LLVMContext::MD_invariant_load,
            Node: llvm::MDNode::get(Context&: CGM.getLLVMContext(),
                              MDs: llvm::ArrayRef<llvm::Metadata *>()));
      }
    }
    VFunc = VFuncLoad;
  }

  CGPointerAuthInfo PointerAuth;
  if (Schema) {
    assert(VTableSlotPtr && "virtual function pointer not set");
    // Vfunc pointers are signed against the original (overridden) method.
    GD = CGM.getItaniumVTableContext().findOriginalMethod(GD: GD.getCanonicalDecl());
    PointerAuth = CGF.EmitPointerAuthInfo(Schema, StorageAddress: VTableSlotPtr, SchemaDecl: GD, SchemaType: QualType());
  }
  CGCallee Callee(GD, VFunc, PointerAuth);
  return Callee;
}
2348
/// Emit a virtual call to a destructor through the vtable.
///
/// \param E either the explicit member call expression (p->~T()) or the
///        delete-expression that triggered the destruction; exactly one
///        alternative of the pointer union is set.
/// \returns nullptr: the Itanium ABI destructor returns no usable value
///          (unlike e.g. the ARM variant, where dtors return 'this').
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
    Address This, DeleteOrMemberCallExpr E, llvm::CallBase **CallOrInvoke) {
  // Unpack the pointer union; a member call may not pass arguments to a
  // destructor, and only deleting/complete dtors are ever called virtually.
  auto *CE = dyn_cast<const CXXMemberCallExpr *>(Val&: E);
  auto *D = dyn_cast<const CXXDeleteExpr *>(Val&: E);
  assert((CE != nullptr) ^ (D != nullptr));
  assert(CE == nullptr || CE->arguments().empty());
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  // Arrange the call with the structor's own calling convention, then form a
  // virtual callee so the actual function pointer is loaded from the vtable.
  GlobalDecl GD(Dtor, DtorType);
  const CGFunctionInfo *FInfo =
      &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
  llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(Info: *FInfo);
  CGCallee Callee = CGCallee::forVirtual(CE, MD: GD, Addr: This, FTy: Ty);

  // Static type of the destroyed object: the object expression's type for a
  // member call, the destroyed type for a delete-expression.
  QualType ThisTy;
  if (CE) {
    ThisTy = CE->getObjectType();
  } else {
    ThisTy = D->getDestroyedType();
  }

  CGF.EmitCXXDestructorCall(Dtor: GD, Callee, This: This.emitRawPointer(CGF), ThisTy,
                            ImplicitParam: nullptr, ImplicitParamTy: QualType(), E: nullptr, CallOrInvoke);
  return nullptr;
}
2375
2376void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2377 CodeGenVTables &VTables = CGM.getVTables();
2378 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2379 VTables.EmitVTTDefinition(VTT, Linkage: CGM.getVTableLinkage(RD), RD);
2380}
2381
2382bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2383 const CXXRecordDecl *RD) const {
2384 // We don't emit available_externally vtables if we are in -fapple-kext mode
2385 // because kext mode does not permit devirtualization.
2386 if (CGM.getLangOpts().AppleKext)
2387 return false;
2388
2389 // If the vtable is hidden then it is not safe to emit an available_externally
2390 // copy of vtable.
2391 if (isVTableHidden(RD))
2392 return false;
2393
2394 if (CGM.getCodeGenOpts().ForceEmitVTables)
2395 return true;
2396
2397 // A speculative vtable can only be generated if all virtual inline functions
2398 // defined by this class are emitted. The vtable in the final program contains
2399 // for each virtual inline function not used in the current TU a function that
2400 // is equivalent to the unused function. The function in the actual vtable
2401 // does not have to be declared under the same symbol (e.g., a virtual
2402 // destructor that can be substituted with its base class's destructor). Since
2403 // inline functions are emitted lazily and this emissions does not account for
2404 // speculative emission of a vtable, we might generate a speculative vtable
2405 // with references to inline functions that are not emitted under that name.
2406 // This can lead to problems when devirtualizing a call to such a function,
2407 // that result in linking errors. Hence, if there are any unused virtual
2408 // inline function, we cannot emit the speculative vtable.
2409 // FIXME we can still emit a copy of the vtable if we
2410 // can emit definition of the inline functions.
2411 if (hasAnyUnusedVirtualInlineFunction(RD))
2412 return false;
2413
2414 // For a class with virtual bases, we must also be able to speculatively
2415 // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2416 // the vtable" and "can emit the VTT". For a base subobject, this means we
2417 // need to be able to emit non-virtual base vtables.
2418 if (RD->getNumVBases()) {
2419 for (const auto &B : RD->bases()) {
2420 auto *BRD = B.getType()->getAsCXXRecordDecl();
2421 assert(BRD && "no class for base specifier");
2422 if (B.isVirtual() || !BRD->isDynamicClass())
2423 continue;
2424 if (!canSpeculativelyEmitVTableAsBaseClass(RD: BRD))
2425 return false;
2426 }
2427 }
2428
2429 return true;
2430}
2431
2432bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2433 if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2434 return false;
2435
2436 if (RD->shouldEmitInExternalSource())
2437 return false;
2438
2439 // For a complete-object vtable (or more specifically, for the VTT), we need
2440 // to be able to speculatively emit the vtables of all dynamic virtual bases.
2441 for (const auto &B : RD->vbases()) {
2442 auto *BRD = B.getType()->getAsCXXRecordDecl();
2443 assert(BRD && "no class for base specifier");
2444 if (!BRD->isDynamicClass())
2445 continue;
2446 if (!canSpeculativelyEmitVTableAsBaseClass(RD: BRD))
2447 return false;
2448 }
2449
2450 return true;
2451}
/// Apply the non-virtual and/or virtual pointer adjustment described by a
/// thunk or return adjustment.
///
/// \param InitialPtr           the pointer being adjusted.
/// \param UnadjustedClass      class the pointer currently points into; used
///                             for type metadata on the vtable load.
/// \param NonVirtualAdjustment fixed byte offset to add (may be 0).
/// \param VirtualAdjustment    byte offset into the vtable of a slot holding
///                             a further, dynamic byte offset; 0 means none.
/// \param IsReturnAdjustment   ordering selector: a this-adjustment
///                             (base-to-derived) applies the non-virtual
///                             delta before the virtual one, a return
///                             adjustment (derived-to-base) applies it after.
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          Address InitialPtr,
                                          const CXXRecordDecl *UnadjustedClass,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  // No adjustment at all: return the pointer unchanged.
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.emitRawPointer(CGF);

  // All arithmetic below is done in terms of bytes.
  Address V = InitialPtr.withElementType(ElemTy: CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(Addr: V,
                              Offset: CharUnits::fromQuantity(Quantity: NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    llvm::Value *VTablePtr =
        CGF.GetVTablePtr(This: V, VTableTy: CGF.Int8PtrTy, VTableClass: UnadjustedClass);

    // The slot at vtable + VirtualAdjustment holds the dynamic offset.
    llvm::Value *Offset;
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        Ty: CGF.Int8Ty, Ptr: VTablePtr, Idx0: VirtualAdjustment);
    if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Load the adjustment offset from the vtable as a 32-bit int.
      Offset =
          CGF.Builder.CreateAlignedLoad(Ty: CGF.Int32Ty, Addr: OffsetPtr,
                                        Align: CharUnits::fromQuantity(Quantity: 4));
    } else {
      llvm::Type *PtrDiffTy =
          CGF.ConvertType(T: CGF.getContext().getPointerDiffType());

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(Ty: PtrDiffTy, Addr: OffsetPtr,
                                             Align: CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(Ty: V.getElementType(),
                                              Ptr: V.emitRawPointer(CGF), IdxList: Offset);
  } else {
    ResultPtr = V.emitRawPointer(CGF);
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(Ty: CGF.Int8Ty, Ptr: ResultPtr,
                                                       Idx0: NonVirtualAdjustment);
  }

  return ResultPtr;
}
2507
2508llvm::Value *
2509ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF, Address This,
2510 const CXXRecordDecl *UnadjustedClass,
2511 const ThunkInfo &TI) {
2512 return performTypeAdjustment(CGF, InitialPtr: This, UnadjustedClass, NonVirtualAdjustment: TI.This.NonVirtual,
2513 VirtualAdjustment: TI.This.Virtual.Itanium.VCallOffsetOffset,
2514 /*IsReturnAdjustment=*/false);
2515}
2516
2517llvm::Value *
2518ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
2519 const CXXRecordDecl *UnadjustedClass,
2520 const ReturnAdjustment &RA) {
2521 return performTypeAdjustment(CGF, InitialPtr: Ret, UnadjustedClass, NonVirtualAdjustment: RA.NonVirtual,
2522 VirtualAdjustment: RA.Virtual.Itanium.VBaseOffsetOffset,
2523 /*IsReturnAdjustment=*/true);
2524}
2525
2526void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2527 RValue RV, QualType ResultType) {
2528 if (!isa<CXXDestructorDecl>(Val: CGF.CurGD.getDecl()))
2529 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2530
2531 // Destructor thunks in the ARM ABI have indeterminate results.
2532 llvm::Type *T = CGF.ReturnValue.getElementType();
2533 RValue Undef = RValue::get(V: llvm::UndefValue::get(T));
2534 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV: Undef, ResultType);
2535}
2536
2537/************************** Array allocation cookies **************************/
2538
2539CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2540 // The array cookie is a size_t; pad that up to the element alignment.
2541 // The cookie is actually right-justified in that space.
2542 return std::max(a: CharUnits::fromQuantity(Quantity: CGM.SizeSizeInBytes),
2543 b: CGM.getContext().getPreferredTypeAlignInChars(T: elementType));
2544}
2545
/// Write the Itanium array cookie (the size_t element count) in front of a
/// new[] allocation and return the adjusted pointer to the element data.
///
/// \param NewPtr      start of the raw allocation returned by operator new[].
/// \param NumElements number of array elements being allocated.
/// \returns the address of the first array element (NewPtr + cookie size).
Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                             Address NewPtr,
                                             llvm::Value *NumElements,
                                             const CXXNewExpr *expr,
                                             QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr.getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = CGF.getSizeSize();

  // The size of the cookie: a size_t padded up to the element alignment
  // (must agree with getArrayCookieSizeImpl, checked below).
  CharUnits CookieSize =
      std::max(a: SizeSize, b: Ctx.getPreferredTypeAlignInChars(T: ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie: the count is right-justified, so skip
  // any padding that precedes it.
  Address CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(Addr: CookiePtr, Offset: CookieOffset);

  // Write the number of elements into the appropriate slot.
  Address NumElementsPtr = CookiePtr.withElementType(ElemTy: CGF.SizeTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(Val: NumElements, Addr: NumElementsPtr);

  // Handle the array cookie specially in ASan.
  if (CGM.getLangOpts().Sanitize.has(K: SanitizerKind::Address) && AS == 0 &&
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
    // The store to the CookiePtr does not need to be instrumented.
    SI->setNoSanitizeMetadata();
    // Tell the ASan runtime to poison the cookie so stray accesses to it
    // are diagnosed.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(Result: CGM.VoidTy, Params: NumElementsPtr.getType(), isVarArg: false);
    llvm::FunctionCallee F =
        CGM.CreateRuntimeFunction(Ty: FTy, Name: "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(Callee: F, Args: NumElementsPtr.emitRawPointer(CGF));
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsByteGEP(Addr: NewPtr, Offset: CookieSize);
}
2590
2591llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2592 Address allocPtr,
2593 CharUnits cookieSize) {
2594 // The element size is right-justified in the cookie.
2595 Address numElementsPtr = allocPtr;
2596 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2597 if (!numElementsOffset.isZero())
2598 numElementsPtr =
2599 CGF.Builder.CreateConstInBoundsByteGEP(Addr: numElementsPtr, Offset: numElementsOffset);
2600
2601 unsigned AS = allocPtr.getAddressSpace();
2602 numElementsPtr = numElementsPtr.withElementType(ElemTy: CGF.SizeTy);
2603 if (!CGM.getLangOpts().Sanitize.has(K: SanitizerKind::Address) || AS != 0)
2604 return CGF.Builder.CreateLoad(Addr: numElementsPtr);
2605 // In asan mode emit a function call instead of a regular load and let the
2606 // run-time deal with it: if the shadow is properly poisoned return the
2607 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2608 // We can't simply ignore this load using nosanitize metadata because
2609 // the metadata may be lost.
2610 llvm::FunctionType *FTy =
2611 llvm::FunctionType::get(Result: CGF.SizeTy, Params: CGF.DefaultPtrTy, isVarArg: false);
2612 llvm::FunctionCallee F =
2613 CGM.CreateRuntimeFunction(Ty: FTy, Name: "__asan_load_cxx_array_cookie");
2614 return CGF.Builder.CreateCall(Callee: F, Args: numElementsPtr.emitRawPointer(CGF));
2615}
2616
2617CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2618 // ARM says that the cookie is always:
2619 // struct array_cookie {
2620 // std::size_t element_size; // element_size != 0
2621 // std::size_t element_count;
2622 // };
2623 // But the base ABI doesn't give anything an alignment greater than
2624 // 8, so we can dismiss this as typical ABI-author blindness to
2625 // actual language complexity and round up to the element alignment.
2626 return std::max(a: CharUnits::fromQuantity(Quantity: 2 * CGM.SizeSizeInBytes),
2627 b: CGM.getContext().getTypeAlignInChars(T: elementType));
2628}
2629
2630Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2631 Address newPtr,
2632 llvm::Value *numElements,
2633 const CXXNewExpr *expr,
2634 QualType elementType) {
2635 assert(requiresArrayCookie(expr));
2636
2637 // The cookie is always at the start of the buffer.
2638 Address cookie = newPtr;
2639
2640 // The first element is the element size.
2641 cookie = cookie.withElementType(ElemTy: CGF.SizeTy);
2642 llvm::Value *elementSize = llvm::ConstantInt::get(Ty: CGF.SizeTy,
2643 V: getContext().getTypeSizeInChars(T: elementType).getQuantity());
2644 CGF.Builder.CreateStore(Val: elementSize, Addr: cookie);
2645
2646 // The second element is the element count.
2647 cookie = CGF.Builder.CreateConstInBoundsGEP(Addr: cookie, Index: 1);
2648 CGF.Builder.CreateStore(Val: numElements, Addr: cookie);
2649
2650 // Finally, compute a pointer to the actual data buffer by skipping
2651 // over the cookie completely.
2652 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2653 return CGF.Builder.CreateConstInBoundsByteGEP(Addr: newPtr, Offset: cookieSize);
2654}
2655
2656llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2657 Address allocPtr,
2658 CharUnits cookieSize) {
2659 // The number of elements is at offset sizeof(size_t) relative to
2660 // the allocated pointer.
2661 Address numElementsPtr
2662 = CGF.Builder.CreateConstInBoundsByteGEP(Addr: allocPtr, Offset: CGF.getSizeSize());
2663
2664 numElementsPtr = numElementsPtr.withElementType(ElemTy: CGF.SizeTy);
2665 return CGF.Builder.CreateLoad(Addr: numElementsPtr);
2666}
2667
2668/*********************** Static local initialization **************************/
2669
2670static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2671 llvm::PointerType *GuardPtrTy) {
2672 // int __cxa_guard_acquire(__guard *guard_object);
2673 llvm::FunctionType *FTy =
2674 llvm::FunctionType::get(Result: CGM.getTypes().ConvertType(T: CGM.getContext().IntTy),
2675 Params: GuardPtrTy, /*isVarArg=*/false);
2676 return CGM.CreateRuntimeFunction(
2677 Ty: FTy, Name: "__cxa_guard_acquire",
2678 ExtraAttrs: llvm::AttributeList::get(C&: CGM.getLLVMContext(),
2679 Index: llvm::AttributeList::FunctionIndex,
2680 Kinds: llvm::Attribute::NoUnwind));
2681}
2682
2683static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2684 llvm::PointerType *GuardPtrTy) {
2685 // void __cxa_guard_release(__guard *guard_object);
2686 llvm::FunctionType *FTy =
2687 llvm::FunctionType::get(Result: CGM.VoidTy, Params: GuardPtrTy, /*isVarArg=*/false);
2688 return CGM.CreateRuntimeFunction(
2689 Ty: FTy, Name: "__cxa_guard_release",
2690 ExtraAttrs: llvm::AttributeList::get(C&: CGM.getLLVMContext(),
2691 Index: llvm::AttributeList::FunctionIndex,
2692 Kinds: llvm::Attribute::NoUnwind));
2693}
2694
2695static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2696 llvm::PointerType *GuardPtrTy) {
2697 // void __cxa_guard_abort(__guard *guard_object);
2698 llvm::FunctionType *FTy =
2699 llvm::FunctionType::get(Result: CGM.VoidTy, Params: GuardPtrTy, /*isVarArg=*/false);
2700 return CGM.CreateRuntimeFunction(
2701 Ty: FTy, Name: "__cxa_guard_abort",
2702 ExtraAttrs: llvm::AttributeList::get(C&: CGM.getLLVMContext(),
2703 Index: llvm::AttributeList::FunctionIndex,
2704 Kinds: llvm::Attribute::NoUnwind));
2705}
2706
2707namespace {
2708 struct CallGuardAbort final : EHScopeStack::Cleanup {
2709 llvm::GlobalVariable *Guard;
2710 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2711
2712 void Emit(CodeGenFunction &CGF, Flags flags) override {
2713 CGF.EmitNounwindRuntimeCall(callee: getGuardAbortFn(CGM&: CGF.CGM, GuardPtrTy: Guard->getType()),
2714 args: Guard);
2715 }
2716 };
2717}
2718
/// Emit a dynamic initialization of \p var guarded against repeated and (if
/// thread-safe statics are enabled) concurrent execution, following the
/// Itanium one-time-construction API (__cxa_guard_acquire/release/abort).
/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
///
/// \param var the global backing the guarded variable.
/// \param shouldPerformInit whether the initializer itself must be run (as
///        opposed to only registering a destructor).
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(Kind: D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment =
          CharUnits::fromQuantity(Quantity: CGM.getDataLayout().getABITypeAlign(Ty: guardTy));
    }
  }
  llvm::PointerType *guardPtrTy = llvm::PointerType::get(
      C&: CGF.CGM.getLLVMContext(),
      AddressSpace: CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(D: &D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(D: &D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage, visibility and dll storage class from the guarded
    // variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(Ty: guardTy, V: 0),
                                     guardName.str());
    guard->setDSOLocal(var->isDSOLocal());
    guard->setVisibility(var->getVisibility());
    guard->setDLLStorageClass(var->getDLLStorageClass());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getAsAlign());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
      guard->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(Name: guard->getName()));
    }

    CGM.setStaticLocalDeclGuardAddress(D: &D, C: guard);
  }

  Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }
  //
  // If threadsafe statics are enabled, but we don't have inline atomics, just
  // call __cxa_guard_acquire unconditionally.  The "inline" check isn't
  // actually inline, and the user might not expect calls to __atomic libcalls.

  unsigned MaxInlineWidthInBits = CGF.getTarget().getMaxAtomicInlineWidth();
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock(name: "init.end");
  if (!threadsafe || MaxInlineWidthInBits) {
    // Load the first byte of the guard variable.
    llvm::LoadInst *LI =
        Builder.CreateLoad(Addr: guardAddr.withElementType(ElemTy: CGM.Int8Ty));

    // Itanium ABI:
    //   An implementation supporting thread-safety on multiprocessor
    //   systems must also guarantee that references to the initialized
    //   object do not occur before the load of the initialization flag.
    //
    // In LLVM, we do this by marking the load Acquire.
    if (threadsafe)
      LI->setAtomic(Ordering: llvm::AtomicOrdering::Acquire);

    // For ARM, we should only check the first bit, rather than the entire byte:
    //
    // ARM C++ ABI 3.2.3.1:
    //   To support the potential use of initialization guard variables
    //   as semaphores that are the target of ARM SWP and LDREX/STREX
    //   synchronizing instructions we define a static initialization
    //   guard variable to be a 4-byte aligned, 4-byte word with the
    //   following inline access protocol.
    //     #define INITIALIZED 1
    //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
    //       if (__cxa_guard_acquire(&obj_guard))
    //         ...
    //     }
    //
    // and similarly for ARM64:
    //
    // ARM64 C++ ABI 3.2.2:
    //   This ABI instead only specifies the value bit 0 of the static guard
    //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
    //   variable is not initialized and 1 when it is.
    llvm::Value *V =
        (UseARMGuardVarABI && !useInt8GuardVariable)
            ? Builder.CreateAnd(LHS: LI, RHS: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: 1))
            : LI;
    llvm::Value *NeedsInit = Builder.CreateIsNull(Arg: V, Name: "guard.uninitialized");

    llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock(name: "init.check");

    // Check if the first byte of the guard variable is zero.
    CGF.EmitCXXGuardedInitBranch(NeedsInit, InitBlock: InitCheckBlock, NoInitBlock: EndBlock,
                                 Kind: CodeGenFunction::GuardKind::VariableGuard, D: &D);

    CGF.EmitBlock(BB: InitCheckBlock);
  }

  // The semantics of dynamic initialization of variables with static or thread
  // storage duration depends on whether they are declared at block-scope. The
  // initialization of such variables at block-scope can be aborted with an
  // exception and later retried (per C++20 [stmt.dcl]p4), and recursive entry
  // to their initialization has undefined behavior (also per C++20
  // [stmt.dcl]p4). For such variables declared at non-block scope, exceptions
  // lead to termination (per C++20 [except.terminate]p1), and recursive
  // references to the variables are governed only by the lifetime rules (per
  // C++20 [class.cdtor]p2), which means such references are perfectly fine as
  // long as they avoid touching memory. As a result, block-scope variables must
  // not be marked as initialized until after initialization completes (unless
  // the mark is reverted following an exception), but non-block-scope variables
  // must be marked prior to initialization so that recursive accesses during
  // initialization do not restart initialization.

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(callee: getGuardAcquireFn(CGM, GuardPtrTy: guardPtrTy), args: guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock(name: "init");

    Builder.CreateCondBr(Cond: Builder.CreateIsNotNull(Arg: V, Name: "tobool"),
                         True: InitBlock, False: EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(Kind: EHCleanup, A: guard);

    CGF.EmitBlock(BB: InitBlock);
  } else if (!D.isLocalVarDecl()) {
    // For non-local variables, store 1 into the first byte of the guard
    // variable before the object initialization begins so that references
    // to the variable during initialization don't restart initialization.
    Builder.CreateStore(Val: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: 1),
                        Addr: guardAddr.withElementType(ElemTy: CGM.Int8Ty));
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, GV: var, PerformInit: shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release.  This cannot throw.
    CGF.EmitNounwindRuntimeCall(callee: getGuardReleaseFn(CGM, GuardPtrTy: guardPtrTy),
                                args: guardAddr.emitRawPointer(CGF));
  } else if (D.isLocalVarDecl()) {
    // For local variables, store 1 into the first byte of the guard variable
    // after the object initialization completes so that initialization is
    // retried if initialization is interrupted by an exception.
    Builder.CreateStore(Val: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: 1),
                        Addr: guardAddr.withElementType(ElemTy: CGM.Int8Ty));
  }

  CGF.EmitBlock(BB: EndBlock);
}
2937
/// Register a global destructor using __cxa_atexit (or, for thread-local
/// variables, __cxa_thread_atexit / Darwin's _tlv_atexit).
///
/// \param dtor the destructor (or destructor stub) to register.
/// \param addr the object being destroyed, passed back to \p dtor at exit
///        time; may be null for __attribute__((destructor)) functions.
/// \param TLS  whether the variable has thread storage duration.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::FunctionCallee dtor,
                                        llvm::Constant *addr, bool TLS) {
  assert(!CGF.getTarget().getTriple().isOSAIX() &&
         "unexpected call to emitGlobalDtorWithCXAAtExit");
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
         "__cxa_atexit is disabled");
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ?  "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.
  llvm::Type *dtorTy = CGF.DefaultPtrTy;

  // Preserve address space of addr.
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
  auto AddrPtrTy = AddrAS ? llvm::PointerType::get(C&: CGF.getLLVMContext(), AddressSpace: AddrAS)
                          : CGF.Int8PtrTy;

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
      CGF.CGM.CreateRuntimeVariable(Ty: CGF.Int8Ty, Name: "__dso_handle");
  auto *GV = cast<llvm::GlobalValue>(Val: handle->stripPointerCasts());
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = {dtorTy, AddrPtrTy, handle->getType()};
  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(Result: CGF.IntTy, Params: paramTys, isVarArg: false);

  // Fetch the actual function.
  llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(Ty: atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(Val: atexit.getCallee()))
    fn->setDoesNotThrow();

  // Sign/convert the destructor as a void(void*) function pointer so the
  // runtime can call it (relevant with pointer authentication enabled).
  const auto &Context = CGF.CGM.getContext();
  FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/false));
  QualType fnType =
      Context.getFunctionType(ResultTy: Context.VoidTy, Args: {Context.VoidPtrTy}, EPI);
  llvm::Value *dtorCallee = dtor.getCallee();
  dtorCallee =
      CGF.CGM.getFunctionPointer(Pointer: cast<llvm::Constant>(Val: dtorCallee), FunctionType: fnType);

  // Match the address space expected by the atexit prototype above.
  if (dtorCallee->getType()->getPointerAddressSpace() != AddrAS)
    dtorCallee = CGF.performAddrSpaceCast(Src: dtorCallee, DestTy: AddrPtrTy);

  if (!addr)
    // addr is null when we are trying to register a dtor annotated with
    // __attribute__((destructor)) in a constructor function. Using null here is
    // okay because this argument is just passed back to the destructor
    // function.
    addr = llvm::Constant::getNullValue(Ty: CGF.Int8PtrTy);

  llvm::Value *args[] = {dtorCallee, addr, handle};
  CGF.EmitNounwindRuntimeCall(callee: atexit, args);
}
2999
3000static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
3001 StringRef FnName) {
3002 // Create a function that registers/unregisters destructors that have the same
3003 // priority.
3004 llvm::FunctionType *FTy = llvm::FunctionType::get(Result: CGM.VoidTy, isVarArg: false);
3005 llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
3006 ty: FTy, name: FnName, FI: CGM.getTypes().arrangeNullaryFunction(), Loc: SourceLocation());
3007
3008 return GlobalInitOrCleanupFn;
3009}
3010
/// Emit one __GLOBAL_cleanup_<priority> function per destructor priority.
/// Each function walks that priority's destructors in reverse registration
/// order, calls unatexit() on each, and runs the destructor directly when
/// unatexit reports it had not already fired (returned 0).
void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalCleanupFnName =
        std::string("__GLOBAL_cleanup_") + llvm::to_string(Value: Priority);

    llvm::Function *GlobalCleanupFn =
        createGlobalInitOrCleanupFn(CGM&: *this, FnName: GlobalCleanupFnName);

    // Build the cleanup function body with a fresh CodeGenFunction.
    CodeGenFunction CGF(*this);
    CGF.StartFunction(GD: GlobalDecl(), RetTy: getContext().VoidTy, Fn: GlobalCleanupFn,
                      FnInfo: getTypes().arrangeNullaryFunction(), Args: FunctionArgList(),
                      Loc: SourceLocation(), StartLoc: SourceLocation());
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Get the destructor function type, void(*)(void).
    llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(Result: CGF.VoidTy, isVarArg: false);

    // Destructor functions are run/unregistered in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    auto itv = Dtors.rbegin();
    while (itv != Dtors.rend()) {
      llvm::Function *Dtor = *itv;

      // We're assuming that the destructor function is something we can
      // reasonably call with the correct CC.
      llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub: Dtor);
      llvm::Value *NeedsDestruct =
          CGF.Builder.CreateIsNull(Arg: V, Name: "needs_destruct");

      llvm::BasicBlock *DestructCallBlock =
          CGF.createBasicBlock(name: "destruct.call");
      // The fall-through block doubles as the start of the next iteration's
      // unatexit call, except for the last destructor.
      llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
          name: (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
      // Check if unatexit returns a value of 0. If it does, jump to
      // DestructCallBlock, otherwise jump to EndBlock directly.
      CGF.Builder.CreateCondBr(Cond: NeedsDestruct, True: DestructCallBlock, False: EndBlock);

      CGF.EmitBlock(BB: DestructCallBlock);

      // Emit the call to casted Dtor.
      llvm::CallInst *CI = CGF.Builder.CreateCall(FTy: dtorFuncTy, Callee: Dtor);
      // Make sure the call and the callee agree on calling convention.
      CI->setCallingConv(Dtor->getCallingConv());

      CGF.EmitBlock(BB: EndBlock);

      itv++;
    }

    CGF.FinishFunction();
    AddGlobalDtor(Dtor: GlobalCleanupFn, Priority);
  }
}
3066
/// For each destructor priority, emit a "__GLOBAL_init_<prio>" function that
/// registers that priority's destructor stubs (via __cxa_atexit when enabled,
/// otherwise atexit) and add it to the global constructor list. On targets
/// using sinit/sterm, also emit the matching unatexit-based cleanup
/// functions.
void CodeGenModule::registerGlobalDtorsWithAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalInitFnName =
        std::string("__GLOBAL_init_") + llvm::to_string(Value: Priority);
    llvm::Function *GlobalInitFn =
        createGlobalInitOrCleanupFn(CGM&: *this, FnName: GlobalInitFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GD: GlobalDecl(), RetTy: getContext().VoidTy, Fn: GlobalInitFn,
                      FnInfo: getTypes().arrangeNullaryFunction(), Args: FunctionArgList(),
                      Loc: SourceLocation(), StartLoc: SourceLocation());
    // Compiler-generated function: give it an artificial debug location.
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Since constructor functions are run in non-descending order of their
    // priorities, destructors are registered in non-descending order of their
    // priorities, and since destructor functions are run in the reverse order
    // of their registration, destructor functions are run in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    for (auto *Dtor : Dtors) {
      // Register the destructor function calling __cxa_atexit if it is
      // available. Otherwise fall back on calling atexit.
      if (getCodeGenOpts().CXAAtExit) {
        emitGlobalDtorWithCXAAtExit(CGF, dtor: Dtor, addr: nullptr, TLS: false);
      } else {
        // We're assuming that the destructor function is something we can
        // reasonably call with the correct CC.
        CGF.registerGlobalDtorWithAtExit(dtorStub: Dtor);
      }
    }

    CGF.FinishFunction();
    // Run the registration function as a global constructor at this priority.
    AddGlobalCtor(Ctor: GlobalInitFn, Priority);
  }

  // Targets with sinit/sterm semantics also need the mirror-image cleanup
  // functions that unregister (or directly run) these destructors.
  if (getCXXABI().useSinitAndSterm())
    unregisterGlobalDtorsWithUnAtExit();
}
3106
/// Register a global destructor as best as we know how.
///
/// Chooses, in priority order, between: doing nothing ([[no_destroy]]),
/// an HLSL dtor-list entry, LLVM global destructors (no atexit available),
/// __cxa_atexit / __cxa_thread_atexit, an Apple kext dtor-list entry, and
/// plain atexit as the fallback.
///
/// \param dtor the destructor (or destructor stub) to invoke at exit.
/// \param addr the object address passed to the destructor.
void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                                       llvm::FunctionCallee dtor,
                                       llvm::Constant *addr) {
  // [[clang::no_destroy]]: the variable is never destroyed.
  if (D.isNoDestroy(CGM.getContext()))
    return;

  // HLSL doesn't support atexit.
  if (CGM.getLangOpts().HLSL)
    return CGM.AddCXXDtorEntry(DtorFn: dtor, Object: addr);

  // OpenMP offloading supports C++ constructors and destructors but we do not
  // always have 'atexit' available. Instead lower these to use the LLVM global
  // destructors which we can handle directly in the runtime. Note that this is
  // not strictly 1-to-1 with using `atexit` because we no longer tear down
  // globals in reverse order of when they were constructed.
  if (!CGM.getLangOpts().hasAtExit() && !D.isStaticLocal())
    return CGF.registerGlobalDtorWithLLVM(D, fn: dtor, addr);

  // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
  // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
  // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
  // We can always use __cxa_thread_atexit.
  if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
    return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, TLS: D.getTLSKind());

  // In Apple kexts, we want to add a global destructor entry.
  // FIXME: shouldn't this be guarded by some variable?
  if (CGM.getLangOpts().AppleKext) {
    // Generate a global destructor entry.
    return CGM.AddCXXDtorEntry(DtorFn: dtor, Object: addr);
  }

  // Fallback: register with plain atexit.
  CGF.registerGlobalDtorWithAtExit(D, fn: dtor, addr);
}
3142
3143static bool isThreadWrapperReplaceable(const VarDecl *VD,
3144 CodeGen::CodeGenModule &CGM) {
3145 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
3146 // Darwin prefers to have references to thread local variables to go through
3147 // the thread wrapper instead of directly referencing the backing variable.
3148 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
3149 CGM.getTarget().getTriple().isOSDarwin();
3150}
3151
3152/// Get the appropriate linkage for the wrapper function. This is essentially
3153/// the weak form of the variable's linkage; every translation unit which needs
3154/// the wrapper emits a copy, and we want the linker to merge them.
3155static llvm::GlobalValue::LinkageTypes
3156getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
3157 llvm::GlobalValue::LinkageTypes VarLinkage =
3158 CGM.getLLVMLinkageVarDefinition(VD);
3159
3160 // For internal linkage variables, we don't need an external or weak wrapper.
3161 if (llvm::GlobalValue::isLocalLinkage(Linkage: VarLinkage))
3162 return VarLinkage;
3163
3164 // If the thread wrapper is replaceable, give it appropriate linkage.
3165 if (isThreadWrapperReplaceable(VD, CGM))
3166 if (!llvm::GlobalVariable::isLinkOnceLinkage(Linkage: VarLinkage) &&
3167 !llvm::GlobalVariable::isWeakODRLinkage(Linkage: VarLinkage))
3168 return VarLinkage;
3169 return llvm::GlobalValue::WeakODRLinkage;
3170}
3171
/// Get or create the thread_local wrapper function for \p VD. The wrapper
/// returns the address of the variable (for a reference, a pointer to the
/// referenced object); its body is emitted later when all wrappers recorded
/// in ThreadWrappers are processed.
llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(D: VD, Out);
  }

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  if (llvm::Value *V = CGM.getModule().getNamedValue(Name: WrapperName))
    return cast<llvm::Function>(Val: V);

  // The wrapper returns a pointer to the variable's value type; strip any
  // reference so the return type is "pointer to referenced object".
  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      resultType: getContext().getPointerType(T: RetQT), args: FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(Info: FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(Ty: FnTy, Linkage: getThreadLocalWrapperLinkage(VD, CGM),
                             N: WrapperName.str(), M: &CGM.getModule());

  // Weak wrappers go in a COMDAT so the linker keeps exactly one copy.
  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Name: Wrapper->getName()));

  CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI, F: Wrapper, /*IsThunk=*/false);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage())
    if (!isThreadWrapperReplaceable(VD, CGM) ||
        llvm::GlobalVariable::isLinkOnceLinkage(Linkage: Wrapper->getLinkage()) ||
        llvm::GlobalVariable::isWeakODRLinkage(Linkage: Wrapper->getLinkage()) ||
        VD->getVisibility() == HiddenVisibility)
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // Replaceable wrappers (Darwin) use the CXX_FAST_TLS calling convention and
  // must not unwind.
  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(Kind: llvm::Attribute::NoUnwind);
  }

  // Record the wrapper so its body is emitted in EmitThreadLocalInitFuncs.
  ThreadWrappers.push_back(Elt: {VD, Wrapper});
  return Wrapper;
}
3220
/// Emit the guarded __tls_init function for ordered thread_local
/// initializers, then emit the body of every thread wrapper recorded in
/// ThreadWrappers, wiring each wrapper to the appropriate per-variable or
/// shared initialization function.
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    // Template instantiations have unordered initialization; everything else
    // is initialized in declaration order via the shared __tls_init.
    if (isTemplateInstantiation(
            Kind: CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(Elt: CXXThreadLocalInits[I]);
  }

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(Result: CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(ty: FTy, name: "__tls_init", FI,
                                                     Loc: SourceLocation(),
                                                     /*TLS=*/true);
    // A thread-local i8 guard ensures __tls_init runs at most once per
    // thread.
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: 0), "__tls_guard");
    Guard->setThreadLocal(true);
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
        Fn: InitFunc, CXXThreadLocals: OrderedInits, Guard: ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(Kind: llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(L: getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(Ref: CGM.getMangledName(GD: VD));
      getOrCreateThreadLocalWrapper(VD, Val: GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(Val: CGM.GetGlobalValue(Ref: CGM.getMangledName(GD: VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go through
    // the thread wrapper. This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(D: nullptr, F: Wrapper);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(D: VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(Result: CGM.VoidTy, isVarArg: false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      // Constant-initialized and trivially destructible: the wrapper has no
      // dynamic initialization to perform.
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      // Unordered (template-instantiation) variables use their own per-VD
      // init function instead of the shared __tls_init.
      if (isTemplateInstantiation(Kind: VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(Val: VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Linkage: Var->getLinkage(), Name: InitFnName.str(),
                                         Aliasee: InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(Ty: InitFnTy,
                                    Linkage: llvm::GlobalVariable::ExternalWeakLinkage,
                                    N: InitFnName.str(), M: &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(
          GD: GlobalDecl(), Info: FI, F: cast<llvm::Function>(Val: Init), /*IsThunk=*/false);
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      // Don't mark an extern_weak function DSO local on windows.
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
        Init->setDSOLocal(Var->isDSOLocal());
    }

    llvm::LLVMContext &Context = CGM.getModule().getContext();

    // The linker on AIX is not happy with missing weak symbols. However,
    // other TUs will not know whether the initialization routine exists
    // so create an empty, init function to satisfy the linker.
    // This is needed whenever a thread wrapper function is not used, and
    // also when the symbol is weak.
    if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
        isEmittedWithConstantInitializer(VD, InspectInitForWeakDef: true) &&
        !mayNeedDestruction(VD)) {
      // Init should be null. If it were non-null, then the logic above would
      // either be defining the function to be an alias or declaring the
      // function with the expectation that the definition of the variable
      // is elsewhere.
      assert(Init == nullptr && "Expected Init to be null.");

      llvm::Function *Func = llvm::Function::Create(
          Ty: InitFnTy, Linkage: Var->getLinkage(), N: InitFnName.str(), M: &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI,
                                    F: cast<llvm::Function>(Val: Func),
                                    /*IsThunk=*/false);
      // Create a function body that just returns
      llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, Name: "", Parent: Func);
      CGBuilderTy Builder(CGM, Entry);
      Builder.CreateRetVoid();
    }

    // Now emit the wrapper body: run the init function (if needed), then
    // return the variable's address.
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, Name: "", Parent: Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (HasConstantInitialization) {
      // No dynamic initialization to invoke.
    } else if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(FTy: InitFnTy, Callee: Init);
        if (isThreadWrapperReplaceable(VD, CGM)) {
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
          llvm::Function *Fn =
              cast<llvm::Function>(Val: cast<llvm::GlobalAlias>(Val: Init)->getAliasee());
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
        }
      }
    } else if (CGM.getTriple().isOSAIX()) {
      // On AIX, except if constinit and also neither of class type or of
      // (possibly multi-dimensional) array of class type, thread_local vars
      // will have init routines regardless of whether they are
      // const-initialized. Since the routine is guaranteed to exist, we can
      // unconditionally call it without testing for its existence. This
      // avoids potentially unresolved weak symbols which the AIX linker
      // isn't happy with.
      Builder.CreateCall(FTy: InitFnTy, Callee: Init);
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Arg: Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, Name: "", Parent: Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, Name: "", Parent: Wrapper);
      Builder.CreateCondBr(Cond: Have, True: InitBB, False: ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(FTy: InitFnTy, Callee: Init);
      Builder.CreateBr(Dest: ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Builder.CreateThreadLocalAddress(Ptr: Var);

    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(D: VD);
      Val = Builder.CreateAlignedLoad(Ty: Var->getValueType(), Addr: Val, Align);
    }
    Val = Builder.CreateAddrSpaceCast(V: Val, DestTy: Wrapper->getReturnType());

    Builder.CreateRet(V: Val);
  }
}
3424
3425LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3426 const VarDecl *VD,
3427 QualType LValType) {
3428 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(D: VD);
3429 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3430
3431 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Callee: Wrapper);
3432 CallVal->setCallingConv(Wrapper->getCallingConv());
3433
3434 LValue LV;
3435 if (VD->getType()->isReferenceType())
3436 LV = CGF.MakeNaturalAlignRawAddrLValue(V: CallVal, T: LValType);
3437 else
3438 LV = CGF.MakeRawAddrLValue(V: CallVal, T: LValType,
3439 Alignment: CGF.getContext().getDeclAlign(D: VD));
3440 // FIXME: need setObjCGCLValueClass?
3441 return LV;
3442}
3443
3444/// Return whether the given global decl needs a VTT parameter, which it does
3445/// if it's a base constructor or destructor with virtual bases.
3446bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3447 const CXXMethodDecl *MD = cast<CXXMethodDecl>(Val: GD.getDecl());
3448
3449 // We don't have any virtual bases, just return early.
3450 if (!MD->getParent()->getNumVBases())
3451 return false;
3452
3453 // Check if we have a base constructor.
3454 if (isa<CXXConstructorDecl>(Val: MD) && GD.getCtorType() == Ctor_Base)
3455 return true;
3456
3457 // Check if we have a base destructor.
3458 if (isa<CXXDestructorDecl>(Val: MD) && GD.getDtorType() == Dtor_Base)
3459 return true;
3460
3461 return false;
3462}
3463
/// Get or create a "_vfpthunk_" function for \p MD: a thunk used where a
/// pointer to a virtual member function is needed. The thunk loads the real
/// target from the vtable of `this` and musttail-calls it, forwarding all
/// arguments unchanged.
llvm::Constant *
ItaniumCXXABI::getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD) {
  // Thunk name: the method's mangled name with a "_vfpthunk_" suffix.
  SmallString<256> MethodName;
  llvm::raw_svector_ostream Out(MethodName);
  getMangleContext().mangleCXXName(GD: MD, Out);
  MethodName += "_vfpthunk_";
  StringRef ThunkName = MethodName.str();
  llvm::Function *ThunkFn;
  // Reuse an existing thunk if one was already emitted for this method.
  if ((ThunkFn = cast_or_null<llvm::Function>(
           Val: CGM.getModule().getNamedValue(Name: ThunkName))))
    return ThunkFn;

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeCXXMethodDeclaration(MD);
  llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(Info: FnInfo);
  // Externally visible methods get a mergeable (linkonce_odr, hidden) thunk;
  // internal methods get an internal one.
  llvm::GlobalValue::LinkageTypes Linkage =
      MD->isExternallyVisible() ? llvm::GlobalValue::LinkOnceODRLinkage
                                : llvm::GlobalValue::InternalLinkage;
  ThunkFn =
      llvm::Function::Create(Ty: ThunkTy, Linkage, N: ThunkName, M: &CGM.getModule());
  if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
    ThunkFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
  assert(ThunkFn->getName() == ThunkName && "name was uniqued!");

  CGM.SetLLVMFunctionAttributes(GD: MD, Info: FnInfo, F: ThunkFn, /*IsThunk=*/true);
  CGM.SetLLVMFunctionAttributesForDefinition(D: MD, F: ThunkFn);

  // Stack protection sometimes gets inserted after the musttail call.
  ThunkFn->removeFnAttr(Kind: llvm::Attribute::StackProtect);
  ThunkFn->removeFnAttr(Kind: llvm::Attribute::StackProtectStrong);
  ThunkFn->removeFnAttr(Kind: llvm::Attribute::StackProtectReq);

  // Start codegen.
  CodeGenFunction CGF(CGM);
  CGF.CurGD = GlobalDecl(MD);
  CGF.CurFuncIsThunk = true;

  // Build FunctionArgs.
  FunctionArgList FunctionArgs;
  CGF.BuildFunctionArgList(GD: CGF.CurGD, Args&: FunctionArgs);

  CGF.StartFunction(GD: GlobalDecl(), RetTy: FnInfo.getReturnType(), Fn: ThunkFn, FnInfo,
                    Args: FunctionArgs, Loc: MD->getLocation(), StartLoc: SourceLocation());

  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(CGF);

  llvm::Value *ThisVal = loadIncomingCXXThis(CGF);
  setCXXABIThisValue(CGF, ThisPtr: ThisVal);

  // Forward every incoming parameter to the virtual call unchanged.
  CallArgList CallArgs;
  for (const VarDecl *VD : FunctionArgs)
    CGF.EmitDelegateCallArg(args&: CallArgs, param: VD, loc: SourceLocation());

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  RequiredArgs Required = RequiredArgs::forPrototypePlus(prototype: FPT, /*this*/ additional: 1);
  const CGFunctionInfo &CallInfo =
      CGM.getTypes().arrangeCXXMethodCall(args: CallArgs, type: FPT, required: Required, numPrefixArgs: 0);
  // Dispatch through the vtable of `this`.
  CGCallee Callee = CGCallee::forVirtual(CE: nullptr, MD: GlobalDecl(MD),
                                         Addr: getThisAddress(CGF), FTy: ThunkTy);
  llvm::CallBase *CallOrInvoke;
  CGF.EmitCall(CallInfo, Callee, ReturnValue: ReturnValueSlot(), Args: CallArgs, CallOrInvoke: &CallOrInvoke,
               /*IsMustTail=*/true, Loc: SourceLocation(), IsVirtualFunctionPointerThunk: true);
  auto *Call = cast<llvm::CallInst>(Val: CallOrInvoke);
  // The thunk must be a pure tail call so the callee observes the original
  // caller's frame.
  Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
  if (Call->getType()->isVoidTy())
    CGF.Builder.CreateRetVoid();
  else
    CGF.Builder.CreateRet(V: Call);

  // Finish the function to maintain CodeGenFunction invariants.
  // FIXME: Don't emit unreachable code.
  CGF.EmitBlock(BB: CGF.createBasicBlock());
  CGF.FinishFunction();
  return ThunkFn;
}
3539
namespace {
/// ItaniumRTTIBuilder - Builds Itanium-ABI RTTI descriptors (type_info
/// objects and their mangled-name strings) for a single type at a time.
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;  // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI;  // Per-module state.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty, llvm::Constant *StorageAddress);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags (abi::__pbase_type_info::__flags).
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10,

    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
    //PTI_TransactionSafe = 0x20,

    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
    PTI_Noexcept = 0x40,
  };

  // VMI type info flags (abi::__vmi_class_type_info::__flags).
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags (abi::__base_class_type_info::__offset_flags).
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
  /// link to an existing RTTI descriptor if one already exists.
  llvm::Constant *BuildTypeInfo(QualType Ty);

  /// BuildTypeInfo - Build the RTTI type info struct for the given type,
  /// with explicit control over the emitted global's linkage, visibility,
  /// and DLL storage class.
  llvm::Constant *BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
};
}  // namespace
3640
3641llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3642 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3643 SmallString<256> Name;
3644 llvm::raw_svector_ostream Out(Name);
3645 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(T: Ty, Out);
3646
3647 // We know that the mangled name of the type starts at index 4 of the
3648 // mangled name of the typename, so we can just index into it in order to
3649 // get the mangled name of the type.
3650 llvm::Constant *Init;
3651 if (CGM.getTriple().isOSzOS()) {
3652 // On z/OS, typename is stored as 2 encodings: EBCDIC followed by ASCII.
3653 SmallString<256> DualEncodedName;
3654 llvm::ConverterEBCDIC::convertToEBCDIC(Source: Name.substr(Start: 4), Result&: DualEncodedName);
3655 DualEncodedName += '\0';
3656 DualEncodedName += Name.substr(Start: 4);
3657 Init = llvm::ConstantDataArray::getString(Context&: VMContext, Initializer: DualEncodedName);
3658 } else
3659 Init = llvm::ConstantDataArray::getString(Context&: VMContext, Initializer: Name.substr(Start: 4));
3660
3661 auto Align = CGM.getContext().getTypeAlignInChars(T: CGM.getContext().CharTy);
3662
3663 llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3664 Name, Ty: Init->getType(), Linkage, Alignment: Align.getAsAlign());
3665
3666 GV->setInitializer(Init);
3667
3668 return GV;
3669}
3670
/// Return a reference to the external (defined-elsewhere) RTTI descriptor
/// for \p Ty, creating an external-linkage declaration if the module does
/// not have one yet.
llvm::Constant *
ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
  // Mangle the RTTI name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T: Ty, Out);

  // Look for an existing global.
  llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);

  if (!GV) {
    // Create a new global variable.
    // Note for the future: If we would ever like to do deferred emission of
    // RTTI, check if emitting vtables opportunistically need any adjustment.

    GV = new llvm::GlobalVariable(
        CGM.getModule(), CGM.GlobalsInt8PtrTy,
        /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr, Name);
    const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
    // Propagate visibility/DLL attributes from the record decl, if any.
    CGM.setGVProperties(GV, D: RD);
    // Import the typeinfo symbol when all non-inline virtual methods are
    // imported.
    if (CGM.getTarget().hasPS4DLLImportExport()) {
      if (RD && CXXRecordNonInlineHasAttr<DLLImportAttr>(RD)) {
        GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
        CGM.setDSOLocal(GV);
      }
    }
  }

  return GV;
}
3703
/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
/// info for that type is defined in the standard library.
static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
  // Itanium C++ ABI 2.9.2:
  //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
  //   the run-time support library. Specifically, the run-time support
  //   library should contain type_info objects for the types X, X* and
  //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
  //   unsigned char, signed char, short, unsigned short, int, unsigned int,
  //   long, unsigned long, long long, unsigned long long, float, double,
  //   long double, char16_t, char32_t, and the IEEE 754r decimal and
  //   half-precision floating point types.
  //
  // GCC also emits RTTI for __int128.
  // FIXME: We do not emit RTTI information for decimal types here.

  // Types added here must also be added to EmitFundamentalRTTIDescriptors.
  switch (Ty->getKind()) {
    // Fundamental types covered by the ABI-mandated runtime set.
    case BuiltinType::Void:
    case BuiltinType::NullPtr:
    case BuiltinType::Bool:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char_U:
    case BuiltinType::Char_S:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::Half:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
    case BuiltinType::Float16:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      return true;

    // Vendor/extension types (OpenCL, SVE, PPC/RVV/WASM/AMDGPU vectors, HLSL,
    // fixed-point, bfloat16): not part of the standard-library RTTI set.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
#define SVE_TYPE(Name, Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/Basic/AArch64ACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
    case BuiltinType::BFloat16:
      return false;

    // Placeholder types must be resolved before RTTI is requested.
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("asking for RRTI for a placeholder type!");

    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      llvm_unreachable("FIXME: Objective-C types are unsupported!");
  }

  llvm_unreachable("Invalid BuiltinType Kind!");
}
3820
3821static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3822 QualType PointeeTy = PointerTy->getPointeeType();
3823 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Val&: PointeeTy);
3824 if (!BuiltinTy)
3825 return false;
3826
3827 // Check the qualifiers.
3828 Qualifiers Quals = PointeeTy.getQualifiers();
3829 Quals.removeConst();
3830
3831 if (!Quals.empty())
3832 return false;
3833
3834 return TypeInfoIsInStandardLibrary(Ty: BuiltinTy);
3835}
3836
3837/// IsStandardLibraryRTTIDescriptor - Returns whether the type
3838/// information for the given type exists in the standard library.
3839static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3840 // Type info for builtin types is defined in the standard library.
3841 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Val&: Ty))
3842 return TypeInfoIsInStandardLibrary(Ty: BuiltinTy);
3843
3844 // Type info for some pointer types to builtin types is defined in the
3845 // standard library.
3846 if (const PointerType *PointerTy = dyn_cast<PointerType>(Val&: Ty))
3847 return TypeInfoIsInStandardLibrary(PointerTy);
3848
3849 return false;
3850}
3851
3852/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3853/// the given type exists somewhere else, and that we should not emit the type
3854/// information in this translation unit. Assumes that it is not a
3855/// standard-library type.
3856static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3857 QualType Ty) {
3858 ASTContext &Context = CGM.getContext();
3859
3860 // If RTTI is disabled, assume it might be disabled in the
3861 // translation unit that defines any potential key function, too.
3862 if (!Context.getLangOpts().RTTI) return false;
3863
3864 if (const RecordType *RecordTy = dyn_cast<RecordType>(Val&: Ty)) {
3865 const CXXRecordDecl *RD =
3866 cast<CXXRecordDecl>(Val: RecordTy->getDecl())->getDefinitionOrSelf();
3867 if (!RD->hasDefinition())
3868 return false;
3869
3870 if (!RD->isDynamicClass())
3871 return false;
3872
3873 // FIXME: this may need to be reconsidered if the key function
3874 // changes.
3875 // N.B. We must always emit the RTTI data ourselves if there exists a key
3876 // function.
3877 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3878
3879 // Don't import the RTTI but emit it locally.
3880 if (CGM.getTriple().isOSCygMing())
3881 return false;
3882
3883 if (CGM.getVTables().isVTableExternal(RD)) {
3884 if (CGM.getTarget().hasPS4DLLImportExport())
3885 return true;
3886
3887 return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3888 ? false
3889 : true;
3890 }
3891 if (IsDLLImport)
3892 return true;
3893 }
3894
3895 return false;
3896}
3897
3898/// IsIncompleteClassType - Returns whether the given record type is incomplete.
3899static bool IsIncompleteClassType(const RecordType *RecordTy) {
3900 return !RecordTy->getDecl()->getDefinitionOrSelf()->isCompleteDefinition();
3901}
3902
3903/// ContainsIncompleteClassType - Returns whether the given type contains an
3904/// incomplete class type. This is true if
3905///
3906/// * The given type is an incomplete class type.
3907/// * The given type is a pointer type whose pointee type contains an
3908/// incomplete class type.
3909/// * The given type is a member pointer type whose class is an incomplete
3910/// class type.
3911/// * The given type is a member pointer type whoise pointee type contains an
3912/// incomplete class type.
3913/// is an indirect or direct pointer to an incomplete class type.
3914static bool ContainsIncompleteClassType(QualType Ty) {
3915 if (const RecordType *RecordTy = dyn_cast<RecordType>(Val&: Ty)) {
3916 if (IsIncompleteClassType(RecordTy))
3917 return true;
3918 }
3919
3920 if (const PointerType *PointerTy = dyn_cast<PointerType>(Val&: Ty))
3921 return ContainsIncompleteClassType(Ty: PointerTy->getPointeeType());
3922
3923 if (const MemberPointerType *MemberPointerTy =
3924 dyn_cast<MemberPointerType>(Val&: Ty)) {
3925 // Check if the class type is incomplete.
3926 if (!MemberPointerTy->getMostRecentCXXRecordDecl()->hasDefinition())
3927 return true;
3928
3929 return ContainsIncompleteClassType(Ty: MemberPointerTy->getPointeeType());
3930 }
3931
3932 return false;
3933}
3934
3935// CanUseSingleInheritance - Return whether the given record decl has a "single,
3936// public, non-virtual base at offset zero (i.e. the derived class is dynamic
3937// iff the base is)", according to Itanium C++ ABI, 2.95p6b.
3938static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3939 // Check the number of bases.
3940 if (RD->getNumBases() != 1)
3941 return false;
3942
3943 // Get the base.
3944 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3945
3946 // Check that the base is not virtual.
3947 if (Base->isVirtual())
3948 return false;
3949
3950 // Check that the base is public.
3951 if (Base->getAccessSpecifier() != AS_public)
3952 return false;
3953
3954 // Check that the class is dynamic iff the base is.
3955 auto *BaseDecl = Base->getType()->castAsCXXRecordDecl();
3956 if (!BaseDecl->isEmpty() &&
3957 BaseDecl->isDynamicClass() != RD->isDynamicClass())
3958 return false;
3959
3960 return true;
3961}
3962
/// BuildVTablePointer - Push the vtable-pointer field of a type_info object
/// onto Fields: the address point of the vtable of the __cxxabiv1 RTTI class
/// selected from \p Ty's type class, optionally signed when pointer
/// authentication of type_info vtable pointers is enabled.
///
/// \param Ty the (canonical) type whose type_info is being built.
/// \param StorageAddress the type_info global itself, used as the address
///   discriminator when the pointer-auth schema is address-discriminated.
void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty,
                                            llvm::Constant *StorageAddress) {
  // abi::__class_type_info.
  static const char * const ClassTypeInfo =
    "_ZTVN10__cxxabiv117__class_type_infoE";
  // abi::__si_class_type_info.
  static const char * const SIClassTypeInfo =
    "_ZTVN10__cxxabiv120__si_class_type_infoE";
  // abi::__vmi_class_type_info.
  static const char * const VMIClassTypeInfo =
    "_ZTVN10__cxxabiv121__vmi_class_type_infoE";

  // Mangled name of the vtable of the RTTI class this type_info will use.
  const char *VTableName = nullptr;

  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    llvm_unreachable("Pipe types shouldn't get here");

  case Type::ArrayParameter:
    llvm_unreachable("Array Parameter types should not get here.");

  case Type::Builtin:
  case Type::BitInt:
  case Type::OverflowBehavior:
  // GCC treats vector and complex types as fundamental types.
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::Atomic:
  // FIXME: GCC treats block pointers as fundamental types?!
  case Type::BlockPointer:
    // abi::__fundamental_type_info.
    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // abi::__array_type_info.
    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // abi::__function_type_info.
    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
    break;

  case Type::Enum:
    // abi::__enum_type_info.
    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
    break;

  case Type::Record: {
    // Classes pick between the plain, single-inheritance, and
    // virtual/multiple-inheritance RTTI classes based on their bases.
    const auto *RD = cast<CXXRecordDecl>(Val: cast<RecordType>(Val: Ty)->getDecl())
                         ->getDefinitionOrSelf();

    if (!RD->hasDefinition() || !RD->getNumBases()) {
      VTableName = ClassTypeInfo;
    } else if (CanUseSingleInheritance(RD)) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = VMIClassTypeInfo;
    }

    break;
  }

  case Type::ObjCObject:
    // Ignore protocol qualifiers.
    Ty = cast<ObjCObjectType>(Val: Ty)->getBaseType().getTypePtr();

    // Handle id and Class.
    if (isa<BuiltinType>(Val: Ty)) {
      VTableName = ClassTypeInfo;
      break;
    }

    assert(isa<ObjCInterfaceType>(Ty));
    [[fallthrough]];

  case Type::ObjCInterface:
    if (cast<ObjCInterfaceType>(Val: Ty)->getDecl()->getSuperClass()) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = ClassTypeInfo;
    }
    break;

  case Type::ObjCObjectPointer:
  case Type::Pointer:
    // abi::__pointer_type_info.
    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
    break;

  case Type::MemberPointer:
    // abi::__pointer_to_member_type_info.
    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
    break;

  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    llvm_unreachable("HLSL doesn't support virtual functions");
  }

  llvm::Constant *VTable = nullptr;

  // Check if the alias exists. If it doesn't, then get or create the global.
  if (CGM.getItaniumVTableContext().isRelativeLayout())
    VTable = CGM.getModule().getNamedAlias(Name: VTableName);
  if (!VTable) {
    llvm::Type *Ty = llvm::ArrayType::get(ElementType: CGM.GlobalsInt8PtrTy, NumElements: 0);
    VTable = CGM.getModule().getOrInsertGlobal(Name: VTableName, Ty);
  }

  CGM.setDSOLocal(cast<llvm::GlobalValue>(Val: VTable->stripPointerCasts()));

  llvm::Type *PtrDiffTy =
      CGM.getTypes().ConvertType(T: CGM.getContext().getPointerDiffType());

  // The vtable address point is 2.
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // The vtable address point is 8 bytes after its start:
    // 4 for the offset to top + 4 for the relative offset to rtti.
    llvm::Constant *Eight = llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: 8);
    VTable = llvm::ConstantExpr::getInBoundsPtrAdd(Ptr: VTable, Offset: Eight);
  } else {
    llvm::Constant *Two = llvm::ConstantInt::get(Ty: PtrDiffTy, V: 2);
    VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(Ty: CGM.GlobalsInt8PtrTy,
                                                          C: VTable, Idx: Two);
  }

  // Sign the vtable pointer when a pointer-auth schema is configured for
  // type_info vtable pointers, discriminating by address only if the schema
  // requests it.
  if (const auto &Schema =
          CGM.getCodeGenOpts().PointerAuth.CXXTypeInfoVTablePointer)
    VTable = CGM.getConstantSignedPointer(
        Pointer: VTable, Schema,
        StorageAddress: Schema.isAddressDiscriminated() ? StorageAddress : nullptr,
        SchemaDecl: GlobalDecl(), SchemaType: QualType(Ty, 0));

  Fields.push_back(Elt: VTable);
}
4121
/// Return the linkage that the type info and type info name constants
/// should have for the given type.
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
                                                             QualType Ty) {
  // Itanium C++ ABI 2.9.5p7:
  //   In addition, it and all of the intermediate abi::__pointer_type_info
  //   structs in the chain down to the abi::__class_type_info for the
  //   incomplete class type must be prevented from resolving to the
  //   corresponding type_info structs for the complete class type, possibly
  //   by making them local static objects. Finally, a dummy class RTTI is
  //   generated for the incomplete type that will not resolve to the final
  //   complete class RTTI (because the latter need not exist), possibly by
  //   making it a local static object.
  if (ContainsIncompleteClassType(Ty))
    return llvm::GlobalValue::InternalLinkage;

  // Otherwise the type_info linkage follows the linkage of the type itself.
  switch (Ty->getLinkage()) {
  case Linkage::Invalid:
    llvm_unreachable("Linkage hasn't been computed!");

  case Linkage::None:
  case Linkage::Internal:
  case Linkage::UniqueExternal:
    return llvm::GlobalValue::InternalLinkage;

  case Linkage::VisibleNone:
  case Linkage::Module:
  case Linkage::External:
    // RTTI is not enabled, which means that this type info struct is going
    // to be used for exception handling. Give it linkonce_odr linkage.
    if (!CGM.getLangOpts().RTTI)
      return llvm::GlobalValue::LinkOnceODRLinkage;

    if (const RecordType *Record = dyn_cast<RecordType>(Val&: Ty)) {
      const auto *RD =
          cast<CXXRecordDecl>(Val: Record->getDecl())->getDefinitionOrSelf();
      if (RD->hasAttr<WeakAttr>())
        return llvm::GlobalValue::WeakODRLinkage;
      // On Windows Itanium, dllimport'ed classes with external RTTI keep
      // external linkage so the descriptor is referenced, not emitted.
      if (CGM.getTriple().isWindowsItaniumEnvironment())
        if (RD->hasAttr<DLLImportAttr>() &&
            ShouldUseExternalRTTIDescriptor(CGM, Ty))
          return llvm::GlobalValue::ExternalLinkage;
      // MinGW always uses LinkOnceODRLinkage for type info.
      if (RD->isDynamicClass() &&
          !CGM.getContext().getTargetInfo().getTriple().isOSCygMing())
        return CGM.getVTableLinkage(RD);
    }

    return llvm::GlobalValue::LinkOnceODRLinkage;
  }

  llvm_unreachable("Invalid linkage!");
}
4175
/// BuildTypeInfo - Get or emit the RTTI descriptor for \p Ty. Reuses an
/// already-defined descriptor or an external one when possible; otherwise
/// computes the linkage, visibility, and DLL storage class for the new
/// descriptor and delegates to the four-argument overload to emit it.
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
  // We want to operate on the canonical type.
  Ty = Ty.getCanonicalType();

  // Check if we've already emitted an RTTI descriptor for this type,
  // keyed by its mangled _ZTI name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T: Ty, Out);

  llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
  if (OldGV && !OldGV->isDeclaration()) {
    assert(!OldGV->hasAvailableExternallyLinkage() &&
           "available_externally typeinfos not yet implemented");

    return OldGV;
  }

  // Check if there is already an external RTTI descriptor for this type.
  if (IsStandardLibraryRTTIDescriptor(Ty) ||
      ShouldUseExternalRTTIDescriptor(CGM, Ty))
    return GetAddrOfExternalRTTIDescriptor(Ty);

  // Emit the standard library with external linkage.
  llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);

  // Give the type_info object and name the formal visibility of the
  // type itself.
  llvm::GlobalValue::VisibilityTypes llvmVisibility;
  if (llvm::GlobalValue::isLocalLinkage(Linkage))
    // If the linkage is local, only default visibility makes sense.
    llvmVisibility = llvm::GlobalValue::DefaultVisibility;
  else if (CXXABI.classifyRTTIUniqueness(CanTy: Ty, Linkage) ==
           ItaniumCXXABI::RUK_NonUniqueHidden)
    llvmVisibility = llvm::GlobalValue::HiddenVisibility;
  else
    llvmVisibility = CodeGenModule::GetLLVMVisibility(V: Ty->getVisibility());

  // Export the descriptor alongside the class when the class itself is
  // dllexported (explicitly, or implicitly via -fvisibility-dllexport).
  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
      llvm::GlobalValue::DefaultStorageClass;
  if (auto RD = Ty->getAsCXXRecordDecl()) {
    if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
         RD->hasAttr<DLLExportAttr>()) ||
        (CGM.shouldMapVisibilityToDLLExport(D: RD) &&
         !llvm::GlobalValue::isLocalLinkage(Linkage) &&
         llvmVisibility == llvm::GlobalValue::DefaultVisibility))
      DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
  }
  return BuildTypeInfo(Ty, Linkage, Visibility: llvmVisibility, DLLStorageClass);
}
4225
/// BuildTypeInfo - Emit the RTTI descriptor global for \p Ty with the given
/// linkage, visibility, and DLL storage class. Builds the vtable pointer,
/// the _ZTS name field, and any per-type-class extra fields into Fields,
/// then installs them as the global's initializer.
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
    QualType Ty,
    llvm::GlobalVariable::LinkageTypes Linkage,
    llvm::GlobalValue::VisibilityTypes Visibility,
    llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
  // Mangle the _ZTI symbol name for this type.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T: Ty, Out);
  llvm::Module &M = CGM.getModule();
  llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
  // int8 is an arbitrary type to be replaced later with replaceInitializer.
  llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(M, CGM.Int8Ty, /*isConstant=*/true, Linkage,
                               /*Initializer=*/nullptr, Name);

  // Add the vtable pointer.
  BuildVTablePointer(Ty: cast<Type>(Val&: Ty), StorageAddress: GV);

  // And the name.
  llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
  llvm::Constant *TypeNameField;

  // If we're supposed to demote the visibility, be sure to set a flag
  // to use a string comparison for type_info comparisons.
  ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
      CXXABI.classifyRTTIUniqueness(CanTy: Ty, Linkage);
  if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
    // The flag is the sign bit, which on ARM64 is defined to be clear
    // for global pointers. This is very ARM64-specific.
    TypeNameField = llvm::ConstantExpr::getPtrToInt(C: TypeName, Ty: CGM.Int64Ty);
    llvm::Constant *flag =
        llvm::ConstantInt::get(Ty: CGM.Int64Ty, V: ((uint64_t)1) << 63);
    TypeNameField = llvm::ConstantExpr::getAdd(C1: TypeNameField, C2: flag);
    TypeNameField =
        llvm::ConstantExpr::getIntToPtr(C: TypeNameField, Ty: CGM.GlobalsInt8PtrTy);
  } else {
    TypeNameField = TypeName;
  }
  Fields.push_back(Elt: TypeNameField);

  // Append the fields that the concrete __cxxabiv1 RTTI class adds beyond
  // std::type_info, selected by the type class.
  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  // GCC treats vector types as fundamental types.
  case Type::Builtin:
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::BlockPointer:
    // Itanium C++ ABI 2.9.5p4:
    //   abi::__fundamental_type_info adds no data members to std::type_info.
    break;

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    break;

  case Type::BitInt:
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::ArrayParameter:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__array_type_info adds no data members to std::type_info.
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__function_type_info adds no data members to std::type_info.
    break;

  case Type::Enum:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__enum_type_info adds no data members to std::type_info.
    break;

  case Type::Record: {
    const auto *RD = cast<CXXRecordDecl>(Val: cast<RecordType>(Val&: Ty)->getDecl())
                         ->getDefinitionOrSelf();
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      // We don't need to emit any fields.
      break;
    }

    if (CanUseSingleInheritance(RD))
      BuildSIClassTypeInfo(RD);
    else
      BuildVMIClassTypeInfo(RD);

    break;
  }

  case Type::ObjCObject:
  case Type::ObjCInterface:
    BuildObjCObjectTypeInfo(Ty: cast<ObjCObjectType>(Val&: Ty));
    break;

  case Type::ObjCObjectPointer:
    BuildPointerTypeInfo(PointeeTy: cast<ObjCObjectPointerType>(Val&: Ty)->getPointeeType());
    break;

  case Type::Pointer:
    BuildPointerTypeInfo(PointeeTy: cast<PointerType>(Val&: Ty)->getPointeeType());
    break;

  case Type::MemberPointer:
    BuildPointerToMemberTypeInfo(Ty: cast<MemberPointerType>(Val&: Ty));
    break;

  case Type::Atomic:
    // No fields, at least for the moment.
    break;

  case Type::OverflowBehavior:
    break;

  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    llvm_unreachable("HLSL doesn't support RTTI");
  }

  GV->replaceInitializer(InitVal: llvm::ConstantStruct::getAnon(V: Fields));

  // Export the typeinfo in the same circumstances as the vtable is exported.
  auto GVDLLStorageClass = DLLStorageClass;
  if (CGM.getTarget().hasPS4DLLImportExport() &&
      GVDLLStorageClass != llvm::GlobalVariable::DLLExportStorageClass) {
    if (const RecordType *RecordTy = dyn_cast<RecordType>(Val&: Ty)) {
      const auto *RD =
          cast<CXXRecordDecl>(Val: RecordTy->getDecl())->getDefinitionOrSelf();
      if (RD->hasAttr<DLLExportAttr>() ||
          CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
        GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
    }
  }

  // If there's already an old global variable, replace it with the new one.
  if (OldGV) {
    GV->takeName(V: OldGV);
    OldGV->replaceAllUsesWith(V: GV);
    OldGV->eraseFromParent();
  }

  if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(M.getOrInsertComdat(Name: GV->getName()));

  // Align the descriptor like a pointer in its address space.
  CharUnits Align = CGM.getContext().toCharUnitsFromBits(
      BitSize: CGM.getTarget().getPointerAlign(AddrSpace: CGM.GetGlobalVarAddressSpace(D: nullptr)));
  GV->setAlignment(Align.getAsAlign());

  // The Itanium ABI specifies that type_info objects must be globally
  // unique, with one exception: if the type is an incomplete class
  // type or a (possibly indirect) pointer to one. That exception
  // affects the general case of comparing type_info objects produced
  // by the typeid operator, which is why the comparison operators on
  // std::type_info generally use the type_info name pointers instead
  // of the object addresses. However, the language's built-in uses
  // of RTTI generally require class types to be complete, even when
  // manipulating pointers to those class types. This allows the
  // implementation of dynamic_cast to rely on address equality tests,
  // which is much faster.

  // All of this is to say that it's important that both the type_info
  // object and the type_info name be uniqued when weakly emitted.

  TypeName->setVisibility(Visibility);
  CGM.setDSOLocal(TypeName);

  GV->setVisibility(Visibility);
  CGM.setDSOLocal(GV);

  TypeName->setDLLStorageClass(DLLStorageClass);
  GV->setDLLStorageClass(GVDLLStorageClass);

  TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
  GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);

  return GV;
}
4422
4423/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
4424/// for the given Objective-C object type.
4425void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
4426 // Drop qualifiers.
4427 const Type *T = OT->getBaseType().getTypePtr();
4428 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
4429
4430 // The builtin types are abi::__class_type_infos and don't require
4431 // extra fields.
4432 if (isa<BuiltinType>(Val: T)) return;
4433
4434 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(Val: T)->getDecl();
4435 ObjCInterfaceDecl *Super = Class->getSuperClass();
4436
4437 // Root classes are also __class_type_info.
4438 if (!Super) return;
4439
4440 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Decl: Super);
4441
4442 // Everything else is single inheritance.
4443 llvm::Constant *BaseTypeInfo =
4444 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: SuperTy);
4445 Fields.push_back(Elt: BaseTypeInfo);
4446}
4447
4448/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
4449/// inheritance, according to the Itanium C++ ABI, 2.95p6b.
4450void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
4451 // Itanium C++ ABI 2.9.5p6b:
4452 // It adds to abi::__class_type_info a single member pointing to the
4453 // type_info structure for the base type,
4454 llvm::Constant *BaseTypeInfo =
4455 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: RD->bases_begin()->getType());
4456 Fields.push_back(Elt: BaseTypeInfo);
4457}
4458
namespace {
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
  /// a class hierarchy, used to detect repeated and diamond-shaped
  /// inheritance while computing __vmi_class_type_info flags.
  struct SeenBases {
    // Non-virtual bases (direct or indirect) encountered so far.
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
    // Virtual bases (direct or indirect) encountered so far.
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
  };
}
4467
4468/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
4469/// abi::__vmi_class_type_info.
4470///
4471static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
4472 SeenBases &Bases) {
4473
4474 unsigned Flags = 0;
4475
4476 auto *BaseDecl = Base->getType()->castAsCXXRecordDecl();
4477 if (Base->isVirtual()) {
4478 // Mark the virtual base as seen.
4479 if (!Bases.VirtualBases.insert(Ptr: BaseDecl).second) {
4480 // If this virtual base has been seen before, then the class is diamond
4481 // shaped.
4482 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
4483 } else {
4484 if (Bases.NonVirtualBases.count(Ptr: BaseDecl))
4485 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4486 }
4487 } else {
4488 // Mark the non-virtual base as seen.
4489 if (!Bases.NonVirtualBases.insert(Ptr: BaseDecl).second) {
4490 // If this non-virtual base has been seen before, then the class has non-
4491 // diamond shaped repeated inheritance.
4492 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4493 } else {
4494 if (Bases.VirtualBases.count(Ptr: BaseDecl))
4495 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4496 }
4497 }
4498
4499 // Walk all bases.
4500 for (const auto &I : BaseDecl->bases())
4501 Flags |= ComputeVMIClassTypeInfoFlags(Base: &I, Bases);
4502
4503 return Flags;
4504}
4505
4506static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
4507 unsigned Flags = 0;
4508 SeenBases Bases;
4509
4510 // Walk all bases.
4511 for (const auto &I : RD->bases())
4512 Flags |= ComputeVMIClassTypeInfoFlags(Base: &I, Bases);
4513
4514 return Flags;
4515}
4516
4517/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
4518/// classes with bases that do not satisfy the abi::__si_class_type_info
4519/// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
4520void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
4521 llvm::Type *UnsignedIntLTy =
4522 CGM.getTypes().ConvertType(T: CGM.getContext().UnsignedIntTy);
4523
4524 // Itanium C++ ABI 2.9.5p6c:
4525 // __flags is a word with flags describing details about the class
4526 // structure, which may be referenced by using the __flags_masks
4527 // enumeration. These flags refer to both direct and indirect bases.
4528 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
4529 Fields.push_back(Elt: llvm::ConstantInt::get(Ty: UnsignedIntLTy, V: Flags));
4530
4531 // Itanium C++ ABI 2.9.5p6c:
4532 // __base_count is a word with the number of direct proper base class
4533 // descriptions that follow.
4534 Fields.push_back(Elt: llvm::ConstantInt::get(Ty: UnsignedIntLTy, V: RD->getNumBases()));
4535
4536 if (!RD->getNumBases())
4537 return;
4538
4539 // Now add the base class descriptions.
4540
4541 // Itanium C++ ABI 2.9.5p6c:
4542 // __base_info[] is an array of base class descriptions -- one for every
4543 // direct proper base. Each description is of the type:
4544 //
4545 // struct abi::__base_class_type_info {
4546 // public:
4547 // const __class_type_info *__base_type;
4548 // long __offset_flags;
4549 //
4550 // enum __offset_flags_masks {
4551 // __virtual_mask = 0x1,
4552 // __public_mask = 0x2,
4553 // __offset_shift = 8
4554 // };
4555 // };
4556
4557 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4558 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4559 // LLP64 platforms.
4560 // FIXME: Consider updating libc++abi to match, and extend this logic to all
4561 // LLP64 platforms.
4562 QualType OffsetFlagsTy = CGM.getContext().LongTy;
4563 const TargetInfo &TI = CGM.getContext().getTargetInfo();
4564 if (TI.getTriple().isOSCygMing() &&
4565 TI.getPointerWidth(AddrSpace: LangAS::Default) > TI.getLongWidth())
4566 OffsetFlagsTy = CGM.getContext().LongLongTy;
4567 llvm::Type *OffsetFlagsLTy =
4568 CGM.getTypes().ConvertType(T: OffsetFlagsTy);
4569
4570 for (const auto &Base : RD->bases()) {
4571 // The __base_type member points to the RTTI for the base type.
4572 Fields.push_back(Elt: ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: Base.getType()));
4573
4574 auto *BaseDecl = Base.getType()->castAsCXXRecordDecl();
4575 int64_t OffsetFlags = 0;
4576
4577 // All but the lower 8 bits of __offset_flags are a signed offset.
4578 // For a non-virtual base, this is the offset in the object of the base
4579 // subobject. For a virtual base, this is the offset in the virtual table of
4580 // the virtual base offset for the virtual base referenced (negative).
4581 CharUnits Offset;
4582 if (Base.isVirtual())
4583 Offset =
4584 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, VBase: BaseDecl);
4585 else {
4586 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(D: RD);
4587 Offset = Layout.getBaseClassOffset(Base: BaseDecl);
4588 };
4589
4590 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4591
4592 // The low-order byte of __offset_flags contains flags, as given by the
4593 // masks from the enumeration __offset_flags_masks.
4594 if (Base.isVirtual())
4595 OffsetFlags |= BCTI_Virtual;
4596 if (Base.getAccessSpecifier() == AS_public)
4597 OffsetFlags |= BCTI_Public;
4598
4599 Fields.push_back(Elt: llvm::ConstantInt::getSigned(Ty: OffsetFlagsLTy, V: OffsetFlags));
4600 }
4601}
4602
4603/// Compute the flags for a __pbase_type_info, and remove the corresponding
4604/// pieces from \p Type.
4605static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4606 unsigned Flags = 0;
4607
4608 if (Type.isConstQualified())
4609 Flags |= ItaniumRTTIBuilder::PTI_Const;
4610 if (Type.isVolatileQualified())
4611 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4612 if (Type.isRestrictQualified())
4613 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4614 Type = Type.getUnqualifiedType();
4615
4616 // Itanium C++ ABI 2.9.5p7:
4617 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
4618 // incomplete class type, the incomplete target type flag is set.
4619 if (ContainsIncompleteClassType(Ty: Type))
4620 Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4621
4622 if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4623 if (Proto->isNothrow()) {
4624 Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4625 Type = Ctx.getFunctionTypeWithExceptionSpec(Orig: Type, ESI: EST_None);
4626 }
4627 }
4628
4629 return Flags;
4630}
4631
4632/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4633/// used for pointer types.
4634void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4635 // Itanium C++ ABI 2.9.5p7:
4636 // __flags is a flag word describing the cv-qualification and other
4637 // attributes of the type pointed to
4638 unsigned Flags = extractPBaseFlags(Ctx&: CGM.getContext(), Type&: PointeeTy);
4639
4640 llvm::Type *UnsignedIntLTy =
4641 CGM.getTypes().ConvertType(T: CGM.getContext().UnsignedIntTy);
4642 Fields.push_back(Elt: llvm::ConstantInt::get(Ty: UnsignedIntLTy, V: Flags));
4643
4644 // Itanium C++ ABI 2.9.5p7:
4645 // __pointee is a pointer to the std::type_info derivation for the
4646 // unqualified type being pointed to.
4647 llvm::Constant *PointeeTypeInfo =
4648 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: PointeeTy);
4649 Fields.push_back(Elt: PointeeTypeInfo);
4650}
4651
4652/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4653/// struct, used for member pointer types.
4654void
4655ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4656 QualType PointeeTy = Ty->getPointeeType();
4657
4658 // Itanium C++ ABI 2.9.5p7:
4659 // __flags is a flag word describing the cv-qualification and other
4660 // attributes of the type pointed to.
4661 unsigned Flags = extractPBaseFlags(Ctx&: CGM.getContext(), Type&: PointeeTy);
4662
4663 const auto *RD = Ty->getMostRecentCXXRecordDecl();
4664 if (!RD->hasDefinition())
4665 Flags |= PTI_ContainingClassIncomplete;
4666
4667 llvm::Type *UnsignedIntLTy =
4668 CGM.getTypes().ConvertType(T: CGM.getContext().UnsignedIntTy);
4669 Fields.push_back(Elt: llvm::ConstantInt::get(Ty: UnsignedIntLTy, V: Flags));
4670
4671 // Itanium C++ ABI 2.9.5p7:
4672 // __pointee is a pointer to the std::type_info derivation for the
4673 // unqualified type being pointed to.
4674 llvm::Constant *PointeeTypeInfo =
4675 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: PointeeTy);
4676 Fields.push_back(Elt: PointeeTypeInfo);
4677
4678 // Itanium C++ ABI 2.9.5p9:
4679 // __context is a pointer to an abi::__class_type_info corresponding to the
4680 // class type containing the member pointed to
4681 // (e.g., the "A" in "int A::*").
4682 CanQualType T = CGM.getContext().getCanonicalTagType(TD: RD);
4683 Fields.push_back(Elt: ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: T));
4684}
4685
4686llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4687 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4688}
4689
4690void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4691 // Types added here must also be added to TypeInfoIsInStandardLibrary.
4692 QualType FundamentalTypes[] = {
4693 getContext().VoidTy, getContext().NullPtrTy,
4694 getContext().BoolTy, getContext().WCharTy,
4695 getContext().CharTy, getContext().UnsignedCharTy,
4696 getContext().SignedCharTy, getContext().ShortTy,
4697 getContext().UnsignedShortTy, getContext().IntTy,
4698 getContext().UnsignedIntTy, getContext().LongTy,
4699 getContext().UnsignedLongTy, getContext().LongLongTy,
4700 getContext().UnsignedLongLongTy, getContext().Int128Ty,
4701 getContext().UnsignedInt128Ty, getContext().HalfTy,
4702 getContext().FloatTy, getContext().DoubleTy,
4703 getContext().LongDoubleTy, getContext().Float128Ty,
4704 getContext().Char8Ty, getContext().Char16Ty,
4705 getContext().Char32Ty
4706 };
4707 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4708 RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(D: RD)
4709 ? llvm::GlobalValue::DLLExportStorageClass
4710 : llvm::GlobalValue::DefaultStorageClass;
4711 llvm::GlobalValue::VisibilityTypes Visibility =
4712 CodeGenModule::GetLLVMVisibility(V: RD->getVisibility());
4713 for (const QualType &FundamentalType : FundamentalTypes) {
4714 QualType PointerType = getContext().getPointerType(T: FundamentalType);
4715 QualType PointerTypeConst = getContext().getPointerType(
4716 T: FundamentalType.withConst());
4717 for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4718 ItaniumRTTIBuilder(*this).BuildTypeInfo(
4719 Ty: Type, Linkage: llvm::GlobalValue::ExternalLinkage,
4720 Visibility, DLLStorageClass);
4721 }
4722}
4723
4724/// What sort of uniqueness rules should we use for the RTTI for the
4725/// given type?
4726ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4727 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4728 if (shouldRTTIBeUnique())
4729 return RUK_Unique;
4730
4731 // It's only necessary for linkonce_odr or weak_odr linkage.
4732 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4733 Linkage != llvm::GlobalValue::WeakODRLinkage)
4734 return RUK_Unique;
4735
4736 // It's only necessary with default visibility.
4737 if (CanTy->getVisibility() != DefaultVisibility)
4738 return RUK_Unique;
4739
4740 // If we're not required to publish this symbol, hide it.
4741 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4742 return RUK_NonUniqueHidden;
4743
4744 // If we're required to publish this symbol, as we might be under an
4745 // explicit instantiation, leave it with default visibility but
4746 // enable string-comparisons.
4747 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4748 return RUK_NonUniqueVisible;
4749}
4750
// Find out how to codegen the complete destructor and constructor
namespace {
// How a complete-object structor relates to its base variant:
//   Emit   - emit it as its own definition.
//   RAUW   - don't emit it; replace all uses of its mangled name with the
//            base variant (see CGM.addReplacement in emitCXXStructor).
//   Alias  - emit it as a GlobalAlias to the base variant.
//   COMDAT - emit both variants into a shared comdat group (the C5/D5
//            comdat; only ELF and wasm support this).
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
}
4755static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4756 const CXXMethodDecl *MD) {
4757 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4758 return StructorCodegen::Emit;
4759
4760 // The complete and base structors are not equivalent if there are any virtual
4761 // bases, so emit separate functions.
4762 if (MD->getParent()->getNumVBases())
4763 return StructorCodegen::Emit;
4764
4765 GlobalDecl AliasDecl;
4766 if (const auto *DD = dyn_cast<CXXDestructorDecl>(Val: MD)) {
4767 AliasDecl = GlobalDecl(DD, Dtor_Complete);
4768 } else {
4769 const auto *CD = cast<CXXConstructorDecl>(Val: MD);
4770 AliasDecl = GlobalDecl(CD, Ctor_Complete);
4771 }
4772 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(GD: AliasDecl);
4773
4774 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4775 return StructorCodegen::RAUW;
4776
4777 // FIXME: Should we allow available_externally aliases?
4778 if (!llvm::GlobalAlias::isValidLinkage(L: Linkage))
4779 return StructorCodegen::RAUW;
4780
4781 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4782 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4783 if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4784 CGM.getTarget().getTriple().isOSBinFormatWasm())
4785 return StructorCodegen::COMDAT;
4786 return StructorCodegen::Emit;
4787 }
4788
4789 return StructorCodegen::Alias;
4790}
4791
/// Emit the structor \p AliasDecl as an alias to the structor \p TargetDecl,
/// redirecting any existing uses of \p AliasDecl's mangled name to the alias.
static void emitConstructorDestructorAlias(CodeGenModule &CGM,
                                           GlobalDecl AliasDecl,
                                           GlobalDecl TargetDecl) {
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(GD: AliasDecl);

  // If a definition already exists under the alias's name, leave it alone.
  StringRef MangledName = CGM.getMangledName(GD: AliasDecl);
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(Ref: MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  auto *Aliasee = cast<llvm::GlobalValue>(Val: CGM.GetAddrOfGlobal(GD: TargetDecl));

  // Create the alias with no name.
  auto *Alias = llvm::GlobalAlias::create(Linkage, Name: "", Aliasee);

  // Constructors and destructors are always unnamed_addr.
  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Switch any previous uses to the alias.
  if (Entry) {
    assert(Entry->getType() == Aliasee->getType() &&
           "declaration exists with different type");
    // Take over the existing declaration's name, then retire it.
    Alias->takeName(V: Entry);
    Entry->replaceAllUsesWith(V: Alias);
    Entry->eraseFromParent();
  } else {
    Alias->setName(MangledName);
  }

  // Finally, set up the alias's attributes.
  CGM.SetCommonAttributes(GD: AliasDecl, GV: Alias);
}
4824
/// Emit one variant of a constructor or destructor, possibly sharing code
/// with another variant via an alias, name replacement, or a comdat group.
void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(Val: GD.getDecl());
  auto *CD = dyn_cast<CXXConstructorDecl>(Val: MD);
  // Exactly one of CD/DD is non-null: a structor is either a constructor
  // or a destructor.
  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(Val: MD);

  StructorCodegen CGType = getCodegenToUse(CGM, MD);

  // For the complete variant, try to avoid emitting a second copy of the
  // base variant's code.
  if (CD ? GD.getCtorType() == Ctor_Complete
         : GD.getDtorType() == Dtor_Complete) {
    GlobalDecl BaseDecl;
    if (CD)
      BaseDecl = GD.getWithCtorType(Type: Ctor_Base);
    else
      BaseDecl = GD.getWithDtorType(Type: Dtor_Base);

    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
      emitConstructorDestructorAlias(CGM, AliasDecl: GD, TargetDecl: BaseDecl);
      return;
    }

    if (CGType == StructorCodegen::RAUW) {
      // Record that uses of this mangled name should be redirected to the
      // base variant instead of emitting a definition.
      StringRef MangledName = CGM.getMangledName(GD);
      auto *Aliasee = CGM.GetAddrOfGlobal(GD: BaseDecl);
      CGM.addReplacement(Name: MangledName, C: Aliasee);
      return;
    }
  }

  // The base destructor is equivalent to the base destructor of its
  // base class if there is exactly one non-virtual base class with a
  // non-trivial destructor, there are no fields with a non-trivial
  // destructor, and the body of the destructor is trivial.
  if (DD && GD.getDtorType() == Dtor_Base &&
      CGType != StructorCodegen::COMDAT &&
      !CGM.TryEmitBaseDestructorAsAlias(D: DD))
    return;

  // FIXME: The deleting destructor is equivalent to the selected operator
  // delete if:
  //  * either the delete is a destroying operator delete or the destructor
  //    would be trivial if it weren't virtual,
  //  * the conversion from the 'this' parameter to the first parameter of the
  //    destructor is equivalent to a bitcast,
  //  * the destructor does not have an implicit "this" return, and
  //  * the operator delete has the same calling convention and IR function type
  //    as the destructor.
  // In such cases we should try to emit the deleting dtor as an alias to the
  // selected 'operator delete'.

  llvm::Function *Fn = CGM.codegenCXXStructor(GD);

  if (CGType == StructorCodegen::COMDAT) {
    // Place the definition in the shared C5/D5 comdat group.
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    if (DD)
      getMangleContext().mangleCXXDtorComdat(D: DD, Out);
    else
      getMangleContext().mangleCXXCtorComdat(D: CD, Out);
    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Name: Out.str());
    Fn->setComdat(C);
  } else {
    CGM.maybeSetTrivialComdat(D: *MD, GO&: *Fn);
  }
}
4889
4890static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4891 // void *__cxa_begin_catch(void*);
4892 llvm::FunctionType *FTy = llvm::FunctionType::get(
4893 Result: CGM.Int8PtrTy, Params: CGM.Int8PtrTy, /*isVarArg=*/false);
4894
4895 return CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_begin_catch");
4896}
4897
4898static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4899 // void __cxa_end_catch();
4900 llvm::FunctionType *FTy =
4901 llvm::FunctionType::get(Result: CGM.VoidTy, /*isVarArg=*/false);
4902
4903 return CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_end_catch");
4904}
4905
4906static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4907 // void *__cxa_get_exception_ptr(void*);
4908 llvm::FunctionType *FTy = llvm::FunctionType::get(
4909 Result: CGM.Int8PtrTy, Params: CGM.Int8PtrTy, /*isVarArg=*/false);
4910
4911 return CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cxa_get_exception_ptr");
4912}
4913
4914namespace {
4915 /// A cleanup to call __cxa_end_catch. In many cases, the caught
4916 /// exception type lets us state definitively that the thrown exception
4917 /// type does not have a destructor. In particular:
4918 /// - Catch-alls tell us nothing, so we have to conservatively
4919 /// assume that the thrown exception might have a destructor.
4920 /// - Catches by reference behave according to their base types.
4921 /// - Catches of non-record types will only trigger for exceptions
4922 /// of non-record types, which never have destructors.
4923 /// - Catches of record types can trigger for arbitrary subclasses
4924 /// of the caught type, so we have to assume the actual thrown
4925 /// exception type might have a throwing destructor, even if the
4926 /// caught type's destructor is trivial or nothrow.
4927 struct CallEndCatch final : EHScopeStack::Cleanup {
4928 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4929 bool MightThrow;
4930
4931 void Emit(CodeGenFunction &CGF, Flags flags) override {
4932 if (!MightThrow) {
4933 CGF.EmitNounwindRuntimeCall(callee: getEndCatchFn(CGM&: CGF.CGM));
4934 return;
4935 }
4936
4937 CGF.EmitRuntimeCallOrInvoke(callee: getEndCatchFn(CGM&: CGF.CGM));
4938 }
4939 };
4940}
4941
4942/// Emits a call to __cxa_begin_catch and enters a cleanup to call
4943/// __cxa_end_catch. If -fassume-nothrow-exception-dtor is specified, we assume
4944/// that the exception object's dtor is nothrow, therefore the __cxa_end_catch
4945/// call can be marked as nounwind even if EndMightThrow is true.
4946///
4947/// \param EndMightThrow - true if __cxa_end_catch might throw
4948static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4949 llvm::Value *Exn,
4950 bool EndMightThrow) {
4951 llvm::CallInst *call =
4952 CGF.EmitNounwindRuntimeCall(callee: getBeginCatchFn(CGM&: CGF.CGM), args: Exn);
4953
4954 CGF.EHStack.pushCleanup<CallEndCatch>(
4955 Kind: NormalAndEHCleanup,
4956 A: EndMightThrow && !CGF.CGM.getLangOpts().AssumeNothrowExceptionDtor);
4957
4958 return call;
4959}
4960
/// A "special initializer" callback for initializing a catch
/// parameter during catch initialization.
///
/// \param CatchParam the variable declared in the handler's
///   exception-declaration
/// \param ParamAddr the storage already allocated for \p CatchParam
/// \param Loc source location used for the loads emitted when copying
///   scalar/complex values out of the exception object
static void InitCatchParam(CodeGenFunction &CGF,
                           const VarDecl &CatchParam,
                           Address ParamAddr,
                           SourceLocation Loc) {
  // Load the exception from where the landing pad saved it.
  llvm::Value *Exn = CGF.getExceptionFromSlot();

  CanQualType CatchType =
      CGF.CGM.getContext().getCanonicalType(T: CatchParam.getType());
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(T: CatchType);

  // If we're catching by reference, we can just cast the object
  // pointer to the appropriate pointer.
  if (isa<ReferenceType>(Val: CatchType)) {
    QualType CaughtType = cast<ReferenceType>(Val&: CatchType)->getPointeeType();
    // Record types may run a (potentially throwing) dtor at end-catch.
    bool EndCatchMightThrow = CaughtType->isRecordType();

    // __cxa_begin_catch returns the adjusted object pointer.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndMightThrow: EndCatchMightThrow);

    // We have no way to tell the personality function that we're
    // catching by reference, so if we're catching a pointer,
    // __cxa_begin_catch will actually return that pointer by value.
    if (const PointerType *PT = dyn_cast<PointerType>(Val&: CaughtType)) {
      QualType PointeeType = PT->getPointeeType();

      // When catching by reference, generally we should just ignore
      // this by-value pointer and use the exception object instead.
      if (!PointeeType->isRecordType()) {

        // Exn points to the struct _Unwind_Exception header, which
        // we have to skip past in order to reach the exception data.
        unsigned HeaderSize =
            CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
        AdjustedExn =
            CGF.Builder.CreateConstGEP1_32(Ty: CGF.Int8Ty, Ptr: Exn, Idx0: HeaderSize);

        // However, if we're catching a pointer-to-record type that won't
        // work, because the personality function might have adjusted
        // the pointer. There's actually no way for us to fully satisfy
        // the language/ABI contract here: we can't use Exn because it
        // might have the wrong adjustment, but we can't use the by-value
        // pointer because it's off by a level of abstraction.
        //
        // The current solution is to dump the adjusted pointer into an
        // alloca, which breaks language semantics (because changing the
        // pointer doesn't change the exception) but at least works.
        // The better solution would be to filter out non-exact matches
        // and rethrow them, but this is tricky because the rethrow
        // really needs to be catchable by other sites at this landing
        // pad. The best solution is to fix the personality function.
      } else {
        // Pull the pointer for the reference type off.
        llvm::Type *PtrTy = CGF.ConvertTypeForMem(T: CaughtType);

        // Create the temporary and write the adjusted pointer into it.
        Address ExnPtrTmp =
          CGF.CreateTempAlloca(Ty: PtrTy, align: CGF.getPointerAlign(), Name: "exn.byref.tmp");
        llvm::Value *Casted = CGF.Builder.CreateBitCast(V: AdjustedExn, DestTy: PtrTy);
        CGF.Builder.CreateStore(Val: Casted, Addr: ExnPtrTmp);

        // Bind the reference to the temporary.
        AdjustedExn = ExnPtrTmp.emitRawPointer(CGF);
      }
    }

    // Store the (possibly adjusted) exception pointer as the reference.
    llvm::Value *ExnCast =
        CGF.Builder.CreateBitCast(V: AdjustedExn, DestTy: LLVMCatchTy, Name: "exn.byref");
    CGF.Builder.CreateStore(Val: ExnCast, Addr: ParamAddr);
    return;
  }

  // Scalars and complexes.
  TypeEvaluationKind TEK = CGF.getEvaluationKind(T: CatchType);
  if (TEK != TEK_Aggregate) {
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndMightThrow: false);

    // If the catch type is a pointer type, __cxa_begin_catch returns
    // the pointer by value.
    if (CatchType->hasPointerRepresentation()) {
      llvm::Value *CastExn =
          CGF.Builder.CreateBitCast(V: AdjustedExn, DestTy: LLVMCatchTy, Name: "exn.casted");

      // Honor ObjC ownership qualifiers on the caught pointer.
      switch (CatchType.getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        CastExn = CGF.EmitARCRetainNonBlock(value: CastExn);
        [[fallthrough]];

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(Val: CastExn, Addr: ParamAddr);
        return;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(addr: ParamAddr, value: CastExn);
        return;
      }
      llvm_unreachable("bad ownership qualifier!");
    }

    // Otherwise, it returns a pointer into the exception object.

    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(V: AdjustedExn, T: CatchType);
    LValue destLV = CGF.MakeAddrLValue(Addr: ParamAddr, T: CatchType);
    switch (TEK) {
    case TEK_Complex:
      CGF.EmitStoreOfComplex(V: CGF.EmitLoadOfComplex(src: srcLV, loc: Loc), dest: destLV,
                             /*init*/ isInit: true);
      return;
    case TEK_Scalar: {
      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(lvalue: srcLV, Loc);
      CGF.EmitStoreOfScalar(value: ExnLoad, lvalue: destLV, /*init*/ isInit: true);
      return;
    }
    case TEK_Aggregate:
      llvm_unreachable("evaluation kind filtered out!");
    }
    llvm_unreachable("bad evaluation kind");
  }

  // Aggregates: catch by value of a record type.
  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
  auto catchRD = CatchType->getAsCXXRecordDecl();
  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(CD: catchRD);

  llvm::Type *PtrTy = CGF.DefaultPtrTy;

  // Check for a copy expression. If we don't have a copy expression,
  // that means a trivial copy is okay.
  const Expr *copyExpr = CatchParam.getInit();
  if (!copyExpr) {
    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, EndMightThrow: true);
    Address adjustedExn(CGF.Builder.CreateBitCast(V: rawAdjustedExn, DestTy: PtrTy),
                        LLVMCatchTy, caughtExnAlignment);
    LValue Dest = CGF.MakeAddrLValue(Addr: ParamAddr, T: CatchType);
    LValue Src = CGF.MakeAddrLValue(Addr: adjustedExn, T: CatchType);
    CGF.EmitAggregateCopy(Dest, Src, EltTy: CatchType, MayOverlap: AggValueSlot::DoesNotOverlap);
    return;
  }

  // We have to call __cxa_get_exception_ptr to get the adjusted
  // pointer before copying.
  llvm::CallInst *rawAdjustedExn =
      CGF.EmitNounwindRuntimeCall(callee: getGetExceptionPtrFn(CGM&: CGF.CGM), args: Exn);

  // Cast that to the appropriate type.
  Address adjustedExn(CGF.Builder.CreateBitCast(V: rawAdjustedExn, DestTy: PtrTy),
                      LLVMCatchTy, caughtExnAlignment);

  // The copy expression is defined in terms of an OpaqueValueExpr.
  // Find it and map it to the adjusted expression.
  CodeGenFunction::OpaqueValueMapping
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(expr: copyExpr),
           CGF.MakeAddrLValue(Addr: adjustedExn, T: CatchParam.getType()));

  // Call the copy ctor in a terminate scope.
  CGF.EHStack.pushTerminate();

  // Perform the copy construction.
  CGF.EmitAggExpr(E: copyExpr,
                  AS: AggValueSlot::forAddr(addr: ParamAddr, quals: Qualifiers(),
                                          isDestructed: AggValueSlot::IsNotDestructed,
                                          needsGC: AggValueSlot::DoesNotNeedGCBarriers,
                                          isAliased: AggValueSlot::IsNotAliased,
                                          mayOverlap: AggValueSlot::DoesNotOverlap));

  // Leave the terminate scope.
  CGF.EHStack.popTerminate();

  // Undo the opaque value mapping.
  opaque.pop();

  // Finally we can call __cxa_begin_catch.
  CallBeginCatch(CGF, Exn, EndMightThrow: true);
}
5138
5139/// Begins a catch statement by initializing the catch variable and
5140/// calling __cxa_begin_catch.
5141void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
5142 const CXXCatchStmt *S) {
5143 // We have to be very careful with the ordering of cleanups here:
5144 // C++ [except.throw]p4:
5145 // The destruction [of the exception temporary] occurs
5146 // immediately after the destruction of the object declared in
5147 // the exception-declaration in the handler.
5148 //
5149 // So the precise ordering is:
5150 // 1. Construct catch variable.
5151 // 2. __cxa_begin_catch
5152 // 3. Enter __cxa_end_catch cleanup
5153 // 4. Enter dtor cleanup
5154 //
5155 // We do this by using a slightly abnormal initialization process.
5156 // Delegation sequence:
5157 // - ExitCXXTryStmt opens a RunCleanupsScope
5158 // - EmitAutoVarAlloca creates the variable and debug info
5159 // - InitCatchParam initializes the variable from the exception
5160 // - CallBeginCatch calls __cxa_begin_catch
5161 // - CallBeginCatch enters the __cxa_end_catch cleanup
5162 // - EmitAutoVarCleanups enters the variable destructor cleanup
5163 // - EmitCXXTryStmt emits the code for the catch body
5164 // - EmitCXXTryStmt close the RunCleanupsScope
5165
5166 VarDecl *CatchParam = S->getExceptionDecl();
5167 if (!CatchParam) {
5168 llvm::Value *Exn = CGF.getExceptionFromSlot();
5169 CallBeginCatch(CGF, Exn, EndMightThrow: true);
5170 return;
5171 }
5172
5173 // Emit the local.
5174 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(var: *CatchParam);
5175 {
5176 ApplyAtomGroup Grp(CGF.getDebugInfo());
5177 InitCatchParam(CGF, CatchParam: *CatchParam, ParamAddr: var.getObjectAddress(CGF),
5178 Loc: S->getBeginLoc());
5179 }
5180 CGF.EmitAutoVarCleanups(emission: var);
5181}
5182
/// Get or define the following function:
///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
/// This code is used only in C++.
static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
  ASTContext &C = CGM.getContext();
  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      resultType: C.VoidTy, argTypes: {C.getPointerType(T: C.CharTy)});
  llvm::FunctionType *fnTy = CGM.getTypes().GetFunctionType(Info: FI);
  llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
      Ty: fnTy, Name: "__clang_call_terminate", ExtraAttrs: llvm::AttributeList(), /*Local=*/true);
  llvm::Function *fn =
      cast<llvm::Function>(Val: fnRef.getCallee()->stripPointerCasts());
  // Only build the body once; later calls reuse the existing definition.
  if (fn->empty()) {
    CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI, F: fn, /*IsThunk=*/false);
    CGM.SetLLVMFunctionAttributesForDefinition(D: nullptr, F: fn);
    fn->setDoesNotThrow();
    fn->setDoesNotReturn();

    // What we really want is to massively penalize inlining without
    // forbidding it completely. The difference between that and
    // 'noinline' is negligible.
    fn->addFnAttr(Kind: llvm::Attribute::NoInline);

    // Allow this function to be shared across translation units, but
    // we don't want it to turn into an exported symbol.
    fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
    fn->setVisibility(llvm::Function::HiddenVisibility);
    if (CGM.supportsCOMDAT())
      fn->setComdat(CGM.getModule().getOrInsertComdat(Name: fn->getName()));

    // Set up the function.
    llvm::BasicBlock *entry =
        llvm::BasicBlock::Create(Context&: CGM.getLLVMContext(), Name: "", Parent: fn);
    CGBuilderTy builder(CGM, entry);

    // Pull the exception pointer out of the parameter list.
    llvm::Value *exn = &*fn->arg_begin();

    // Call __cxa_begin_catch(exn).
    llvm::CallInst *catchCall = builder.CreateCall(Callee: getBeginCatchFn(CGM), Args: exn);
    catchCall->setDoesNotThrow();
    catchCall->setCallingConv(CGM.getRuntimeCC());

    // Call std::terminate().
    llvm::CallInst *termCall = builder.CreateCall(Callee: CGM.getTerminateFn());
    termCall->setDoesNotThrow();
    termCall->setDoesNotReturn();
    termCall->setCallingConv(CGM.getRuntimeCC());

    // std::terminate cannot return.
    builder.CreateUnreachable();
  }
  return fnRef;
}
5237
5238llvm::CallInst *
5239ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
5240 llvm::Value *Exn) {
5241 // In C++, we want to call __cxa_begin_catch() before terminating.
5242 if (Exn) {
5243 assert(CGF.CGM.getLangOpts().CPlusPlus);
5244 return CGF.EmitNounwindRuntimeCall(callee: getClangCallTerminateFn(CGM&: CGF.CGM), args: Exn);
5245 }
5246 return CGF.EmitNounwindRuntimeCall(callee: CGF.CGM.getTerminateFn());
5247}
5248
5249std::pair<llvm::Value *, const CXXRecordDecl *>
5250ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
5251 const CXXRecordDecl *RD) {
5252 return {CGF.GetVTablePtr(This, VTableTy: CGM.Int8PtrTy, VTableClass: RD), RD};
5253}
5254
5255llvm::Constant *
5256ItaniumCXXABI::getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD) {
5257 const CXXMethodDecl *origMD =
5258 cast<CXXMethodDecl>(Val: CGM.getItaniumVTableContext()
5259 .findOriginalMethod(GD: MD->getCanonicalDecl())
5260 .getDecl());
5261 llvm::Constant *thunk = getOrCreateVirtualFunctionPointerThunk(MD: origMD);
5262 QualType funcType = CGM.getContext().getMemberPointerType(
5263 T: MD->getType(), /*Qualifier=*/std::nullopt, Cls: MD->getParent());
5264 return CGM.getMemberFunctionPointer(Pointer: thunk, FT: funcType);
5265}
5266
5267void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
5268 const CXXCatchStmt *C) {
5269 if (CGF.getTarget().hasFeature(Feature: "exception-handling"))
5270 CGF.EHStack.pushCleanup<CatchRetScope>(
5271 Kind: NormalCleanup, A: cast<llvm::CatchPadInst>(Val: CGF.CurrentFuncletPad));
5272 ItaniumCXXABI::emitBeginCatch(CGF, S: C);
5273}
5274
5275llvm::CallInst *
5276WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
5277 llvm::Value *Exn) {
5278 // Itanium ABI calls __clang_call_terminate(), which __cxa_begin_catch() on
5279 // the violating exception to mark it handled, but it is currently hard to do
5280 // with wasm EH instruction structure with catch/catch_all, we just call
5281 // std::terminate and ignore the violating exception as in CGCXXABI in Wasm EH
5282 // and call __clang_call_terminate only in Emscripten EH.
5283 // TODO Consider code transformation that makes calling __clang_call_terminate
5284 // in Wasm EH possible.
5285 if (Exn && !EHPersonality::get(CGF).isWasmPersonality()) {
5286 assert(CGF.CGM.getLangOpts().CPlusPlus);
5287 return CGF.EmitNounwindRuntimeCall(callee: getClangCallTerminateFn(CGM&: CGF.CGM), args: Exn);
5288 }
5289 return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
5290}
5291
/// Register a global destructor as best as we know how.
///
/// \param D the variable being destroyed
/// \param Dtor the destructor (or destructor-like) function to run
/// \param Addr the address of the object to destroy
void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                                  llvm::FunctionCallee Dtor,
                                  llvm::Constant *Addr) {
  // Thread-local variables use __pt_atexit_np; there is no unregister path
  // for them (see note below).
  if (D.getTLSKind() != VarDecl::TLS_None) {
    llvm::PointerType *PtrTy = CGF.DefaultPtrTy;

    // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
    llvm::FunctionType *AtExitTy =
        llvm::FunctionType::get(Result: CGM.IntTy, Params: {CGM.IntTy, PtrTy}, isVarArg: true);

    // Fetch the actual function.
    llvm::FunctionCallee AtExit =
        CGM.CreateRuntimeFunction(Ty: AtExitTy, Name: "__pt_atexit_np");

    // Create __dtor function for the var decl.
    llvm::Function *DtorStub = CGF.createTLSAtExitStub(VD: D, Dtor, Addr, AtExit);

    // Register above __dtor with atexit().
    // First param is flags and must be 0, second param is function ptr
    llvm::Value *NV = llvm::Constant::getNullValue(Ty: CGM.IntTy);
    CGF.EmitNounwindRuntimeCall(callee: AtExit, args: {NV, DtorStub});

    // Cannot unregister TLS __dtor so done
    return;
  }

  // Create __dtor function for the var decl.
  llvm::Function *DtorStub =
      cast<llvm::Function>(Val: CGF.createAtExitStub(VD: D, Dtor, Addr));

  // Register above __dtor with atexit().
  CGF.registerGlobalDtorWithAtExit(dtorStub: DtorStub);

  // Emit __finalize function to unregister __dtor and (as appropriate) call
  // __dtor.
  emitCXXStermFinalizer(D, dtorStub: DtorStub, addr: Addr);
}
5330
/// Emit a "sterm finalizer" for \p D: a function that calls unatexit() to
/// unregister \p dtorStub and, if the cleanup was still pending, runs it.
void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                                     llvm::Constant *addr) {
  llvm::FunctionType *FTy = llvm::FunctionType::get(Result: CGM.VoidTy, isVarArg: false);
  // Mangle the finalizer's name from the variable.
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getMangleContext().mangleDynamicStermFinalizer(D: &D, Out);
  }

  // Create the finalization action associated with a variable.
  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
      ty: FTy, name: FnName.str(), FI, Loc: D.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GD: GlobalDecl(), RetTy: CGM.getContext().VoidTy, Fn: StermFinalizer, FnInfo: FI,
                    Args: FunctionArgList(), Loc: D.getLocation(),
                    StartLoc: D.getInit()->getExprLoc());

  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // the unatexit returns a value of 0, meaning that the cleanup is still
  // pending (and we should call the __dtor function).
  llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);

  llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(Arg: V, Name: "needs_destruct");

  llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock(name: "destruct.call");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock(name: "destruct.end");

  // Check if unatexit returns a value of 0. If it does, jump to
  // DestructCallBlock, otherwise jump to EndBlock directly.
  CGF.Builder.CreateCondBr(Cond: NeedsDestruct, True: DestructCallBlock, False: EndBlock);

  CGF.EmitBlock(BB: DestructCallBlock);

  // Emit the call to dtorStub.
  llvm::CallInst *CI = CGF.Builder.CreateCall(Callee: dtorStub);

  // Make sure the call and the callee agree on calling convention.
  CI->setCallingConv(dtorStub->getCallingConv());

  CGF.EmitBlock(BB: EndBlock);

  CGF.FinishFunction();

  if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
    // Honor an explicit init_priority by using the prioritized list.
    CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
                                             Priority: IPA->getPriority());
  } else if (isTemplateInstantiation(Kind: D.getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(VD: &D) == GVA_DiscardableODR) {
    // According to C++ [basic.start.init]p2, class template static data
    // members (i.e., implicitly or explicitly instantiated specializations)
    // have unordered initialization. As a consequence, we can put them into
    // their own llvm.global_dtors entry.
    CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, Priority: 65535);
  } else {
    CGM.AddCXXStermFinalizerEntry(DtorFn: StermFinalizer);
  }
}
5392