//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides C++ code generation targeting the Itanium C++ ABI. The class
// in this file generates structures that follow the Itanium C++ ABI, which is
// documented at:
//  https://itanium-cxx-abi.github.io/cxx-abi/abi.html
//  https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
//
// It also supports the closely-related ARM ABI, documented at:
// https://developer.arm.com/documentation/ihi0041/g/
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/Type.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ScopedPrinter.h"

#include <optional>

using namespace clang;
using namespace CodeGen;

namespace {
class ItaniumCXXABI : public CodeGen::CGCXXABI {
  /// VTables - All the vtables which have been defined.
  llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;

  /// All the thread wrapper functions that have been used.
  llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
      ThreadWrappers;

protected:
  bool UseARMMethodPtrABI;
  bool UseARMGuardVarABI;
  bool Use32BitVTableOffsetABI;

  ItaniumMangleContext &getMangleContext() {
    return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
  }

public:
  ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
                bool UseARMMethodPtrABI = false,
                bool UseARMGuardVarABI = false) :
    CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
    UseARMGuardVarABI(UseARMGuardVarABI),
    Use32BitVTableOffsetABI(false) { }

  bool classifyReturnType(CGFunctionInfo &FI) const override;

  RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
    // If C++ prohibits us from making a copy, pass by address.
    if (!RD->canPassInRegisters())
      return RAA_Indirect;
    return RAA_Default;
  }

  bool isThisCompleteObject(GlobalDecl GD) const override {
    // The Itanium ABI has separate complete-object vs. base-object
    // variants of both constructors and destructors.
    if (isa<CXXDestructorDecl>(GD.getDecl())) {
      switch (GD.getDtorType()) {
      case Dtor_Complete:
      case Dtor_Deleting:
        return true;

      case Dtor_Base:
        return false;

      case Dtor_Comdat:
        llvm_unreachable("emitting dtor comdat as function?");
      }
      llvm_unreachable("bad dtor kind");
    }
    if (isa<CXXConstructorDecl>(GD.getDecl())) {
      switch (GD.getCtorType()) {
      case Ctor_Complete:
        return true;

      case Ctor_Base:
        return false;

      case Ctor_CopyingClosure:
      case Ctor_DefaultClosure:
        llvm_unreachable("closure ctors in Itanium ABI?");

      case Ctor_Comdat:
        llvm_unreachable("emitting ctor comdat as function?");
      }
      llvm_unreachable("bad ctor kind");
    }

    // No other kinds.
    return false;
  }

  bool isZeroInitializable(const MemberPointerType *MPT) override;

  llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;

  CGCallee
  EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
                                  const Expr *E,
                                  Address This,
                                  llvm::Value *&ThisPtrForCall,
                                  llvm::Value *MemFnPtr,
                                  const MemberPointerType *MPT) override;

  llvm::Value *
  EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
                               Address Base,
                               llvm::Value *MemPtr,
                               const MemberPointerType *MPT) override;

  llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *Src) override;
  llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
                                              llvm::Constant *Src) override;

  llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;

  llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
  llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
                                        CharUnits offset) override;
  llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
  llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
                                     CharUnits ThisAdjustment);

  llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L, llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) override;

  llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *Addr,
                                          const MemberPointerType *MPT) override;

  void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
                               Address Ptr, QualType ElementType,
                               const CXXDestructorDecl *Dtor) override;

  void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
  void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;

  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;

  llvm::CallInst *
  emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                      llvm::Value *Exn) override;

  void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
  llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
  CatchTypeInfo
  getAddrOfCXXCatchHandlerType(QualType Ty,
                               QualType CatchHandlerType) override {
    return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
  }

  bool shouldTypeidBeNullChecked(QualType SrcRecordTy) override;
  void EmitBadTypeidCall(CodeGenFunction &CGF) override;
  llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
                          Address ThisPtr,
                          llvm::Type *StdTypeInfoPtrTy) override;

  bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                          QualType SrcRecordTy) override;

  /// Determine whether we know that all instances of type RecordTy will have
  /// the same vtable pointer value, and that this value is distinct from the
  /// vtable pointers of all other types. While the Itanium ABI requires this,
  /// in practice it doesn't always hold due to language extensions.
  bool hasUniqueVTablePointer(QualType RecordTy) {
    const CXXRecordDecl *RD = RecordTy->getAsCXXRecordDecl();

    // Under -fapple-kext, multiple definitions of the same vtable may be
    // emitted.
    if (!CGM.getCodeGenOpts().AssumeUniqueVTables ||
        getContext().getLangOpts().AppleKext)
      return false;

    // If the type_info* would be null, the vtable might be merged with that of
    // another type.
    if (!CGM.shouldEmitRTTI())
      return false;

    // If there's only one definition of the vtable in the program, it has a
    // unique address.
    if (!llvm::GlobalValue::isWeakForLinker(CGM.getVTableLinkage(RD)))
      return true;

    // Even if there are multiple definitions of the vtable, they are required
    // by the ABI to use the same symbol name, so should be merged at load
    // time. However, if the class has hidden visibility, there can be
    // different versions of the class in different modules, and the ABI
    // library might treat them as being the same.
    if (CGM.GetLLVMVisibility(RD->getVisibility()) !=
        llvm::GlobalValue::DefaultVisibility)
      return false;

    return true;
  }

  bool shouldEmitExactDynamicCast(QualType DestRecordTy) override {
    return hasUniqueVTablePointer(DestRecordTy);
  }

  llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address Value,
                                   QualType SrcRecordTy, QualType DestTy,
                                   QualType DestRecordTy,
                                   llvm::BasicBlock *CastEnd) override;

  llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address ThisAddr,
                                    QualType SrcRecordTy, QualType DestTy,
                                    QualType DestRecordTy,
                                    llvm::BasicBlock *CastSuccess,
                                    llvm::BasicBlock *CastFail) override;

  llvm::Value *emitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
                                     QualType SrcRecordTy) override;

  bool EmitBadCastCall(CodeGenFunction &CGF) override;

  llvm::Value *
  GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
                            const CXXRecordDecl *ClassDecl,
                            const CXXRecordDecl *BaseClassDecl) override;

  void EmitCXXConstructors(const CXXConstructorDecl *D) override;

  AddedStructorArgCounts
  buildStructorSignature(GlobalDecl GD,
                         SmallVectorImpl<CanQualType> &ArgTys) override;

  bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
                              CXXDtorType DT) const override {
    // Itanium does not emit any destructor variant as an inline thunk.
    // Delegating may occur as an optimization, but all variants are either
    // emitted with external linkage or as linkonce if they are inline and used.
    return false;
  }

  void EmitCXXDestructors(const CXXDestructorDecl *D) override;

  void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
                                 FunctionArgList &Params) override;

  void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;

  AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
                                               const CXXConstructorDecl *D,
                                               CXXCtorType Type,
                                               bool ForVirtualBase,
                                               bool Delegating) override;

  llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
                                             const CXXDestructorDecl *DD,
                                             CXXDtorType Type,
                                             bool ForVirtualBase,
                                             bool Delegating) override;

  void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
                          CXXDtorType Type, bool ForVirtualBase,
                          bool Delegating, Address This,
                          QualType ThisTy) override;

  void emitVTableDefinitions(CodeGenVTables &CGVT,
                             const CXXRecordDecl *RD) override;

  bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
                                           CodeGenFunction::VPtr Vptr) override;

  bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
    return true;
  }

  llvm::Constant *
  getVTableAddressPoint(BaseSubobject Base,
                        const CXXRecordDecl *VTableClass) override;

  llvm::Value *getVTableAddressPointInStructor(
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
      BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;

  llvm::Value *getVTableAddressPointInStructorWithVTT(
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
      BaseSubobject Base, const CXXRecordDecl *NearestVBase);

  llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
                                        CharUnits VPtrOffset) override;

  CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
                                     Address This, llvm::Type *Ty,
                                     SourceLocation Loc) override;

  llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
                                         const CXXDestructorDecl *Dtor,
                                         CXXDtorType DtorType, Address This,
                                         DeleteOrMemberCallExpr E) override;

  void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;

  bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
  bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;

  void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
                       bool ReturnAdjustment) override {
    // Allow inlining of thunks by emitting them with available_externally
    // linkage together with vtables when needed.
    if (ForVTable && !Thunk->hasLocalLinkage())
      Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
    CGM.setGVProperties(Thunk, GD);
  }

  bool exportThunk() override { return true; }

  llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
                                     const CXXRecordDecl *UnadjustedThisClass,
                                     const ThunkInfo &TI) override;

  llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const CXXRecordDecl *UnadjustedRetClass,
                                       const ReturnAdjustment &RA) override;

  size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
                              FunctionArgList &Args) const override {
    assert(!Args.empty() && "expected the arglist to not be empty!");
    return Args.size() - 1;
  }

  StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
  StringRef GetDeletedVirtualCallName() override
    { return "__cxa_deleted_virtual"; }

  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
                                   Address allocPtr,
                                   CharUnits cookieSize) override;

  void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
                       llvm::GlobalVariable *DeclPtr,
                       bool PerformInit) override;
  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::FunctionCallee dtor,
                          llvm::Constant *addr) override;

  llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                                llvm::Value *Val);
  void EmitThreadLocalInitFuncs(
      CodeGenModule &CGM,
      ArrayRef<const VarDecl *> CXXThreadLocals,
      ArrayRef<llvm::Function *> CXXThreadLocalInits,
      ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;

  bool usesThreadWrapperFunction(const VarDecl *VD) const override {
    return !isEmittedWithConstantInitializer(VD) ||
           mayNeedDestruction(VD);
  }
  LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
                                      QualType LValType) override;

  bool NeedsVTTParameter(GlobalDecl GD) override;

  llvm::Constant *
  getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD);

  /**************************** RTTI Uniqueness ******************************/

protected:
  /// Returns true if the ABI requires RTTI type_info objects to be unique
  /// across a program.
  virtual bool shouldRTTIBeUnique() const { return true; }

public:
  /// What sort of unique-RTTI behavior should we use?
  enum RTTIUniquenessKind {
    /// We are guaranteeing, or need to guarantee, that the RTTI string
    /// is unique.
    RUK_Unique,

    /// We are not guaranteeing uniqueness for the RTTI string, so we
    /// can demote to hidden visibility but must use string comparisons.
    RUK_NonUniqueHidden,

    /// We are not guaranteeing uniqueness for the RTTI string, so we
    /// have to use string comparisons, but we also have to emit it with
    /// non-hidden visibility.
    RUK_NonUniqueVisible
  };

  /// Return the required visibility status for the given type and linkage in
  /// the current ABI.
  RTTIUniquenessKind
  classifyRTTIUniqueness(QualType CanTy,
                         llvm::GlobalValue::LinkageTypes Linkage) const;
  friend class ItaniumRTTIBuilder;

  void emitCXXStructor(GlobalDecl GD) override;

  std::pair<llvm::Value *, const CXXRecordDecl *>
  LoadVTablePtr(CodeGenFunction &CGF, Address This,
                const CXXRecordDecl *RD) override;

private:
  llvm::Constant *
  getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD);

  bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
    const auto &VtableLayout =
        CGM.getItaniumVTableContext().getVTableLayout(RD);

    for (const auto &VtableComponent : VtableLayout.vtable_components()) {
      // Skip empty slots.
      if (!VtableComponent.isUsedFunctionPointerKind())
        continue;

      const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
      if (!Method->getCanonicalDecl()->isInlined())
        continue;

      StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
      auto *Entry = CGM.GetGlobalValue(Name);
      // This checks whether the virtual inline function has already been
      // emitted. Note that such a function may only be emitted after we have
      // tried to emit its vtable speculatively; because of this, we do an
      // extra pass after emitting all deferred vtables to find and emit
      // these vtables opportunistically.
      if (!Entry || Entry->isDeclaration())
        return true;
    }
    return false;
  }

  bool isVTableHidden(const CXXRecordDecl *RD) const {
    const auto &VtableLayout =
        CGM.getItaniumVTableContext().getVTableLayout(RD);

    for (const auto &VtableComponent : VtableLayout.vtable_components()) {
      if (VtableComponent.isRTTIKind()) {
        const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
        if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
          return true;
      } else if (VtableComponent.isUsedFunctionPointerKind()) {
        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
        if (Method->getVisibility() == Visibility::HiddenVisibility &&
            !Method->isDefined())
          return true;
      }
    }
    return false;
  }
};

class ARMCXXABI : public ItaniumCXXABI {
public:
  ARMCXXABI(CodeGen::CodeGenModule &CGM) :
    ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                  /*UseARMGuardVarABI=*/true) {}

  bool constructorsAndDestructorsReturnThis() const override { return true; }

  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
                           QualType ResTy) override;

  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
                                   CharUnits cookieSize) override;
};

class AppleARM64CXXABI : public ARMCXXABI {
public:
  AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
    Use32BitVTableOffsetABI = true;
  }

  // ARM64 libraries are prepared for non-unique RTTI.
  bool shouldRTTIBeUnique() const override { return false; }
};

class FuchsiaCXXABI final : public ItaniumCXXABI {
public:
  explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

private:
  bool constructorsAndDestructorsReturnThis() const override { return true; }
};

class WebAssemblyCXXABI final : public ItaniumCXXABI {
public:
  explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                      /*UseARMGuardVarABI=*/true) {}
  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
  llvm::CallInst *
  emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                      llvm::Value *Exn) override;

private:
  bool constructorsAndDestructorsReturnThis() const override { return true; }
  bool canCallMismatchedFunctionType() const override { return false; }
};

class XLCXXABI final : public ItaniumCXXABI {
public:
  explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::FunctionCallee dtor,
                          llvm::Constant *addr) override;

  bool useSinitAndSterm() const override { return true; }

private:
  void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                             llvm::Constant *addr);
};
}

CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
  switch (CGM.getContext().getCXXABIKind()) {
  // For IR-generation purposes, there's no significant difference
  // between the ARM and iOS ABIs.
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
    return new ARMCXXABI(CGM);

  case TargetCXXABI::AppleARM64:
    return new AppleARM64CXXABI(CGM);

  case TargetCXXABI::Fuchsia:
    return new FuchsiaCXXABI(CGM);

  // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
  // include the other 32-bit ARM oddities: constructor/destructor return values
  // and array cookies.
  case TargetCXXABI::GenericAArch64:
    return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                             /*UseARMGuardVarABI=*/true);

  case TargetCXXABI::GenericMIPS:
    return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);

  case TargetCXXABI::WebAssembly:
    return new WebAssemblyCXXABI(CGM);

  case TargetCXXABI::XL:
    return new XLCXXABI(CGM);

  case TargetCXXABI::GenericItanium:
    if (CGM.getContext().getTargetInfo().getTriple().getArch()
        == llvm::Triple::le32) {
      // For PNaCl, use ARM-style method pointers so that PNaCl code
      // does not assume anything about the alignment of function
      // pointers.
      return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
    }
    return new ItaniumCXXABI(CGM);

  case TargetCXXABI::Microsoft:
    llvm_unreachable("Microsoft ABI is not Itanium-based");
  }
  llvm_unreachable("bad ABI kind");
}

llvm::Type *
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
  if (MPT->isMemberDataPointer())
    return CGM.PtrDiffTy;
  return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
}

/// In the Itanium and ARM ABIs, method pointers have the form:
///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
///
/// In the Itanium ABI:
///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
///  - the this-adjustment is (memptr.adj)
///  - the virtual offset is (memptr.ptr - 1)
///
/// In the ARM ABI:
///  - method pointers are virtual if (memptr.adj & 1) is nonzero
///  - the this-adjustment is (memptr.adj >> 1)
///  - the virtual offset is (memptr.ptr)
/// ARM uses 'adj' for the virtual flag because Thumb functions
/// may be only single-byte aligned.
///
/// If the member is virtual, the adjusted 'this' pointer points
/// to a vtable pointer from which the virtual offset is applied.
///
/// If the member is non-virtual, memptr.ptr is the address of
/// the function to call.
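///
/// As a worked illustration (not part of the ABI text): a virtual member
/// function at vtable byte offset 8 with no this-adjustment is encoded as
/// { ptr = 9, adj = 0 } under Itanium and { ptr = 8, adj = 1 } under ARM,
/// while a non-virtual member function f with a 4-byte this-adjustment is
/// { ptr = (ptrdiff_t)&f, adj = 4 } under Itanium and
/// { ptr = (ptrdiff_t)&f, adj = 8 } under ARM.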
CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
    CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
    llvm::Value *&ThisPtrForCall,
    llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  const FunctionProtoType *FPT =
      MPT->getPointeeType()->castAs<FunctionProtoType>();
  auto *RD =
      cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());

  llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);

  llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
  llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
  llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");

  // Extract memptr.adj, which is in the second field.
  llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");

  // Compute the true adjustment.
  llvm::Value *Adj = RawAdj;
  if (UseARMMethodPtrABI)
    Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");

  // Apply the adjustment and cast back to the original struct type
  // for consistency.
  llvm::Value *This = ThisAddr.emitRawPointer(CGF);
  This = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), This, Adj);
  ThisPtrForCall = This;

  // Load the function pointer.
  llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");

  // If the LSB in the function pointer is 1, the function pointer points to
  // a virtual function.
  llvm::Value *IsVirtual;
  if (UseARMMethodPtrABI)
    IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
  else
    IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
  IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
  Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);

  // In the virtual path, the adjustment left 'This' pointing to the
  // vtable of the correct base subobject. The "function pointer" is an
  // offset within the vtable (+1 for the virtual flag on non-ARM).
  CGF.EmitBlock(FnVirtual);

  // Cast the adjusted this to a pointer to vtable pointer and load.
  llvm::Type *VTableTy = CGF.CGM.GlobalsInt8PtrTy;
  CharUnits VTablePtrAlign =
      CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
                                        CGF.getPointerAlign());
  llvm::Value *VTable = CGF.GetVTablePtr(
      Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);

  // Apply the offset.
  // On ARM64, to reserve extra space in virtual member function pointers,
  // we only pay attention to the low 32 bits of the offset.
  llvm::Value *VTableOffset = FnAsInt;
  if (!UseARMMethodPtrABI)
    VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
  if (Use32BitVTableOffsetABI) {
    VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
    VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
  }

  // Check the address of the function pointer if CFI on member function
  // pointers is enabled.
  llvm::Constant *CheckSourceLocation;
  llvm::Constant *CheckTypeDesc;
  bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
                            CGM.HasHiddenLTOVisibility(RD);
  bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
                           CGM.HasHiddenLTOVisibility(RD);
  bool ShouldEmitWPDInfo =
      CGM.getCodeGenOpts().WholeProgramVTables &&
      // Don't insert type tests if we are forcing public visibility.
      !CGM.AlwaysHasLTOVisibilityPublic(RD);
  llvm::Value *VirtualFn = nullptr;

  {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *TypeId = nullptr;
    llvm::Value *CheckResult = nullptr;

    if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
      // If doing CFI, VFE or WPD, we will need the metadata node to check
      // against.
      llvm::Metadata *MD =
          CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
      TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
    }

    if (ShouldEmitVFEInfo) {
      llvm::Value *VFPAddr =
          Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);

      // If doing VFE, load from the vtable with a type.checked.load intrinsic
      // call. Note that we use the GEP to calculate the address to load from
      // and pass 0 as the offset to the intrinsic. This is because every
      // vtable slot of the correct type is marked with matching metadata, and
      // we know that the load must be from one of these slots.
      llvm::Value *CheckedLoad = Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
          {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
      CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
      VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
    } else {
      // When not doing VFE, emit a normal load, as it allows more
      // optimisations than type.checked.load.
      if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
        llvm::Value *VFPAddr =
            Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
        llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
                                      ? llvm::Intrinsic::type_test
                                      : llvm::Intrinsic::public_type_test;

        CheckResult =
            Builder.CreateCall(CGM.getIntrinsic(IID), {VFPAddr, TypeId});
      }

      if (CGM.getItaniumVTableContext().isRelativeLayout()) {
        VirtualFn = CGF.Builder.CreateCall(
            CGM.getIntrinsic(llvm::Intrinsic::load_relative,
                             {VTableOffset->getType()}),
            {VTable, VTableOffset});
      } else {
        llvm::Value *VFPAddr =
            CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
        VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.UnqualPtrTy, VFPAddr,
                                                  CGF.getPointerAlign(),
                                                  "memptr.virtualfn");
      }
    }
    assert(VirtualFn && "Virtual function pointer not created!");
    assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
            CheckResult) &&
           "Check result required but not created!");

    if (ShouldEmitCFICheck) {
      // If doing CFI, emit the check.
      CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
      CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
      llvm::Constant *StaticData[] = {
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
          CheckSourceLocation,
          CheckTypeDesc,
      };

      if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
        CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
      } else {
        llvm::Value *AllVtables = llvm::MetadataAsValue::get(
            CGM.getLLVMContext(),
            llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
        llvm::Value *ValidVtable = Builder.CreateCall(
            CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
        CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
                      SanitizerHandler::CFICheckFail, StaticData,
                      {VTable, ValidVtable});
      }

      FnVirtual = Builder.GetInsertBlock();
    }
  } // End of sanitizer scope

  CGF.EmitBranch(FnEnd);

  // In the non-virtual path, the function pointer is actually a
  // function pointer.
  CGF.EmitBlock(FnNonVirtual);
  llvm::Value *NonVirtualFn =
      Builder.CreateIntToPtr(FnAsInt, CGF.UnqualPtrTy, "memptr.nonvirtualfn");

  // Check the function pointer if CFI on member function pointers is enabled.
  if (ShouldEmitCFICheck) {
    CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
    if (RD->hasDefinition()) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);

      llvm::Constant *StaticData[] = {
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
          CheckSourceLocation,
          CheckTypeDesc,
      };

      llvm::Value *Bit = Builder.getFalse();
      for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
        llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
            getContext().getMemberPointerType(
                MPT->getPointeeType(),
                getContext().getRecordType(Base).getTypePtr()));
        llvm::Value *TypeId =
            llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);

        llvm::Value *TypeTest =
            Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
                               {NonVirtualFn, TypeId});
        Bit = Builder.CreateOr(Bit, TypeTest);
      }

      CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
                    SanitizerHandler::CFICheckFail, StaticData,
                    {NonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});

      FnNonVirtual = Builder.GetInsertBlock();
    }
  }

  // We're done.
  CGF.EmitBlock(FnEnd);
  llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.UnqualPtrTy, 2);
  CalleePtr->addIncoming(VirtualFn, FnVirtual);
  CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);

  CGPointerAuthInfo PointerAuth;

  if (const auto &Schema =
          CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers) {
    llvm::PHINode *DiscriminatorPHI = Builder.CreatePHI(CGF.IntPtrTy, 2);
    DiscriminatorPHI->addIncoming(llvm::ConstantInt::get(CGF.IntPtrTy, 0),
                                  FnVirtual);
    const auto &AuthInfo =
        CGM.getMemberFunctionPointerAuthInfo(QualType(MPT, 0));
    assert(Schema.getKey() == AuthInfo.getKey() &&
           "Keys for virtual and non-virtual member functions must match");
    auto *NonVirtualDiscriminator = AuthInfo.getDiscriminator();
    DiscriminatorPHI->addIncoming(NonVirtualDiscriminator, FnNonVirtual);
    PointerAuth = CGPointerAuthInfo(
        Schema.getKey(), Schema.getAuthenticationMode(), Schema.isIsaPointer(),
        Schema.authenticatesNullValues(), DiscriminatorPHI);
  }

  CGCallee Callee(FPT, CalleePtr, PointerAuth);
  return Callee;
}

/// Compute an l-value by applying the given pointer-to-member to a
/// base object.
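///
/// For example (illustrative): given 'int X::*pm' holding byte offset 8 and
/// an X object 'x', 'x.*pm' designates the int at '(char *)&x + 8'.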
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
    CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
    const MemberPointerType *MPT) {
  assert(MemPtr->getType() == CGM.PtrDiffTy);

  CGBuilderTy &Builder = CGF.Builder;

  // Apply the offset, which we assume is non-null.
  return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.emitRawPointer(CGF), MemPtr,
                                   "memptr.offset");
}

// See if it's possible to return a constant signed pointer.
static llvm::Constant *pointerAuthResignConstant(
    llvm::Value *Ptr, const CGPointerAuthInfo &CurAuthInfo,
    const CGPointerAuthInfo &NewAuthInfo, CodeGenModule &CGM) {
  const auto *CPA = dyn_cast<llvm::ConstantPtrAuth>(Ptr);

  if (!CPA)
    return nullptr;

  assert(CPA->getKey()->getZExtValue() == CurAuthInfo.getKey() &&
         CPA->getAddrDiscriminator()->isZeroValue() &&
         CPA->getDiscriminator() == CurAuthInfo.getDiscriminator() &&
         "unexpected key or discriminators");

  return CGM.getConstantSignedPointer(
      CPA->getPointer(), NewAuthInfo.getKey(), nullptr,
      cast<llvm::ConstantInt>(NewAuthInfo.getDiscriminator()));
}

/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
///         <-- offset -->          <-- adjustment -->
///   |--------------------------|----------------------|--------------------|
///   ^Derived address point     ^Base address point    ^Member address point
///
/// So when converting a base member pointer to a derived member pointer,
/// we add the offset to the adjustment because the address point has
/// decreased; and conversely, when converting a derived MP to a base MP
/// we subtract the offset from the adjustment because the address point
/// has increased.
///
/// The standard forbids (at compile time) conversion to and from
/// virtual bases, which is why we don't have to consider them here.
///
/// The standard forbids (at run time) casting a derived MP to a base
/// MP when the derived MP does not point to a member of the base.
/// This is why -1 is a reasonable choice for null data member
/// pointers.
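///
/// Worked example (illustrative): if base B lies at offset 4 within derived
/// D, converting an 'int B::*' holding offset 12 to 'int D::*' yields offset
/// 16, and converting that 'int D::*' back to 'int B::*' subtracts 4 again.
/// Member function pointers adjust the 'adj' field the same way.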
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  // Use constant emission if we can.
  if (isa<llvm::Constant>(src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));

  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  CGBuilderTy &Builder = CGF.Builder;
  QualType DstType = E->getType();

  if (DstType->isMemberFunctionPointerType()) {
    if (const auto &NewAuthInfo =
            CGM.getMemberFunctionPointerAuthInfo(DstType)) {
      QualType SrcType = E->getSubExpr()->getType();
      assert(SrcType->isMemberFunctionPointerType());
      const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(SrcType);
      llvm::Value *MemFnPtr = Builder.CreateExtractValue(src, 0, "memptr.ptr");
      llvm::Type *OrigTy = MemFnPtr->getType();

      llvm::BasicBlock *StartBB = Builder.GetInsertBlock();
      llvm::BasicBlock *ResignBB = CGF.createBasicBlock("resign");
      llvm::BasicBlock *MergeBB = CGF.createBasicBlock("merge");

      // Check whether we have a virtual offset or a pointer to a function.
      assert(UseARMMethodPtrABI && "ARM ABI expected");
      llvm::Value *Adj = Builder.CreateExtractValue(src, 1, "memptr.adj");
      llvm::Constant *Ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
      llvm::Value *AndVal = Builder.CreateAnd(Adj, Ptrdiff_1);
      llvm::Value *IsVirtualOffset =
          Builder.CreateIsNotNull(AndVal, "is.virtual.offset");
      Builder.CreateCondBr(IsVirtualOffset, MergeBB, ResignBB);

      CGF.EmitBlock(ResignBB);
      llvm::Type *PtrTy = llvm::PointerType::getUnqual(CGM.Int8Ty);
      MemFnPtr = Builder.CreateIntToPtr(MemFnPtr, PtrTy);
      MemFnPtr =
          CGF.emitPointerAuthResign(MemFnPtr, SrcType, CurAuthInfo, NewAuthInfo,
                                    isa<llvm::Constant>(src));
      MemFnPtr = Builder.CreatePtrToInt(MemFnPtr, OrigTy);
      llvm::Value *ResignedVal = Builder.CreateInsertValue(src, MemFnPtr, 0);
      ResignBB = Builder.GetInsertBlock();

      CGF.EmitBlock(MergeBB);
      llvm::PHINode *NewSrc = Builder.CreatePHI(src->getType(), 2);
      NewSrc->addIncoming(src, StartBB);
      NewSrc->addIncoming(ResignedVal, ResignBB);
      src = NewSrc;
    }
  }

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
      E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(src, adj, "adj");
    else
      dst = Builder.CreateNSWAdd(src, adj, "adj");

    // Null check.
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
    return Builder.CreateSelect(isNull, src, dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
  else
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");

  return Builder.CreateInsertValue(src, dstAdj, 1);
}

static llvm::Constant *
pointerAuthResignMemberFunctionPointer(llvm::Constant *Src, QualType DestType,
                                       QualType SrcType, CodeGenModule &CGM) {
  assert(DestType->isMemberFunctionPointerType() &&
         SrcType->isMemberFunctionPointerType() &&
         "member function pointers expected");
  if (DestType == SrcType)
    return Src;

  const auto &NewAuthInfo = CGM.getMemberFunctionPointerAuthInfo(DestType);
  const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(SrcType);

  if (!NewAuthInfo && !CurAuthInfo)
    return Src;

  llvm::Constant *MemFnPtr = Src->getAggregateElement(0u);
  if (MemFnPtr->getNumOperands() == 0) {
    // src must be a pair of null pointers.
    assert(isa<llvm::ConstantInt>(MemFnPtr) && "constant int expected");
    return Src;
  }

  llvm::Constant *ConstPtr = pointerAuthResignConstant(
      cast<llvm::User>(MemFnPtr)->getOperand(0), CurAuthInfo, NewAuthInfo, CGM);
  ConstPtr = llvm::ConstantExpr::getPtrToInt(ConstPtr, MemFnPtr->getType());
  return ConstantFoldInsertValueInstruction(Src, ConstPtr, 0);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
                                           llvm::Constant *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  QualType DstType = E->getType();

  if (DstType->isMemberFunctionPointerType())
    src = pointerAuthResignMemberFunctionPointer(
        src, DstType, E->getSubExpr()->getType(), CGM);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // If the adjustment is trivial, we don't need to do anything.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
      E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    // null maps to null.
    if (src->isAllOnesValue()) return src;

    if (isDerivedToBase)
      return llvm::ConstantExpr::getNSWSub(src, adj);
    else
      return llvm::ConstantExpr::getNSWAdd(src, adj);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Constant *srcAdj = src->getAggregateElement(1);
  llvm::Constant *dstAdj;
  if (isDerivedToBase)
    dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
  else
    dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);

  llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1);
  assert(res != nullptr && "Folding must succeed");
  return res;
}

llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
  // Itanium C++ ABI 2.3:
  //   A NULL pointer is represented as -1.
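  // So, illustratively, 'int X::*pm = nullptr' lowers to ptrdiff_t -1, while
  // a null member function pointer is the all-zero pair built below.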
  if (MPT->isMemberDataPointer())
    return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);

  llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
  llvm::Constant *Values[2] = { Zero, Zero };
  return llvm::ConstantStruct::getAnon(Values);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
                                     CharUnits offset) {
  // Itanium C++ ABI 2.3:
  //   A pointer to data member is an offset from the base address of
  //   the class object containing it, represented as a ptrdiff_t
  return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
}

llvm::Constant *
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
  return BuildMemberPointer(MD, CharUnits::Zero());
}

llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
    uint64_t VTableOffset;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Multiply by 4-byte relative offsets.
      VTableOffset = Index * 4;
    } else {
      const ASTContext &Context = getContext();
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
          Context.getTargetInfo().getPointerWidth(LangAS::Default));
      VTableOffset = Index * PointerWidth.getQuantity();
    }

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.

      // We cannot use the Itanium ABI's representation for virtual member
      // function pointers under pointer authentication because it would
      // require us to store both the virtual offset and the constant
      // discriminator in the pointer, which would be immediately vulnerable
      // to attack. Instead we introduce a thunk that does the virtual dispatch
      // and store it as if it were a non-virtual member function. This means
      // that virtual function pointers may not compare equal anymore, but
      // fortunately they aren't required to by the standard, and we do make
      // a best-effort attempt to re-use the thunk.
      //
      // To support interoperation with code in which pointer authentication
      // is disabled, dereferencing a member function pointer must still handle
      // the virtual case, but it can use a discriminator which should never
      // be valid.
      const auto &Schema =
          CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers;
      if (Schema)
        MemPtr[0] = llvm::ConstantExpr::getPtrToInt(
            getSignedVirtualMemberFunctionPointer(MD), CGM.PtrDiffTy);
      else
        MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      // Don't set the LSB of adj to 1 if pointer authentication for member
      // function pointers is enabled.
      MemPtr[1] = llvm::ConstantInt::get(
          CGM.PtrDiffTy, 2 * ThisAdjustment.getQuantity() + !Schema);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.getMemberFunctionPointer(MD, Ty);

    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}

llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
                                                 QualType MPType) {
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  if (!MPD)
    return EmitNullMemberPointer(MPT);

  CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) {
    llvm::Constant *Src = BuildMemberPointer(MD, ThisAdjustment);
    QualType SrcType = getContext().getMemberPointerType(
        MD->getType(), MD->getParent()->getTypeForDecl());
    return pointerAuthResignMemberFunctionPointer(Src, MPType, SrcType, CGM);
  }

  CharUnits FieldOffset =
      getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
}

/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.
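  //
  // For example (illustrative), under Itanium { ptr = 0, adj = 0 } and
  // { ptr = 0, adj = 4 } compare equal because both are null, while
  // { ptr = &f, adj = 0 } and { ptr = &f, adj = 4 } compare unequal
  // because L.ptr is nonzero and the adjustments differ.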

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr. This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null. ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj. If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}

llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *MemPtr,
                                          const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  /// For member data pointers, this is just a check against -1.
  if (MPT->isMemberDataPointer()) {
    assert(MemPtr->getType() == CGM.PtrDiffTy);
    llvm::Value *NegativeOne =
        llvm::Constant::getAllOnesValue(MemPtr->getType());
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
  }

  // In Itanium, a member function pointer is not null if 'ptr' is not null.
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");

  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");

  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
  // (the virtual bit) is set.
  if (UseARMMethodPtrABI) {
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
                                                  "memptr.isvirtual");
    Result = Builder.CreateOr(Result, IsVirtual);
  }

  return Result;
}

bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
  const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  // If C++ prohibits us from making a copy, return by address.
  if (!RD->canPassInRegisters()) {
    auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
    FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
    return true;
  }
  return false;
}

/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset.
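///
/// For example (illustrative): in 'struct X { int a; };' the pointer
/// '&X::a' is the offset 0, so a zero-initialized 'int X::*' would be
/// indistinguishable from it; null must therefore be -1, and only member
/// function pointers (whose null is { 0, 0 }) are safely zero-initializable.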
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  return MPT->isMemberFunctionPointer();
}

/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
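///
/// In the conventional (non-relative) layout, the two words immediately
/// before the address point are the offset-to-top at entry -2 and the RTTI
/// pointer at entry -1, so loading entry -2 below yields the displacement
/// from this subobject to the complete object.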
1371void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1372 const CXXDeleteExpr *DE,
1373 Address Ptr,
1374 QualType ElementType,
1375 const CXXDestructorDecl *Dtor) {
1376 bool UseGlobalDelete = DE->isGlobalDelete();
1377 if (UseGlobalDelete) {
1378 // Derive the complete-object pointer, which is what we need
1379 // to pass to the deallocation function.
1380
1381 // Grab the vtable pointer as an intptr_t*.
1382 auto *ClassDecl =
1383 cast<CXXRecordDecl>(Val: ElementType->castAs<RecordType>()->getDecl());
1384 llvm::Value *VTable = CGF.GetVTablePtr(This: Ptr, VTableTy: CGF.UnqualPtrTy, VTableClass: ClassDecl);
1385
1386 // Track back to entry -2 and pull out the offset there.
1387 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1388 Ty: CGF.IntPtrTy, Ptr: VTable, Idx0: -2, Name: "complete-offset.ptr");
1389 llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(Ty: CGF.IntPtrTy, Addr: OffsetPtr,
1390 Align: CGF.getPointerAlign());
1391
1392 // Apply the offset.
1393 llvm::Value *CompletePtr = Ptr.emitRawPointer(CGF);
1394 CompletePtr =
1395 CGF.Builder.CreateInBoundsGEP(Ty: CGF.Int8Ty, Ptr: CompletePtr, IdxList: Offset);
1396
1397 // If we're supposed to call the global delete, make sure we do so
1398 // even if the destructor throws.
1399 CGF.pushCallObjectDeleteCleanup(OperatorDelete: DE->getOperatorDelete(), CompletePtr,
1400 ElementType);
1401 }
1402
1403 // FIXME: Provide a source location here even though there's no
1404 // CXXMemberCallExpr for dtor call.
1405 CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1406 EmitVirtualDestructorCall(CGF, Dtor, DtorType, This: Ptr, E: DE);
1407
1408 if (UseGlobalDelete)
1409 CGF.PopCleanupBlock();
1410}

void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
  // void __cxa_rethrow();

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);

  llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");

  if (isNoReturn)
    CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, std::nullopt);
  else
    CGF.EmitRuntimeCallOrInvoke(Fn);
}

static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
  // void *__cxa_allocate_exception(size_t thrown_size);

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
}

static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
  // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
  //                  void (*dest) (void *));

  llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.GlobalsInt8PtrTy, CGM.Int8PtrTy };
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}

void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object.
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();

  llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");

  CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
  CGF.EmitAnyExprToExn(
      E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor. If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!Record->hasTrivialDestructor()) {
      // __cxa_throw is declared to take its destructor as void (*)(void *).
      // We must match that if function pointers can be authenticated with a
      // discriminator based on their type.
      const ASTContext &Ctx = getContext();
      QualType DtorTy = Ctx.getFunctionType(Ctx.VoidTy, {Ctx.VoidPtrTy},
                                            FunctionProtoType::ExtProtoInfo());

      CXXDestructorDecl *DtorD = Record->getDestructor();
      Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
      Dtor = CGM.getFunctionPointer(Dtor, DtorTy);
    }
  }
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);

  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
}
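
// Roughly, 'throw E();' lowers to the following runtime sequence (a sketch;
// the landing-pad edges are omitted, and the names are conceptual):
//
//   void *exn = __cxa_allocate_exception(sizeof(E));
//   new (exn) E();                       // EmitAnyExprToExn
//   __cxa_throw(exn,
//               &typeid(E),              // the _ZTI1E descriptor
//               dtor);                   // complete-object ~E() as
//                                        // void(*)(void*), or null if
//                                        // ~E() is trivial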

static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      global_as const abi::__class_type_info *src,
  //                      global_as const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *GlobInt8PtrTy = CGF.GlobalsInt8PtrTy;
  llvm::Type *PtrDiffTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, GlobInt8PtrTy, GlobInt8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);

  // Mark the function as nounwind willreturn readonly.
  llvm::AttrBuilder FuncAttrs(CGF.getLLVMContext());
  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
  FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
  FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
  llvm::AttributeList Attrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
}

static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

/// Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7].
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst. Record all inheritance paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (const CXXBasePath &Path : Paths) {
    if (Path.Access != AS_public) // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (const CXXBasePathElement &PathElement : Path) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (PathElement.Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
      Offset += L.getBaseClassOffset(
          PathElement.Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}
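
// Example values of the hint for a few hierarchies (per Itanium ABI 2.9.7;
// illustrative class names):
//
//   struct A {};  struct B : A {};         // hint(A,B) == offset of A in B
//   struct C : virtual A {};               // hint(A,C) == -1 (virtual base)
//   struct D : B {};  struct E : A, D {};  // hint(A,E) == -3 (two public
//                                          //              nonvirtual paths)
//   struct F {};                           // hint(A,F) == -2 (not a base)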

static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

bool ItaniumCXXABI::shouldTypeidBeNullChecked(QualType SrcRecordTy) {
  return true;
}

void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
  Call->setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr, CGM.GlobalsInt8PtrTy,
                                        ClassDecl);

  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info.
    Value = CGF.Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
        {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
  } else {
    // Load the type info.
    Value =
        CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
  }
  return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
                                       CGF.getPointerAlign());
}
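
// Layout reminder (sketch): in the standard Itanium vtable the type_info
// pointer sits immediately before the address point, so the load above is
// effectively
//
//   ti = ((std::type_info **)vptr)[-1];
//
// while the relative layout stores a 32-bit offset at vptr - 4 and uses
// llvm.load.relative to materialize the pointer.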

bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                                       QualType SrcRecordTy) {
  return SrcIsPtr;
}

llvm::Value *ItaniumCXXABI::emitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      PtrDiffLTy,
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.emitRawPointer(CGF);
  if (CGM.getCodeGenOpts().PointerAuth.CXXVTablePointers) {
    // We perform a no-op load of the vtable pointer here to force an
    // authentication. In environments that do not support pointer
    // authentication this is an actual no-op that will be elided. When
    // pointer authentication is supported and enforced on vtable pointers
    // this load can trap.
    llvm::Value *Vtable =
        CGF.GetVTablePtr(ThisAddr, CGM.Int8PtrTy, SrcDecl,
                         CodeGenFunction::VTableAuthMode::MustTrap);
    assert(Vtable);
    (void)Vtable;
  }

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast.
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
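
// Sketch of the emitted call for 'dynamic_cast<Dst*>(src)' (symbol names
// illustrative):
//
//   void *r = __dynamic_cast(src, &_ZTISrc, &_ZTIDst, hint);
//   // pointer form:   r may be null
//   // reference form: if (r == nullptr) __cxa_bad_cast();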

llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastSuccess,
    llvm::BasicBlock *CastFail) {
  ASTContext &Context = getContext();

  // Find all the inheritance paths.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);
  (void)DestDecl->isDerivedFrom(SrcDecl, Paths);

  // Find an offset within `DestDecl` where a `SrcDecl` instance and its vptr
  // might appear.
  std::optional<CharUnits> Offset;
  for (const CXXBasePath &Path : Paths) {
    // dynamic_cast only finds public inheritance paths.
    if (Path.Access != AS_public)
      continue;

    CharUnits PathOffset;
    for (const CXXBasePathElement &PathElement : Path) {
      // Find the offset along this inheritance step.
      const CXXRecordDecl *Base =
          PathElement.Base->getType()->getAsCXXRecordDecl();
      if (PathElement.Base->isVirtual()) {
        // For a virtual base class, we know that the derived class is exactly
        // DestDecl, so we can use the vbase offset from its layout.
        const ASTRecordLayout &L = Context.getASTRecordLayout(DestDecl);
        PathOffset = L.getVBaseClassOffset(Base);
      } else {
        const ASTRecordLayout &L =
            Context.getASTRecordLayout(PathElement.Class);
        PathOffset += L.getBaseClassOffset(Base);
      }
    }

    if (!Offset)
      Offset = PathOffset;
    else if (Offset != PathOffset) {
      // Base appears in at least two different places. Find the most-derived
      // object and see if it's a DestDecl. Note that the most-derived object
      // must be at least as aligned as this base class subobject, and must
      // have a vptr at offset 0.
      ThisAddr = Address(emitDynamicCastToVoid(CGF, ThisAddr, SrcRecordTy),
                         CGF.VoidPtrTy, ThisAddr.getAlignment());
      SrcDecl = DestDecl;
      Offset = CharUnits::Zero();
      break;
    }
  }

  if (!Offset) {
    // If there are no public inheritance paths, the cast always fails.
    CGF.EmitBranch(CastFail);
    return llvm::PoisonValue::get(CGF.VoidPtrTy);
  }

  // Compare the vptr against the expected vptr for the destination type at
  // this offset. Note that we do not know what type ThisAddr points to in
  // the case where the derived class multiply inherits from the base class,
  // so we can't use GetVTablePtr; instead we load the vptr directly.
  llvm::Instruction *VPtr = CGF.Builder.CreateLoad(
      ThisAddr.withElementType(CGF.VoidPtrPtrTy), "vtable");
  CGM.DecorateInstructionWithTBAA(
      VPtr, CGM.getTBAAVTablePtrAccessInfo(CGF.VoidPtrPtrTy));
  llvm::Value *Success = CGF.Builder.CreateICmpEQ(
      VPtr, getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl));
  llvm::Value *Result = ThisAddr.emitRawPointer(CGF);
  if (!Offset->isZero())
    Result = CGF.Builder.CreateInBoundsGEP(
        CGF.CharTy, Result,
        {llvm::ConstantInt::get(CGF.PtrDiffTy, -Offset->getQuantity())});
  CGF.Builder.CreateCondBr(Success, CastSuccess, CastFail);
  return Result;
}
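
// The "exact" fast path above turns the cast into a single vptr compare, e.g.
// for 'dynamic_cast<Derived*>(base)' where Derived is known to be the most
// derived type (a sketch, with Off the unique base-subobject offset):
//
//   ok  = (*(void **)ptr == expected_vptr_of(Derived, at offset Off));
//   res = ok ? (char*)ptr - Off : fail;   // branch to CastSuccess/CastFail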

llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *OffsetToTop;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);

    // Get the offset-to-top from the vtable.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
  } else {
    llvm::Type *PtrDiffLTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);

    // Get the offset-to-top from the vtable.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
  }
  // Finally, add the offset to the pointer.
  return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty,
                                       ThisAddr.emitRawPointer(CGF),
                                       OffsetToTop);
}

bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::FunctionCallee Fn = getBadCastFn(CGF);
  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
  Call->setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
  return true;
}

llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                               BaseClassDecl);
  llvm::Value *VBaseOffsetPtr =
      CGF.Builder.CreateConstGEP1_64(
          CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
          "vbase.offset.ptr");

  llvm::Value *VBaseOffset;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
        "vbase.offset");
  } else {
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
  }
  return VBaseOffset;
}

void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
  // Just make sure we're in sync with TargetCXXABI.
  assert(CGM.getTarget().getCXXABI().hasConstructorVariants());

  // The constructor used for constructing this as a base class;
  // ignores virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));

  // The constructor used for constructing this as a complete class;
  // constructs the virtual bases, then calls the base constructor.
  if (!D->getParent()->isAbstract()) {
    // We don't need to emit the complete ctor if the class is abstract.
    CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
  }
}

CGCXXABI::AddedStructorArgCounts
ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
                                      SmallVectorImpl<CanQualType> &ArgTys) {
  ASTContext &Context = getContext();

  // All parameters are already in place except VTT, which goes after 'this'.
  // These are Clang types, so we don't need to worry about sret yet.

  // Check if we need to add a VTT parameter (which has type global void **).
  if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
                                             : GD.getDtorType() == Dtor_Base) &&
      cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
    LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
    QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
    ArgTys.insert(ArgTys.begin() + 1,
                  Context.getPointerType(CanQualType::CreateUnsafe(Q)));
    return AddedStructorArgCounts::prefix(1);
  }
  return AddedStructorArgCounts{};
}
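
// Example of the resulting base-object constructor signature (a sketch): for
//
//   struct A { virtual ~A(); };
//   struct B : virtual A { B(); };
//
// the base (C2) constructor of B is effectively 'void B_C2(B *this,
// void **vtt)', with the VTT pointer inserted directly after 'this'; the
// complete (C1) constructor takes no VTT because it knows the full layout.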

void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
  // The destructor used for destructing this as a base class; ignores
  // virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));

  // The destructor used for destructing this as a most-derived class;
  // calls the base destructor and then destructs any virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));

  // The destructor in a virtual table is always a 'deleting'
  // destructor, which calls the complete destructor and then uses the
  // appropriate operator delete.
  if (D->isVirtual())
    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
}

void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
    QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
    QualType T = Context.getPointerType(Q);
    auto *VTTDecl = ImplicitParamDecl::Create(
        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
        T, ImplicitParamKind::CXXVTT);
    Params.insert(Params.begin() + 1, VTTDecl);
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}

void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
  /// adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));

  /// Initialize the 'vtt' slot if needed.
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
  }

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
  ///
  /// Unlike the setting of return types, this is done within the ABI
  /// implementation instead of by clients of CGCXXABI because:
  /// 1) getThisValue is currently protected
  /// 2) in theory, an ABI could implement 'this' returns some other way;
  ///    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}

CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
    bool ForVirtualBase, bool Delegating) {
  if (!NeedsVTTParameter(GlobalDecl(D, Type)))
    return AddedStructorArgs{};

  // Insert the implicit 'vtt' argument as the second argument. Make sure to
  // correctly reflect its address space, which can differ from generic on
  // some targets.
  llvm::Value *VTT =
      CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
  LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
  QualType Q = getContext().getAddrSpaceQualType(getContext().VoidPtrTy, AS);
  QualType VTTTy = getContext().getPointerType(Q);
  return AddedStructorArgs::prefix({{VTT, VTTTy}});
}

llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
    CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
    bool ForVirtualBase, bool Delegating) {
  GlobalDecl GD(DD, Type);
  return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
}

void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This,
                                       QualType ThisTy) {
  GlobalDecl GD(DD, Type);
  llvm::Value *VTT =
      getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);

  CGCallee Callee;
  if (getContext().getLangOpts().AppleKext &&
      Type != Dtor_Base && DD->isVirtual())
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
  else
    Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);

  CGF.EmitCXXDestructorCall(GD, Callee,
                            CGF.getAsNaturalPointerTo(This, ThisTy), ThisTy,
                            VTT, VTTTy, nullptr);
}

// Check if any non-inline method has the specified attribute.
template <typename T>
static bool CXXRecordNonInlineHasAttr(const CXXRecordDecl *RD) {
  for (const auto *D : RD->noload_decls()) {
    if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
      if (FD->isInlined() || FD->doesThisDeclarationHaveABody() ||
          FD->isPureVirtual())
        continue;
      if (D->hasAttr<T>())
        return true;
    }
  }

  return false;
}

static void setVTableSelectiveDLLImportExport(CodeGenModule &CGM,
                                              llvm::GlobalVariable *VTable,
                                              const CXXRecordDecl *RD) {
  if (VTable->getDLLStorageClass() !=
          llvm::GlobalVariable::DefaultStorageClass ||
      RD->hasAttr<DLLImportAttr>() || RD->hasAttr<DLLExportAttr>())
    return;

  if (CGM.getVTables().isVTableExternal(RD)) {
    if (CXXRecordNonInlineHasAttr<DLLImportAttr>(RD))
      VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
  } else if (CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
    VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
}

void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
                               llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  if (CGM.getTarget().hasPS4DLLImportExport())
    setVTableSelectiveDLLImportExport(CGM, VTable, RD);

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  // Always emit type metadata on non-available_externally definitions, and on
  // available_externally definitions if we are performing whole program
  // devirtualization. For WPD we need the type metadata on all vtable
  // definitions to ensure we associate derived classes with base classes
  // defined in headers but with a strong definition only in a shared library.
  if (!VTable->isDeclarationForLinker() ||
      CGM.getCodeGenOpts().WholeProgramVTables) {
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
    // For available_externally definitions, add the vtable to
    // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
    if (VTable->isDeclarationForLinker()) {
      assert(CGM.getCodeGenOpts().WholeProgramVTables);
      CGM.addCompilerUsedGlobal(VTable);
    }
  }

  if (VTContext.isRelativeLayout()) {
    CGVT.RemoveHwasanMetadata(VTable);
    if (!VTable->isDSOLocal())
      CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
  }
}

bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
    CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
  if (Vptr.NearestVBase == nullptr)
    return false;
  return NeedsVTTParameter(CGF.CurGD);
}

llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {

  if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
      NeedsVTTParameter(CGF.CurGD)) {
    return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
                                                  NearestVBase);
  }
  return getVTableAddressPoint(Base, VTableClass);
}

llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address
  // point within that vtable.
  const VTableLayout &Layout =
      CGM.getItaniumVTableContext().getVTableLayout(VTableClass);
  VTableLayout::AddressPointLocation AddressPoint =
      Layout.getAddressPoint(Base);
  llvm::Value *Indices[] = {
      llvm::ConstantInt::get(CGM.Int32Ty, 0),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  // Add inrange attribute to indicate that only the VTableIndex can be
  // accessed.
  unsigned ComponentSize =
      CGM.getDataLayout().getTypeAllocSize(CGM.getVTableComponentType());
  unsigned VTableSize =
      ComponentSize * Layout.getVTableSize(AddressPoint.VTableIndex);
  unsigned Offset = ComponentSize * AddressPoint.AddressPointIndex;
  llvm::ConstantRange InRange(llvm::APInt(32, -Offset, true),
                              llvm::APInt(32, VTableSize - Offset, true));
  return llvm::ConstantExpr::getGetElementPtr(
      VTable->getValueType(), VTable, Indices, /*InBounds=*/true, InRange);
}
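
// For a vtable group {vtable 0, vtable 1, ...} this builds a constant GEP
// roughly of the form (illustrative, not verbatim IR):
//
//   getelementptr inbounds ({...}, ptr @_ZTV1B,
//                           i32 0, i32 VTableIndex, i32 AddressPointIndex)
//
// where the address point of a standard Itanium vtable is the slot just past
// the RTTI entry, and the inrange bound confines later constant offsets to
// that one sub-vtable of the group.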

llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Get the secondary vpointer index.
  uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

  /// Load the VTT.
  llvm::Value *VTT = CGF.LoadCXXVTT();
  if (VirtualPointerIndex)
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.GlobalsVoidPtrTy, VTT,
                                                 VirtualPointerIndex);

  // And load the address point from the VTT.
  llvm::Value *AP =
      CGF.Builder.CreateAlignedLoad(CGF.GlobalsVoidPtrTy, VTT,
                                    CGF.getPointerAlign());

  if (auto &Schema =
          CGF.CGM.getCodeGenOpts().PointerAuth.CXXVTTVTablePointers) {
    CGPointerAuthInfo PointerAuth = CGF.EmitPointerAuthInfo(Schema, VTT,
                                                            GlobalDecl(),
                                                            QualType());
    AP = CGF.EmitPointerAuthAuth(PointerAuth, AP);
  }

  return AP;
}

llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  // Use pointer-to-global alignment for the vtable. Otherwise we would align
  // it based on the size of the initializer, which doesn't make sense as only
  // single values are read.
  LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
                        ? 32
                        : CGM.getTarget().getPointerAlign(AS);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
      getContext().toCharUnitsFromBits(PAlign).getAsAlign());
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  if (CGM.getTarget().hasPS4DLLImportExport())
    setVTableSelectiveDLLImportExport(CGM, VTable, RD);

  CGM.setGVProperties(VTable, RD);
  return VTable;
}
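
// Mangling reminder: for 'class A' the call above produces the symbol
// _ZTV1A ("vtable for A"); the related structures use _ZTT1A (VTT),
// _ZTI1A (type_info) and _ZTS1A (type name string).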

CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  llvm::Type *PtrTy = CGM.GlobalsInt8PtrTy;
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(This, PtrTy, MethodDecl->getParent());

  uint64_t VTableIndex =
      CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc, *VTableSlotPtr = nullptr;
  auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers;
  if (!Schema && CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable, PtrTy,
        VTableIndex *
            CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default) /
            8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      VFuncLoad = CGF.Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
    } else {
      VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
          PtrTy, VTable, VTableIndex, "vfn");
      VFuncLoad = CGF.Builder.CreateAlignedLoad(PtrTy, VTableSlotPtr,
                                                CGF.getPointerAlign());
    }

    // Add !invariant.load md to the virtual function load to indicate that
    // the function didn't change inside the vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help devirtualization: it only matters when there are two loads of the
    // same virtual function pointer from the same vtable load, which won't
    // happen without devirtualization enabled by -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            llvm::LLVMContext::MD_invariant_load,
            llvm::MDNode::get(CGM.getLLVMContext(),
                              llvm::ArrayRef<llvm::Metadata *>()));
      }
    }
    VFunc = VFuncLoad;
  }

  CGPointerAuthInfo PointerAuth;
  if (Schema) {
    assert(VTableSlotPtr && "virtual function pointer not set");
    GD = CGM.getItaniumVTableContext().findOriginalMethod(
        GD.getCanonicalDecl());
    PointerAuth = CGF.EmitPointerAuthInfo(Schema, VTableSlotPtr, GD,
                                          QualType());
  }
  CGCallee Callee(GD, VFunc, PointerAuth);
  return Callee;
}
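
// Net effect for a call 'p->f()' where f occupies slot N (a sketch, classic
// layout):
//
//   vptr = *(void ***)p;     // GetVTablePtr
//   vfn  = vptr[N];          // the "vfn" slot load (+ !invariant.load)
//   vfn(p, ...);
//
// With the relative layout the slot load becomes
// 'llvm.load.relative(vptr, 4 * N)' instead of a pointer-sized load.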

llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
    Address This, DeleteOrMemberCallExpr E) {
  auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
  auto *D = E.dyn_cast<const CXXDeleteExpr *>();
  assert((CE != nullptr) ^ (D != nullptr));
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  GlobalDecl GD(Dtor, DtorType);
  const CGFunctionInfo *FInfo =
      &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
  llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
  CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);

  QualType ThisTy;
  if (CE) {
    ThisTy = CE->getObjectType();
  } else {
    ThisTy = D->getDestroyedType();
  }

  CGF.EmitCXXDestructorCall(GD, Callee, This.emitRawPointer(CGF), ThisTy,
                            nullptr, QualType(), nullptr);
  return nullptr;
}

void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
  CodeGenVTables &VTables = CGM.getVTables();
  llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
  VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
}

bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
    const CXXRecordDecl *RD) const {
  // We don't emit available_externally vtables if we are in -fapple-kext mode
  // because kext mode does not permit devirtualization.
  if (CGM.getLangOpts().AppleKext)
    return false;

  // If the vtable is hidden then it is not safe to emit an
  // available_externally copy of the vtable.
  if (isVTableHidden(RD))
    return false;

  if (CGM.getCodeGenOpts().ForceEmitVTables)
    return true;

  // If every inline virtual function has been emitted, it is safe to emit an
  // available_externally copy of the vtable.
  // FIXME: we can still emit a copy of the vtable if we can emit
  // definitions of the inline functions.
  if (hasAnyUnusedVirtualInlineFunction(RD))
    return false;

  // For a class with virtual bases, we must also be able to speculatively
  // emit the VTT, because CodeGen doesn't have separate notions of "can emit
  // the vtable" and "can emit the VTT". For a base subobject, this means we
  // need to be able to emit non-virtual base vtables.
  if (RD->getNumVBases()) {
    for (const auto &B : RD->bases()) {
      auto *BRD = B.getType()->getAsCXXRecordDecl();
      assert(BRD && "no class for base specifier");
      if (B.isVirtual() || !BRD->isDynamicClass())
        continue;
      if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
        return false;
    }
  }

  return true;
}

bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
  if (!canSpeculativelyEmitVTableAsBaseClass(RD))
    return false;

  if (RD->shouldEmitInExternalSource())
    return false;

  // For a complete-object vtable (or more specifically, for the VTT), we need
  // to be able to speculatively emit the vtables of all dynamic virtual bases.
  for (const auto &B : RD->vbases()) {
    auto *BRD = B.getType()->getAsCXXRecordDecl();
    assert(BRD && "no class for base specifier");
    if (!BRD->isDynamicClass())
      continue;
    if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
      return false;
  }

  return true;
}

static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          Address InitialPtr,
                                          const CXXRecordDecl *UnadjustedClass,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.emitRawPointer(CGF);

  Address V = InitialPtr.withElementType(CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(
        V, CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    llvm::Value *VTablePtr =
        CGF.GetVTablePtr(V, CGF.Int8PtrTy, UnadjustedClass);

    llvm::Value *Offset;
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.Int8Ty, VTablePtr, VirtualAdjustment);
    if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Load the adjustment offset from the vtable as a 32-bit int.
      Offset =
          CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
                                        CharUnits::fromQuantity(4));
    } else {
      llvm::Type *PtrDiffTy =
          CGF.ConvertType(CGF.getContext().getPointerDiffType());

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
                                             CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getElementType(),
                                              V.emitRawPointer(CGF), Offset);
  } else {
    ResultPtr = V.emitRawPointer(CGF);
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
                                                       NonVirtualAdjustment);
  }

  return ResultPtr;
}
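
// Example (a sketch): a this-adjusting thunk for a method overridden across
// a virtual base performs, in order,
//
//   p += NonVirtualAdjustment;                           // !IsReturnAdjustment
//   p += *(ptrdiff_t *)(*(char **)p + VirtualAdjustment); // vcall offset
//
// while a covariant return thunk applies the virtual part first and the
// non-virtual delta afterwards, matching the two branches above.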

llvm::Value *
ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF, Address This,
                                     const CXXRecordDecl *UnadjustedClass,
                                     const ThunkInfo &TI) {
  return performTypeAdjustment(CGF, This, UnadjustedClass, TI.This.NonVirtual,
                               TI.This.Virtual.Itanium.VCallOffsetOffset,
                               /*IsReturnAdjustment=*/false);
}

llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const CXXRecordDecl *UnadjustedClass,
                                       const ReturnAdjustment &RA) {
  return performTypeAdjustment(CGF, Ret, UnadjustedClass, RA.NonVirtual,
                               RA.Virtual.Itanium.VBaseOffsetOffset,
                               /*IsReturnAdjustment=*/true);
}

void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
                                    RValue RV, QualType ResultType) {
  if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
    return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);

  // Destructor thunks in the ARM ABI have indeterminate results.
  llvm::Type *T = CGF.ReturnValue.getElementType();
  RValue Undef = RValue::get(llvm::UndefValue::get(T));
  return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
}

/************************** Array allocation cookies **************************/

CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // The array cookie is a size_t; pad that up to the element alignment.
  // The cookie is actually right-justified in that space.
  return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
                  CGM.getContext().getPreferredTypeAlignInChars(elementType));
}

Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                             Address NewPtr,
                                             llvm::Value *NumElements,
                                             const CXXNewExpr *expr,
                                             QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr.getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = CGF.getSizeSize();

  // The size of the cookie.
  CharUnits CookieSize =
      std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie.
  Address CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);

  // Write the number of elements into the appropriate slot.
  Address NumElementsPtr = CookiePtr.withElementType(CGF.SizeTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Handle the array cookie specially in ASan.
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
    // The store to the CookiePtr does not need to be instrumented.
    SI->setNoSanitizeMetadata();
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
    llvm::FunctionCallee F =
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(F, NumElementsPtr.emitRawPointer(CGF));
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}
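
// Resulting allocation layout for 'new T[n]' when a cookie is required
// (generic Itanium form, a sketch):
//
//   | padding (if alignof(T) > sizeof(size_t)) | size_t n | T[0] ... T[n-1] |
//   ^ pointer returned by operator new[]                  ^ returned NewPtr
//
// The element count is right-justified against the array data so that the
// data itself stays suitably aligned.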

llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                Address allocPtr,
                                                CharUnits cookieSize) {
  // The element count is right-justified in the cookie.
  Address numElementsPtr = allocPtr;
  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
  if (!numElementsOffset.isZero())
    numElementsPtr =
        CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr,
                                               numElementsOffset);

  unsigned AS = allocPtr.getAddressSpace();
  numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
    return CGF.Builder.CreateLoad(numElementsPtr);
  // In ASan mode emit a function call instead of a regular load and let the
  // run-time deal with it: if the shadow is properly poisoned return the
  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
  // We can't simply ignore this load using nosanitize metadata because
  // the metadata may be lost.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false);
  llvm::FunctionCallee F =
      CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
  return CGF.Builder.CreateCall(F, numElementsPtr.emitRawPointer(CGF));
}

CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // ARM says that the cookie is always:
  //   struct array_cookie {
  //     std::size_t element_size; // element_size != 0
  //     std::size_t element_count;
  //   };
  // But the base ABI doesn't give anything an alignment greater than
  // 8, so we can dismiss this as typical ABI-author blindness to
  // actual language complexity and round up to the element alignment.
  return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
                  CGM.getContext().getTypeAlignInChars(elementType));
}

Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                         Address newPtr,
                                         llvm::Value *numElements,
                                         const CXXNewExpr *expr,
                                         QualType elementType) {
  assert(requiresArrayCookie(expr));

  // The cookie is always at the start of the buffer.
  Address cookie = newPtr;

  // The first element is the element size.
  cookie = cookie.withElementType(CGF.SizeTy);
  llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
      getContext().getTypeSizeInChars(elementType).getQuantity());
  CGF.Builder.CreateStore(elementSize, cookie);

  // The second element is the element count.
  cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
  CGF.Builder.CreateStore(numElements, cookie);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
  return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}

llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                            Address allocPtr,
                                            CharUnits cookieSize) {
  // The number of elements is at offset sizeof(size_t) relative to
  // the allocated pointer.
  Address numElementsPtr
    = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());

  numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
  return CGF.Builder.CreateLoad(numElementsPtr);
}
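
// ARM form, for contrast (a sketch): the cookie is always two size_t words
// at the very start of the allocation,
//
//   struct array_cookie { size_t element_size; size_t element_count; };
//
// so reading the count back is a fixed load at offset sizeof(size_t), as
// implemented just above.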

/*********************** Static local initialization **************************/

static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
                                              llvm::PointerType *GuardPtrTy) {
  // int __cxa_guard_acquire(__guard *guard_object);
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      CGM.getTypes().ConvertType(CGM.getContext().IntTy),
      GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_acquire",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
                                              llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_release(__guard *guard_object);
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_release",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
                                            llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_abort(__guard *guard_object);
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_abort",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

namespace {
  struct CallGuardAbort final : EHScopeStack::Cleanup {
    llvm::GlobalVariable *Guard;
    CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
                                  Guard);
    }
  };
}

/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment =
          CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlign(guardTy));
    }
  }
  llvm::PointerType *guardPtrTy = llvm::PointerType::get(
      CGF.CGM.getLLVMContext(),
      CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(&D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage, visibility and dll storage class from the guarded
    // variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setDSOLocal(var->isDSOLocal());
    guard->setVisibility(var->getVisibility());
    guard->setDLLStorageClass(var->getDLLStorageClass());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getAsAlign());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
      guard->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
    }

    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //           __cxa_guard_abort (&obj_guard);
  //           throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }
  //
  // If threadsafe statics are enabled, but we don't have inline atomics, just
  // call __cxa_guard_acquire unconditionally. The "inline" check isn't
  // actually inline, and the user might not expect calls to __atomic libcalls.

  unsigned MaxInlineWidthInBits = CGF.getTarget().getMaxAtomicInlineWidth();
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
  if (!threadsafe || MaxInlineWidthInBits) {
    // Load the first byte of the guard variable.
    llvm::LoadInst *LI =
        Builder.CreateLoad(guardAddr.withElementType(CGM.Int8Ty));

    // Itanium ABI:
    //   An implementation supporting thread-safety on multiprocessor
    //   systems must also guarantee that references to the initialized
    //   object do not occur before the load of the initialization flag.
    //
    // In LLVM, we do this by marking the load Acquire.
    if (threadsafe)
      LI->setAtomic(llvm::AtomicOrdering::Acquire);

    // For ARM, we should only check the first bit, rather than the entire
    // byte:
    //
    // ARM C++ ABI 3.2.3.1:
    //   To support the potential use of initialization guard variables
    //   as semaphores that are the target of ARM SWP and LDREX/STREX
    //   synchronizing instructions we define a static initialization
    //   guard variable to be a 4-byte aligned, 4-byte word with the
    //   following inline access protocol.
    //     #define INITIALIZED 1
    //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
    //       if (__cxa_guard_acquire(&obj_guard))
    //         ...
    //     }
    //
    // and similarly for ARM64:
    //
    // ARM64 C++ ABI 3.2.2:
    //   This ABI instead only specifies the value bit 0 of the static guard
    //   variable; all other bits are platform defined. Bit 0 shall be 0 when
    //   the variable is not initialized and 1 when it is.
    llvm::Value *V =
        (UseARMGuardVarABI && !useInt8GuardVariable)
            ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
            : LI;
    llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");

    llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");

    // Check if the first byte of the guard variable is zero.
    CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
                                 CodeGenFunction::GuardKind::VariableGuard, &D);

    CGF.EmitBlock(InitCheckBlock);
  }
2760
2761 // The semantics of dynamic initialization of variables with static or thread
2762 // storage duration depends on whether they are declared at block-scope. The
2763 // initialization of such variables at block-scope can be aborted with an
2764 // exception and later retried (per C++20 [stmt.dcl]p4), and recursive entry
2765 // to their initialization has undefined behavior (also per C++20
2766 // [stmt.dcl]p4). For such variables declared at non-block scope, exceptions
2767 // lead to termination (per C++20 [except.terminate]p1), and recursive
2768 // references to the variables are governed only by the lifetime rules (per
2769 // C++20 [class.cdtor]p2), which means such references are perfectly fine as
2770 // long as they avoid touching memory. As a result, block-scope variables must
2771 // not be marked as initialized until after initialization completes (unless
2772 // the mark is reverted following an exception), but non-block-scope variables
2773 // must be marked prior to initialization so that recursive accesses during
2774 // initialization do not restart initialization.
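  //
  // Illustrative example of the two regimes (not emitted code):
  //
  //   int init();              // may throw or recurse
  //   int g = init();          // non-block scope: the guard is set *before*
  //                            // init() runs, so recursion won't restart it
  //   int f() {
  //     static int s = init(); // block scope: the guard is set only *after*
  //     return s;              // init() succeeds, so a throwing init() can
  //   }                        // be retried on the next call to f()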
2775
2776 // Variables used when coping with thread-safe statics and exceptions.
2777 if (threadsafe) {
2778 // Call __cxa_guard_acquire.
2779 llvm::Value *V
2780 = CGF.EmitNounwindRuntimeCall(callee: getGuardAcquireFn(CGM, GuardPtrTy: guardPtrTy), args: guard);
2781
2782 llvm::BasicBlock *InitBlock = CGF.createBasicBlock(name: "init");
2783
2784 Builder.CreateCondBr(Cond: Builder.CreateIsNotNull(Arg: V, Name: "tobool"),
2785 True: InitBlock, False: EndBlock);
2786
2787 // Call __cxa_guard_abort along the exceptional edge.
2788 CGF.EHStack.pushCleanup<CallGuardAbort>(Kind: EHCleanup, A: guard);
2789
2790 CGF.EmitBlock(BB: InitBlock);
2791 } else if (!D.isLocalVarDecl()) {
2792 // For non-local variables, store 1 into the first byte of the guard
2793 // variable before the object initialization begins so that references
2794 // to the variable during initialization don't restart initialization.
2795 Builder.CreateStore(Val: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: 1),
2796 Addr: guardAddr.withElementType(ElemTy: CGM.Int8Ty));
2797 }
2798
2799 // Emit the initializer and add a global destructor if appropriate.
2800 CGF.EmitCXXGlobalVarDeclInit(D, GV: var, PerformInit: shouldPerformInit);
2801
2802 if (threadsafe) {
2803 // Pop the guard-abort cleanup if we pushed one.
2804 CGF.PopCleanupBlock();
2805
2806 // Call __cxa_guard_release. This cannot throw.
2807 CGF.EmitNounwindRuntimeCall(callee: getGuardReleaseFn(CGM, GuardPtrTy: guardPtrTy),
2808 args: guardAddr.emitRawPointer(CGF));
2809 } else if (D.isLocalVarDecl()) {
2810 // For local variables, store 1 into the first byte of the guard variable
2811 // after the object initialization completes so that initialization is
2812 // retried if initialization is interrupted by an exception.
2813 Builder.CreateStore(Val: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: 1),
2814 Addr: guardAddr.withElementType(ElemTy: CGM.Int8Ty));
2815 }
2816
2817 CGF.EmitBlock(BB: EndBlock);
2818}
2819
2820/// Register a global destructor using __cxa_atexit.
2821static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2822 llvm::FunctionCallee dtor,
2823 llvm::Constant *addr, bool TLS) {
2824 assert(!CGF.getTarget().getTriple().isOSAIX() &&
2825 "unexpected call to emitGlobalDtorWithCXAAtExit");
2826 assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
2827 "__cxa_atexit is disabled");
2828 const char *Name = "__cxa_atexit";
2829 if (TLS) {
2830 const llvm::Triple &T = CGF.getTarget().getTriple();
2831 Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
2832 }
2833
2834 // We're assuming that the destructor function is something we can
2835 // reasonably call with the default CC.
2836 llvm::Type *dtorTy = CGF.UnqualPtrTy;
2837
2838 // Preserve address space of addr.
2839 auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
2840 auto AddrPtrTy = AddrAS ? llvm::PointerType::get(C&: CGF.getLLVMContext(), AddressSpace: AddrAS)
2841 : CGF.Int8PtrTy;
2842
2843 // Create a variable that binds the atexit to this shared object.
2844 llvm::Constant *handle =
2845 CGF.CGM.CreateRuntimeVariable(Ty: CGF.Int8Ty, Name: "__dso_handle");
2846 auto *GV = cast<llvm::GlobalValue>(Val: handle->stripPointerCasts());
2847 GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2848
2849 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2850 llvm::Type *paramTys[] = {dtorTy, AddrPtrTy, handle->getType()};
2851 llvm::FunctionType *atexitTy =
2852 llvm::FunctionType::get(Result: CGF.IntTy, Params: paramTys, isVarArg: false);
2853
2854 // Fetch the actual function.
2855 llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(Ty: atexitTy, Name);
2856 if (llvm::Function *fn = dyn_cast<llvm::Function>(Val: atexit.getCallee()))
2857 fn->setDoesNotThrow();
2858
2859 const auto &Context = CGF.CGM.getContext();
2860 FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
2861 /*IsVariadic=*/false, /*IsCXXMethod=*/false));
2862 QualType fnType =
2863 Context.getFunctionType(ResultTy: Context.VoidTy, Args: {Context.VoidPtrTy}, EPI);
2864 llvm::Constant *dtorCallee = cast<llvm::Constant>(Val: dtor.getCallee());
2865 dtorCallee = CGF.CGM.getFunctionPointer(Pointer: dtorCallee, FunctionType: fnType);
2866
2867 if (!addr)
2868 // addr is null when we are trying to register a dtor annotated with
2869 // __attribute__((destructor)) in a constructor function. Using null here is
2870 // okay because this argument is just passed back to the destructor
2871 // function.
2872 addr = llvm::Constant::getNullValue(Ty: CGF.Int8PtrTy);
2873
2874 llvm::Value *args[] = {dtorCallee, addr, handle};
2875 CGF.EmitNounwindRuntimeCall(callee: atexit, args);
2876}
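
// For a namespace-scope `T obj;`, the function above registers the moral
// equivalent of the C call (the runtime entry points are real; `dtor_stub`
// stands for whatever destructor function is being registered):
//
//   __cxa_atexit(&dtor_stub, &obj, &__dso_handle);
//
// with __cxa_thread_atexit (or _tlv_atexit on Darwin) taking its place for
// thread-local variables. This is an illustrative summary, not emitted
// source.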
2877
2878static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
2879 StringRef FnName) {
2880 // Create a function that registers/unregisters destructors that have the same
2881 // priority.
2882 llvm::FunctionType *FTy = llvm::FunctionType::get(Result: CGM.VoidTy, isVarArg: false);
2883 llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2884 ty: FTy, name: FnName, FI: CGM.getTypes().arrangeNullaryFunction(), Loc: SourceLocation());
2885
2886 return GlobalInitOrCleanupFn;
2887}
2888
2889void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
2890 for (const auto &I : DtorsUsingAtExit) {
2891 int Priority = I.first;
2892 std::string GlobalCleanupFnName =
2893 std::string("__GLOBAL_cleanup_") + llvm::to_string(Value: Priority);
2894
2895 llvm::Function *GlobalCleanupFn =
2896 createGlobalInitOrCleanupFn(CGM&: *this, FnName: GlobalCleanupFnName);
2897
2898 CodeGenFunction CGF(*this);
2899 CGF.StartFunction(GD: GlobalDecl(), RetTy: getContext().VoidTy, Fn: GlobalCleanupFn,
2900 FnInfo: getTypes().arrangeNullaryFunction(), Args: FunctionArgList(),
2901 Loc: SourceLocation(), StartLoc: SourceLocation());
2902 auto AL = ApplyDebugLocation::CreateArtificial(CGF);
2903
2904 // Get the destructor function type, void(*)(void).
2905 llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(Result: CGF.VoidTy, isVarArg: false);
2906
2907 // Destructor functions are run/unregistered in non-ascending
2908 // order of their priorities.
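    // Illustratively, assuming destructor stubs dtor_a and then dtor_b were
    // registered at this priority, the loop below emits the moral equivalent
    // of:
    //
    //   if (unatexit(dtor_b) == 0) dtor_b();  // successfully unregistered
    //   if (unatexit(dtor_a) == 0) dtor_a();  //   => run it here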
2909 const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2910 auto itv = Dtors.rbegin();
2911 while (itv != Dtors.rend()) {
2912 llvm::Function *Dtor = *itv;
2913
2914 // We're assuming that the destructor function is something we can
2915 // reasonably call with the correct CC.
2916 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub: Dtor);
2917 llvm::Value *NeedsDestruct =
2918 CGF.Builder.CreateIsNull(Arg: V, Name: "needs_destruct");
2919
2920 llvm::BasicBlock *DestructCallBlock =
2921 CGF.createBasicBlock(name: "destruct.call");
2922 llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
2923 name: (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
2924 // Check if unatexit returns a value of 0. If it does, jump to
2925 // DestructCallBlock, otherwise jump to EndBlock directly.
2926 CGF.Builder.CreateCondBr(Cond: NeedsDestruct, True: DestructCallBlock, False: EndBlock);
2927
2928 CGF.EmitBlock(BB: DestructCallBlock);
2929
      // Emit the call to Dtor.
2931 llvm::CallInst *CI = CGF.Builder.CreateCall(FTy: dtorFuncTy, Callee: Dtor);
2932 // Make sure the call and the callee agree on calling convention.
2933 CI->setCallingConv(Dtor->getCallingConv());
2934
2935 CGF.EmitBlock(BB: EndBlock);
2936
2937 itv++;
2938 }
2939
2940 CGF.FinishFunction();
2941 AddGlobalDtor(Dtor: GlobalCleanupFn, Priority);
2942 }
2943}
2944
2945void CodeGenModule::registerGlobalDtorsWithAtExit() {
2946 for (const auto &I : DtorsUsingAtExit) {
2947 int Priority = I.first;
2948 std::string GlobalInitFnName =
2949 std::string("__GLOBAL_init_") + llvm::to_string(Value: Priority);
2950 llvm::Function *GlobalInitFn =
2951 createGlobalInitOrCleanupFn(CGM&: *this, FnName: GlobalInitFnName);
2952
2953 CodeGenFunction CGF(*this);
2954 CGF.StartFunction(GD: GlobalDecl(), RetTy: getContext().VoidTy, Fn: GlobalInitFn,
2955 FnInfo: getTypes().arrangeNullaryFunction(), Args: FunctionArgList(),
2956 Loc: SourceLocation(), StartLoc: SourceLocation());
2957 auto AL = ApplyDebugLocation::CreateArtificial(CGF);
2958
    // Constructor functions run in non-descending order of their priorities,
    // so destructors are registered in non-descending priority order as
    // well; and since atexit-registered functions run in reverse order of
    // registration, destructor functions end up running in non-ascending
    // order of their priorities.
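    // For example, destructors at priority 101 are registered before those
    // at priority 65535 and therefore run after them.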
2964 const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2965 for (auto *Dtor : Dtors) {
2966 // Register the destructor function calling __cxa_atexit if it is
2967 // available. Otherwise fall back on calling atexit.
2968 if (getCodeGenOpts().CXAAtExit) {
2969 emitGlobalDtorWithCXAAtExit(CGF, dtor: Dtor, addr: nullptr, TLS: false);
2970 } else {
2971 // We're assuming that the destructor function is something we can
2972 // reasonably call with the correct CC.
2973 CGF.registerGlobalDtorWithAtExit(dtorStub: Dtor);
2974 }
2975 }
2976
2977 CGF.FinishFunction();
2978 AddGlobalCtor(Ctor: GlobalInitFn, Priority);
2979 }
2980
2981 if (getCXXABI().useSinitAndSterm())
2982 unregisterGlobalDtorsWithUnAtExit();
2983}
2984
2985/// Register a global destructor as best as we know how.
2986void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2987 llvm::FunctionCallee dtor,
2988 llvm::Constant *addr) {
2989 if (D.isNoDestroy(CGM.getContext()))
2990 return;
2991
2992 // OpenMP offloading supports C++ constructors and destructors but we do not
2993 // always have 'atexit' available. Instead lower these to use the LLVM global
2994 // destructors which we can handle directly in the runtime. Note that this is
2995 // not strictly 1-to-1 with using `atexit` because we no longer tear down
2996 // globals in reverse order of when they were constructed.
2997 if (!CGM.getLangOpts().hasAtExit() && !D.isStaticLocal())
2998 return CGF.registerGlobalDtorWithLLVM(D, fn: dtor, addr);
2999
3000 // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
3001 // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
3002 // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
3003 // We can always use __cxa_thread_atexit.
3004 if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
3005 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, TLS: D.getTLSKind());
3006
3007 // In Apple kexts, we want to add a global destructor entry.
3008 // FIXME: shouldn't this be guarded by some variable?
3009 if (CGM.getLangOpts().AppleKext) {
3010 // Generate a global destructor entry.
3011 return CGM.AddCXXDtorEntry(DtorFn: dtor, Object: addr);
3012 }
3013
3014 CGF.registerGlobalDtorWithAtExit(D, fn: dtor, addr);
3015}
3016
3017static bool isThreadWrapperReplaceable(const VarDecl *VD,
3018 CodeGen::CodeGenModule &CGM) {
3019 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
  // Darwin prefers references to thread-local variables to go through the
  // thread wrapper instead of directly referencing the backing variable.
3022 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
3023 CGM.getTarget().getTriple().isOSDarwin();
3024}
3025
3026/// Get the appropriate linkage for the wrapper function. This is essentially
3027/// the weak form of the variable's linkage; every translation unit which needs
3028/// the wrapper emits a copy, and we want the linker to merge them.
3029static llvm::GlobalValue::LinkageTypes
3030getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
3031 llvm::GlobalValue::LinkageTypes VarLinkage =
3032 CGM.getLLVMLinkageVarDefinition(VD);
3033
3034 // For internal linkage variables, we don't need an external or weak wrapper.
3035 if (llvm::GlobalValue::isLocalLinkage(Linkage: VarLinkage))
3036 return VarLinkage;
3037
3038 // If the thread wrapper is replaceable, give it appropriate linkage.
3039 if (isThreadWrapperReplaceable(VD, CGM))
3040 if (!llvm::GlobalVariable::isLinkOnceLinkage(Linkage: VarLinkage) &&
3041 !llvm::GlobalVariable::isWeakODRLinkage(Linkage: VarLinkage))
3042 return VarLinkage;
3043 return llvm::GlobalValue::WeakODRLinkage;
3044}
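
// For example, a TU-local `static thread_local int t;` keeps an
// internal-linkage wrapper, while an externally visible `thread_local int u;`
// normally gets a weak_odr wrapper that the linker merges across translation
// units (modulo the replaceable-wrapper case above).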
3045
3046llvm::Function *
3047ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
3048 llvm::Value *Val) {
3049 // Mangle the name for the thread_local wrapper function.
3050 SmallString<256> WrapperName;
3051 {
3052 llvm::raw_svector_ostream Out(WrapperName);
3053 getMangleContext().mangleItaniumThreadLocalWrapper(D: VD, Out);
3054 }
3055
3056 // FIXME: If VD is a definition, we should regenerate the function attributes
3057 // before returning.
3058 if (llvm::Value *V = CGM.getModule().getNamedValue(Name: WrapperName))
3059 return cast<llvm::Function>(Val: V);
3060
3061 QualType RetQT = VD->getType();
3062 if (RetQT->isReferenceType())
3063 RetQT = RetQT.getNonReferenceType();
3064
3065 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
3066 resultType: getContext().getPointerType(T: RetQT), args: FunctionArgList());
3067
3068 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(Info: FI);
3069 llvm::Function *Wrapper =
3070 llvm::Function::Create(Ty: FnTy, Linkage: getThreadLocalWrapperLinkage(VD, CGM),
3071 N: WrapperName.str(), M: &CGM.getModule());
3072
3073 if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
3074 Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Name: Wrapper->getName()));
3075
3076 CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI, F: Wrapper, /*IsThunk=*/false);
3077
3078 // Always resolve references to the wrapper at link time.
3079 if (!Wrapper->hasLocalLinkage())
3080 if (!isThreadWrapperReplaceable(VD, CGM) ||
3081 llvm::GlobalVariable::isLinkOnceLinkage(Linkage: Wrapper->getLinkage()) ||
3082 llvm::GlobalVariable::isWeakODRLinkage(Linkage: Wrapper->getLinkage()) ||
3083 VD->getVisibility() == HiddenVisibility)
3084 Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
3085
3086 if (isThreadWrapperReplaceable(VD, CGM)) {
3087 Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
3088 Wrapper->addFnAttr(Kind: llvm::Attribute::NoUnwind);
3089 }
3090
3091 ThreadWrappers.push_back(Elt: {VD, Wrapper});
3092 return Wrapper;
3093}
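
// As an illustration, for `thread_local int x;` the Itanium mangling names
// the wrapper _ZTW1x and the init function _ZTH1x; accesses to x are lowered
// to calls to _ZTW1x(), which runs _ZTH1x() if needed and returns &x. See
// EmitThreadLocalInitFuncs below for how the wrapper body is filled in.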
3094
3095void ItaniumCXXABI::EmitThreadLocalInitFuncs(
3096 CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
3097 ArrayRef<llvm::Function *> CXXThreadLocalInits,
3098 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
3099 llvm::Function *InitFunc = nullptr;
3100
3101 // Separate initializers into those with ordered (or partially-ordered)
3102 // initialization and those with unordered initialization.
3103 llvm::SmallVector<llvm::Function *, 8> OrderedInits;
3104 llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
3105 for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
3106 if (isTemplateInstantiation(
3107 Kind: CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
3108 UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
3109 CXXThreadLocalInits[I];
3110 else
3111 OrderedInits.push_back(Elt: CXXThreadLocalInits[I]);
3112 }
3113
3114 if (!OrderedInits.empty()) {
3115 // Generate a guarded initialization function.
3116 llvm::FunctionType *FTy =
3117 llvm::FunctionType::get(Result: CGM.VoidTy, /*isVarArg=*/false);
3118 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
3119 InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(ty: FTy, name: "__tls_init", FI,
3120 Loc: SourceLocation(),
3121 /*TLS=*/true);
3122 llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
3123 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
3124 llvm::GlobalVariable::InternalLinkage,
3125 llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: 0), "__tls_guard");
3126 Guard->setThreadLocal(true);
3127 Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());
3128
3129 CharUnits GuardAlign = CharUnits::One();
3130 Guard->setAlignment(GuardAlign.getAsAlign());
3131
3132 CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
3133 Fn: InitFunc, CXXThreadLocals: OrderedInits, Guard: ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
3134 // On Darwin platforms, use CXX_FAST_TLS calling convention.
3135 if (CGM.getTarget().getTriple().isOSDarwin()) {
3136 InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
3137 InitFunc->addFnAttr(Kind: llvm::Attribute::NoUnwind);
3138 }
3139 }
3140
3141 // Create declarations for thread wrappers for all thread-local variables
3142 // with non-discardable definitions in this translation unit.
3143 for (const VarDecl *VD : CXXThreadLocals) {
3144 if (VD->hasDefinition() &&
3145 !isDiscardableGVALinkage(L: getContext().GetGVALinkageForVariable(VD))) {
3146 llvm::GlobalValue *GV = CGM.GetGlobalValue(Ref: CGM.getMangledName(GD: VD));
3147 getOrCreateThreadLocalWrapper(VD, Val: GV);
3148 }
3149 }
3150
3151 // Emit all referenced thread wrappers.
3152 for (auto VDAndWrapper : ThreadWrappers) {
3153 const VarDecl *VD = VDAndWrapper.first;
3154 llvm::GlobalVariable *Var =
3155 cast<llvm::GlobalVariable>(Val: CGM.GetGlobalValue(Ref: CGM.getMangledName(GD: VD)));
3156 llvm::Function *Wrapper = VDAndWrapper.second;
3157
    // Some targets require that all access to thread local variables go
    // through the thread wrapper. On such targets, a TU that does not define
    // the variable must not define the thread wrapper or a thread helper
    // either; it may only declare them.
3161 if (!VD->hasDefinition()) {
3162 if (isThreadWrapperReplaceable(VD, CGM)) {
3163 Wrapper->setLinkage(llvm::Function::ExternalLinkage);
3164 continue;
3165 }
3166
3167 // If this isn't a TU in which this variable is defined, the thread
3168 // wrapper is discardable.
3169 if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
3170 Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
3171 }
3172
3173 CGM.SetLLVMFunctionAttributesForDefinition(D: nullptr, F: Wrapper);
3174
3175 // Mangle the name for the thread_local initialization function.
3176 SmallString<256> InitFnName;
3177 {
3178 llvm::raw_svector_ostream Out(InitFnName);
3179 getMangleContext().mangleItaniumThreadLocalInit(D: VD, Out);
3180 }
3181
3182 llvm::FunctionType *InitFnTy = llvm::FunctionType::get(Result: CGM.VoidTy, isVarArg: false);
3183
3184 // If we have a definition for the variable, emit the initialization
3185 // function as an alias to the global Init function (if any). Otherwise,
3186 // produce a declaration of the initialization function.
3187 llvm::GlobalValue *Init = nullptr;
3188 bool InitIsInitFunc = false;
3189 bool HasConstantInitialization = false;
3190 if (!usesThreadWrapperFunction(VD)) {
3191 HasConstantInitialization = true;
3192 } else if (VD->hasDefinition()) {
3193 InitIsInitFunc = true;
3194 llvm::Function *InitFuncToUse = InitFunc;
3195 if (isTemplateInstantiation(Kind: VD->getTemplateSpecializationKind()))
3196 InitFuncToUse = UnorderedInits.lookup(Val: VD->getCanonicalDecl());
3197 if (InitFuncToUse)
3198 Init = llvm::GlobalAlias::create(Linkage: Var->getLinkage(), Name: InitFnName.str(),
3199 Aliasee: InitFuncToUse);
3200 } else {
3201 // Emit a weak global function referring to the initialization function.
3202 // This function will not exist if the TU defining the thread_local
3203 // variable in question does not need any dynamic initialization for
3204 // its thread_local variables.
3205 Init = llvm::Function::Create(Ty: InitFnTy,
3206 Linkage: llvm::GlobalVariable::ExternalWeakLinkage,
3207 N: InitFnName.str(), M: &CGM.getModule());
3208 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
3209 CGM.SetLLVMFunctionAttributes(
3210 GD: GlobalDecl(), Info: FI, F: cast<llvm::Function>(Val: Init), /*IsThunk=*/false);
3211 }
3212
3213 if (Init) {
3214 Init->setVisibility(Var->getVisibility());
3215 // Don't mark an extern_weak function DSO local on windows.
3216 if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
3217 Init->setDSOLocal(Var->isDSOLocal());
3218 }
3219
3220 llvm::LLVMContext &Context = CGM.getModule().getContext();
3221
    // The linker on AIX is not happy with missing weak symbols. However,
    // other TUs will not know whether the initialization routine exists,
    // so create an empty init function to satisfy the linker. This is
    // needed whenever a thread wrapper function is not used, and also
    // when the symbol is weak.
3227 if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
3228 isEmittedWithConstantInitializer(VD, InspectInitForWeakDef: true) &&
3229 !mayNeedDestruction(VD)) {
3230 // Init should be null. If it were non-null, then the logic above would
3231 // either be defining the function to be an alias or declaring the
3232 // function with the expectation that the definition of the variable
3233 // is elsewhere.
3234 assert(Init == nullptr && "Expected Init to be null.");
3235
3236 llvm::Function *Func = llvm::Function::Create(
3237 Ty: InitFnTy, Linkage: Var->getLinkage(), N: InitFnName.str(), M: &CGM.getModule());
3238 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
3239 CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI,
3240 F: cast<llvm::Function>(Val: Func),
3241 /*IsThunk=*/false);
3242 // Create a function body that just returns
3243 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, Name: "", Parent: Func);
3244 CGBuilderTy Builder(CGM, Entry);
3245 Builder.CreateRetVoid();
3246 }
3247
3248 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, Name: "", Parent: Wrapper);
3249 CGBuilderTy Builder(CGM, Entry);
3250 if (HasConstantInitialization) {
3251 // No dynamic initialization to invoke.
3252 } else if (InitIsInitFunc) {
3253 if (Init) {
3254 llvm::CallInst *CallVal = Builder.CreateCall(FTy: InitFnTy, Callee: Init);
3255 if (isThreadWrapperReplaceable(VD, CGM)) {
3256 CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
3257 llvm::Function *Fn =
3258 cast<llvm::Function>(Val: cast<llvm::GlobalAlias>(Val: Init)->getAliasee());
3259 Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
3260 }
3261 }
3262 } else if (CGM.getTriple().isOSAIX()) {
      // On AIX, unless the variable is constinit and is neither of class
      // type nor of (possibly multi-dimensional) array-of-class type,
      // thread_local vars will have init routines regardless of whether
      // they are const-initialized. Since the routine is guaranteed to
      // exist, we can unconditionally call it without testing for its
      // existence. This avoids potentially unresolved weak symbols which
      // the AIX linker isn't happy with.
3270 Builder.CreateCall(FTy: InitFnTy, Callee: Init);
3271 } else {
3272 // Don't know whether we have an init function. Call it if it exists.
3273 llvm::Value *Have = Builder.CreateIsNotNull(Arg: Init);
3274 llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, Name: "", Parent: Wrapper);
3275 llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, Name: "", Parent: Wrapper);
3276 Builder.CreateCondBr(Cond: Have, True: InitBB, False: ExitBB);
3277
3278 Builder.SetInsertPoint(InitBB);
3279 Builder.CreateCall(FTy: InitFnTy, Callee: Init);
3280 Builder.CreateBr(Dest: ExitBB);
3281
3282 Builder.SetInsertPoint(ExitBB);
3283 }
3284
3285 // For a reference, the result of the wrapper function is a pointer to
3286 // the referenced object.
3287 llvm::Value *Val = Builder.CreateThreadLocalAddress(Ptr: Var);
3288
3289 if (VD->getType()->isReferenceType()) {
3290 CharUnits Align = CGM.getContext().getDeclAlign(D: VD);
3291 Val = Builder.CreateAlignedLoad(Ty: Var->getValueType(), Addr: Val, Align);
3292 }
3293
3294 Builder.CreateRet(V: Val);
3295 }
3296}
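
// Pseudo-C++ sketch of a wrapper body emitted above for a dynamically
// initialized `thread_local int x;` defined in another TU (the "call the
// init function only if it exists" case; names illustrative):
//
//   int *_ZTW1x() {
//     if (&_ZTH1x)   // extern weak: non-null only if the defining TU
//       _ZTH1x();    // emitted a dynamic initializer
//     return &x;
//   }
//
// For a reference variable, the wrapper additionally loads the stored
// pointer before returning it.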
3297
3298LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3299 const VarDecl *VD,
3300 QualType LValType) {
3301 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(D: VD);
3302 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3303
3304 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Callee: Wrapper);
3305 CallVal->setCallingConv(Wrapper->getCallingConv());
3306
3307 LValue LV;
3308 if (VD->getType()->isReferenceType())
3309 LV = CGF.MakeNaturalAlignRawAddrLValue(V: CallVal, T: LValType);
3310 else
3311 LV = CGF.MakeRawAddrLValue(V: CallVal, T: LValType,
3312 Alignment: CGF.getContext().getDeclAlign(D: VD));
3313 // FIXME: need setObjCGCLValueClass?
3314 return LV;
3315}
3316
3317/// Return whether the given global decl needs a VTT parameter, which it does
3318/// if it's a base constructor or destructor with virtual bases.
3319bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3320 const CXXMethodDecl *MD = cast<CXXMethodDecl>(Val: GD.getDecl());
3321
  // If we don't have any virtual bases, just return early.
3323 if (!MD->getParent()->getNumVBases())
3324 return false;
3325
3326 // Check if we have a base constructor.
3327 if (isa<CXXConstructorDecl>(Val: MD) && GD.getCtorType() == Ctor_Base)
3328 return true;
3329
3330 // Check if we have a base destructor.
3331 if (isa<CXXDestructorDecl>(Val: MD) && GD.getDtorType() == Dtor_Base)
3332 return true;
3333
3334 return false;
3335}
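
// For example, given
//
//   struct A { virtual ~A(); };
//   struct B : virtual A { B(); };
//
// the base-object constructor of B (C2 in the Itanium mangling) takes a VTT
// parameter, while the complete-object constructor (C1) does not.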
3336
3337llvm::Constant *
3338ItaniumCXXABI::getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD) {
3339 SmallString<256> MethodName;
3340 llvm::raw_svector_ostream Out(MethodName);
3341 getMangleContext().mangleCXXName(GD: MD, Out);
3342 MethodName += "_vfpthunk_";
3343 StringRef ThunkName = MethodName.str();
3344 llvm::Function *ThunkFn;
3345 if ((ThunkFn = cast_or_null<llvm::Function>(
3346 Val: CGM.getModule().getNamedValue(Name: ThunkName))))
3347 return ThunkFn;
3348
3349 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeCXXMethodDeclaration(MD);
3350 llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(Info: FnInfo);
3351 llvm::GlobalValue::LinkageTypes Linkage =
3352 MD->isExternallyVisible() ? llvm::GlobalValue::LinkOnceODRLinkage
3353 : llvm::GlobalValue::InternalLinkage;
3354 ThunkFn =
3355 llvm::Function::Create(Ty: ThunkTy, Linkage, N: ThunkName, M: &CGM.getModule());
3356 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
3357 ThunkFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
3358 assert(ThunkFn->getName() == ThunkName && "name was uniqued!");
3359
3360 CGM.SetLLVMFunctionAttributes(GD: MD, Info: FnInfo, F: ThunkFn, /*IsThunk=*/true);
3361 CGM.SetLLVMFunctionAttributesForDefinition(D: MD, F: ThunkFn);
3362
3363 // Stack protection sometimes gets inserted after the musttail call.
3364 ThunkFn->removeFnAttr(Kind: llvm::Attribute::StackProtect);
3365 ThunkFn->removeFnAttr(Kind: llvm::Attribute::StackProtectStrong);
3366 ThunkFn->removeFnAttr(Kind: llvm::Attribute::StackProtectReq);
3367
3368 // Start codegen.
3369 CodeGenFunction CGF(CGM);
3370 CGF.CurGD = GlobalDecl(MD);
3371 CGF.CurFuncIsThunk = true;
3372
3373 // Build FunctionArgs.
3374 FunctionArgList FunctionArgs;
3375 CGF.BuildFunctionArgList(GD: CGF.CurGD, Args&: FunctionArgs);
3376
3377 CGF.StartFunction(GD: GlobalDecl(), RetTy: FnInfo.getReturnType(), Fn: ThunkFn, FnInfo,
3378 Args: FunctionArgs, Loc: MD->getLocation(), StartLoc: SourceLocation());
3379 llvm::Value *ThisVal = loadIncomingCXXThis(CGF);
3380 setCXXABIThisValue(CGF, ThisPtr: ThisVal);
3381
3382 CallArgList CallArgs;
3383 for (const VarDecl *VD : FunctionArgs)
3384 CGF.EmitDelegateCallArg(args&: CallArgs, param: VD, loc: SourceLocation());
3385
3386 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
3387 RequiredArgs Required = RequiredArgs::forPrototypePlus(prototype: FPT, /*this*/ additional: 1);
3388 const CGFunctionInfo &CallInfo =
3389 CGM.getTypes().arrangeCXXMethodCall(args: CallArgs, type: FPT, required: Required, numPrefixArgs: 0);
3390 CGCallee Callee = CGCallee::forVirtual(CE: nullptr, MD: GlobalDecl(MD),
3391 Addr: getThisAddress(CGF), FTy: ThunkTy);
3392 llvm::CallBase *CallOrInvoke;
3393 CGF.EmitCall(CallInfo, Callee, ReturnValue: ReturnValueSlot(), Args: CallArgs, callOrInvoke: &CallOrInvoke,
3394 /*IsMustTail=*/true, Loc: SourceLocation(), IsVirtualFunctionPointerThunk: true);
3395 auto *Call = cast<llvm::CallInst>(Val: CallOrInvoke);
3396 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
3397 if (Call->getType()->isVoidTy())
3398 CGF.Builder.CreateRetVoid();
3399 else
3400 CGF.Builder.CreateRet(V: Call);
3401
3402 // Finish the function to maintain CodeGenFunction invariants.
3403 // FIXME: Don't emit unreachable code.
3404 CGF.EmitBlock(BB: CGF.createBasicBlock());
3405 CGF.FinishFunction();
3406 return ThunkFn;
3407}
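
// Conceptually, the thunk built above is nothing more than a musttail
// virtual dispatch; for `struct S { virtual int f(int); };` it behaves like
// the following sketch (name and signature illustrative):
//
//   int S_f_vfpthunk(S *self, int a) {
//     [[clang::musttail]] return self->f(a);
//   }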
3408
3409namespace {
3410class ItaniumRTTIBuilder {
3411 CodeGenModule &CGM; // Per-module state.
3412 llvm::LLVMContext &VMContext;
3413 const ItaniumCXXABI &CXXABI; // Per-module state.
3414
3415 /// Fields - The fields of the RTTI descriptor currently being built.
3416 SmallVector<llvm::Constant *, 16> Fields;
3417
3418 /// GetAddrOfTypeName - Returns the mangled type name of the given type.
3419 llvm::GlobalVariable *
3420 GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
3421
3422 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
3423 /// descriptor of the given type.
3424 llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
3425
3426 /// BuildVTablePointer - Build the vtable pointer for the given type.
3427 void BuildVTablePointer(const Type *Ty);
3428
3429 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3430 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3431 void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
3432
3433 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3434 /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3436 void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
3437
3438 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
3439 /// for pointer types.
3440 void BuildPointerTypeInfo(QualType PointeeTy);
3441
3442 /// BuildObjCObjectTypeInfo - Build the appropriate kind of
3443 /// type_info for an object type.
3444 void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
3445
3446 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3447 /// struct, used for member pointer types.
3448 void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
3449
3450public:
3451 ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
3452 : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
3453
3454 // Pointer type info flags.
3455 enum {
3456 /// PTI_Const - Type has const qualifier.
3457 PTI_Const = 0x1,
3458
3459 /// PTI_Volatile - Type has volatile qualifier.
3460 PTI_Volatile = 0x2,
3461
3462 /// PTI_Restrict - Type has restrict qualifier.
3463 PTI_Restrict = 0x4,
3464
3465 /// PTI_Incomplete - Type is incomplete.
3466 PTI_Incomplete = 0x8,
3467
3468 /// PTI_ContainingClassIncomplete - Containing class is incomplete.
3469 /// (in pointer to member).
3470 PTI_ContainingClassIncomplete = 0x10,
3471
3472 /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
3473 //PTI_TransactionSafe = 0x20,
3474
3475 /// PTI_Noexcept - Pointee is noexcept function (C++1z).
3476 PTI_Noexcept = 0x40,
3477 };
3478
3479 // VMI type info flags.
3480 enum {
3481 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
3482 VMI_NonDiamondRepeat = 0x1,
3483
3484 /// VMI_DiamondShaped - Class is diamond shaped.
3485 VMI_DiamondShaped = 0x2
3486 };
3487
3488 // Base class type info flags.
3489 enum {
3490 /// BCTI_Virtual - Base class is virtual.
3491 BCTI_Virtual = 0x1,
3492
3493 /// BCTI_Public - Base class is public.
3494 BCTI_Public = 0x2
3495 };
3496
3497 /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
3498 /// link to an existing RTTI descriptor if one already exists.
3499 llvm::Constant *BuildTypeInfo(QualType Ty);
3500
3501 /// BuildTypeInfo - Build the RTTI type info struct for the given type.
3502 llvm::Constant *BuildTypeInfo(
3503 QualType Ty,
3504 llvm::GlobalVariable::LinkageTypes Linkage,
3505 llvm::GlobalValue::VisibilityTypes Visibility,
3506 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
3507};
3508}
3509
3510llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3511 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3512 SmallString<256> Name;
3513 llvm::raw_svector_ostream Out(Name);
3514 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(T: Ty, Out);
3515
3516 // We know that the mangled name of the type starts at index 4 of the
3517 // mangled name of the typename, so we can just index into it in order to
3518 // get the mangled name of the type.
3519 llvm::Constant *Init = llvm::ConstantDataArray::getString(Context&: VMContext,
3520 Initializer: Name.substr(Start: 4));
3521 auto Align = CGM.getContext().getTypeAlignInChars(T: CGM.getContext().CharTy);
3522
3523 llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3524 Name, Ty: Init->getType(), Linkage, Alignment: Align.getAsAlign());
3525
3526 GV->setInitializer(Init);
3527
3528 return GV;
3529}
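
// For example, for `struct A` at namespace scope the type-name symbol is
// _ZTS1A; since the "_ZTS" prefix is 4 characters long, the string payload
// emitted above (Name.substr(4)) is "1A", the mangled name of the type
// itself.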
3530
3531llvm::Constant *
3532ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3533 // Mangle the RTTI name.
3534 SmallString<256> Name;
3535 llvm::raw_svector_ostream Out(Name);
3536 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T: Ty, Out);
3537
3538 // Look for an existing global.
3539 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3540
3541 if (!GV) {
3542 // Create a new global variable.
    // Note for the future: If we ever want to do deferred emission of RTTI,
    // check whether emitting vtables opportunistically needs any adjustment.
3545
3546 GV = new llvm::GlobalVariable(
3547 CGM.getModule(), CGM.GlobalsInt8PtrTy,
3548 /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr, Name);
3549 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3550 CGM.setGVProperties(GV, D: RD);
3551 // Import the typeinfo symbol when all non-inline virtual methods are
3552 // imported.
3553 if (CGM.getTarget().hasPS4DLLImportExport()) {
3554 if (RD && CXXRecordNonInlineHasAttr<DLLImportAttr>(RD)) {
3555 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3556 CGM.setDSOLocal(GV);
3557 }
3558 }
3559 }
3560
3561 return GV;
3562}
3563
3564/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3565/// info for that type is defined in the standard library.
3566static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
3567 // Itanium C++ ABI 2.9.2:
3568 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
3569 // the run-time support library. Specifically, the run-time support
3570 // library should contain type_info objects for the types X, X* and
3571 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3572 // unsigned char, signed char, short, unsigned short, int, unsigned int,
3573 // long, unsigned long, long long, unsigned long long, float, double,
3574 // long double, char16_t, char32_t, and the IEEE 754r decimal and
3575 // half-precision floating point types.
3576 //
3577 // GCC also emits RTTI for __int128.
3578 // FIXME: We do not emit RTTI information for decimal types here.
3579
3580 // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3581 switch (Ty->getKind()) {
3582 case BuiltinType::Void:
3583 case BuiltinType::NullPtr:
3584 case BuiltinType::Bool:
3585 case BuiltinType::WChar_S:
3586 case BuiltinType::WChar_U:
3587 case BuiltinType::Char_U:
3588 case BuiltinType::Char_S:
3589 case BuiltinType::UChar:
3590 case BuiltinType::SChar:
3591 case BuiltinType::Short:
3592 case BuiltinType::UShort:
3593 case BuiltinType::Int:
3594 case BuiltinType::UInt:
3595 case BuiltinType::Long:
3596 case BuiltinType::ULong:
3597 case BuiltinType::LongLong:
3598 case BuiltinType::ULongLong:
3599 case BuiltinType::Half:
3600 case BuiltinType::Float:
3601 case BuiltinType::Double:
3602 case BuiltinType::LongDouble:
3603 case BuiltinType::Float16:
3604 case BuiltinType::Float128:
3605 case BuiltinType::Ibm128:
3606 case BuiltinType::Char8:
3607 case BuiltinType::Char16:
3608 case BuiltinType::Char32:
3609 case BuiltinType::Int128:
3610 case BuiltinType::UInt128:
3611 return true;
3612
3613#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3614 case BuiltinType::Id:
3615#include "clang/Basic/OpenCLImageTypes.def"
3616#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3617 case BuiltinType::Id:
3618#include "clang/Basic/OpenCLExtensionTypes.def"
3619 case BuiltinType::OCLSampler:
3620 case BuiltinType::OCLEvent:
3621 case BuiltinType::OCLClkEvent:
3622 case BuiltinType::OCLQueue:
3623 case BuiltinType::OCLReserveID:
3624#define SVE_TYPE(Name, Id, SingletonId) \
3625 case BuiltinType::Id:
3626#include "clang/Basic/AArch64SVEACLETypes.def"
3627#define PPC_VECTOR_TYPE(Name, Id, Size) \
3628 case BuiltinType::Id:
3629#include "clang/Basic/PPCTypes.def"
3630#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3631#include "clang/Basic/RISCVVTypes.def"
3632#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3633#include "clang/Basic/WebAssemblyReferenceTypes.def"
3634#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3635#include "clang/Basic/AMDGPUTypes.def"
3636 case BuiltinType::ShortAccum:
3637 case BuiltinType::Accum:
3638 case BuiltinType::LongAccum:
3639 case BuiltinType::UShortAccum:
3640 case BuiltinType::UAccum:
3641 case BuiltinType::ULongAccum:
3642 case BuiltinType::ShortFract:
3643 case BuiltinType::Fract:
3644 case BuiltinType::LongFract:
3645 case BuiltinType::UShortFract:
3646 case BuiltinType::UFract:
3647 case BuiltinType::ULongFract:
3648 case BuiltinType::SatShortAccum:
3649 case BuiltinType::SatAccum:
3650 case BuiltinType::SatLongAccum:
3651 case BuiltinType::SatUShortAccum:
3652 case BuiltinType::SatUAccum:
3653 case BuiltinType::SatULongAccum:
3654 case BuiltinType::SatShortFract:
3655 case BuiltinType::SatFract:
3656 case BuiltinType::SatLongFract:
3657 case BuiltinType::SatUShortFract:
3658 case BuiltinType::SatUFract:
3659 case BuiltinType::SatULongFract:
3660 case BuiltinType::BFloat16:
3661 return false;
3662
3663 case BuiltinType::Dependent:
3664#define BUILTIN_TYPE(Id, SingletonId)
3665#define PLACEHOLDER_TYPE(Id, SingletonId) \
3666 case BuiltinType::Id:
3667#include "clang/AST/BuiltinTypes.def"
    llvm_unreachable("asking for RTTI for a placeholder type!");
3669
3670 case BuiltinType::ObjCId:
3671 case BuiltinType::ObjCClass:
3672 case BuiltinType::ObjCSel:
3673 llvm_unreachable("FIXME: Objective-C types are unsupported!");
3674 }
3675
3676 llvm_unreachable("Invalid BuiltinType Kind!");
3677}
3678
3679static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3680 QualType PointeeTy = PointerTy->getPointeeType();
3681 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Val&: PointeeTy);
3682 if (!BuiltinTy)
3683 return false;
3684
3685 // Check the qualifiers.
3686 Qualifiers Quals = PointeeTy.getQualifiers();
3687 Quals.removeConst();
3688
3689 if (!Quals.empty())
3690 return false;
3691
3692 return TypeInfoIsInStandardLibrary(Ty: BuiltinTy);
3693}
3694
3695/// IsStandardLibraryRTTIDescriptor - Returns whether the type
3696/// information for the given type exists in the standard library.
3697static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3698 // Type info for builtin types is defined in the standard library.
3699 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Val&: Ty))
3700 return TypeInfoIsInStandardLibrary(Ty: BuiltinTy);
3701
3702 // Type info for some pointer types to builtin types is defined in the
3703 // standard library.
3704 if (const PointerType *PointerTy = dyn_cast<PointerType>(Val&: Ty))
3705 return TypeInfoIsInStandardLibrary(PointerTy);
3706
3707 return false;
3708}
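
// For example, the descriptors _ZTIi (int), _ZTIPi (int*) and _ZTIPKi
// (int const*) are provided by the C++ runtime library, so a translation
// unit evaluating typeid(int) references them rather than emitting its own
// copies.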
3709
3710/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3711/// the given type exists somewhere else, and that we should not emit the type
3712/// information in this translation unit. Assumes that it is not a
3713/// standard-library type.
3714static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3715 QualType Ty) {
3716 ASTContext &Context = CGM.getContext();
3717
3718 // If RTTI is disabled, assume it might be disabled in the
3719 // translation unit that defines any potential key function, too.
3720 if (!Context.getLangOpts().RTTI) return false;
3721
3722 if (const RecordType *RecordTy = dyn_cast<RecordType>(Val&: Ty)) {
3723 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: RecordTy->getDecl());
3724 if (!RD->hasDefinition())
3725 return false;
3726
3727 if (!RD->isDynamicClass())
3728 return false;
3729
3730 // FIXME: this may need to be reconsidered if the key function
3731 // changes.
3732 // N.B. We must always emit the RTTI data ourselves if there exists a key
3733 // function.
3734 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3735
3736 // Don't import the RTTI but emit it locally.
3737 if (CGM.getTriple().isWindowsGNUEnvironment())
3738 return false;
3739
3740 if (CGM.getVTables().isVTableExternal(RD)) {
3741 if (CGM.getTarget().hasPS4DLLImportExport())
3742 return true;
3743
      return !IsDLLImport || CGM.getTriple().isWindowsItaniumEnvironment();
3747 }
3748 if (IsDLLImport)
3749 return true;
3750 }
3751
3752 return false;
3753}
3754
3755/// IsIncompleteClassType - Returns whether the given record type is incomplete.
3756static bool IsIncompleteClassType(const RecordType *RecordTy) {
3757 return !RecordTy->getDecl()->isCompleteDefinition();
3758}
3759
3760/// ContainsIncompleteClassType - Returns whether the given type contains an
3761/// incomplete class type. This is true if
3762///
3763/// * The given type is an incomplete class type.
3764/// * The given type is a pointer type whose pointee type contains an
3765/// incomplete class type.
3766/// * The given type is a member pointer type whose class is an incomplete
3767/// class type.
/// * The given type is a member pointer type whose pointee type contains an
///   incomplete class type.
3771static bool ContainsIncompleteClassType(QualType Ty) {
3772 if (const RecordType *RecordTy = dyn_cast<RecordType>(Val&: Ty)) {
3773 if (IsIncompleteClassType(RecordTy))
3774 return true;
3775 }
3776
3777 if (const PointerType *PointerTy = dyn_cast<PointerType>(Val&: Ty))
3778 return ContainsIncompleteClassType(Ty: PointerTy->getPointeeType());
3779
3780 if (const MemberPointerType *MemberPointerTy =
3781 dyn_cast<MemberPointerType>(Val&: Ty)) {
3782 // Check if the class type is incomplete.
3783 const RecordType *ClassType = cast<RecordType>(Val: MemberPointerTy->getClass());
3784 if (IsIncompleteClassType(RecordTy: ClassType))
3785 return true;
3786
3787 return ContainsIncompleteClassType(Ty: MemberPointerTy->getPointeeType());
3788 }
3789
3790 return false;
3791}
3792
3793// CanUseSingleInheritance - Return whether the given record decl has a "single,
3794// public, non-virtual base at offset zero (i.e. the derived class is dynamic
// iff the base is)", according to the Itanium C++ ABI, 2.9.5p6b.
3796static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3797 // Check the number of bases.
3798 if (RD->getNumBases() != 1)
3799 return false;
3800
3801 // Get the base.
3802 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3803
3804 // Check that the base is not virtual.
3805 if (Base->isVirtual())
3806 return false;
3807
3808 // Check that the base is public.
3809 if (Base->getAccessSpecifier() != AS_public)
3810 return false;
3811
3812 // Check that the class is dynamic iff the base is.
3813 auto *BaseDecl =
3814 cast<CXXRecordDecl>(Val: Base->getType()->castAs<RecordType>()->getDecl());
3815 if (!BaseDecl->isEmpty() &&
3816 BaseDecl->isDynamicClass() != RD->isDynamicClass())
3817 return false;
3818
3819 return true;
3820}
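
// For example (illustrative):
//
//   struct B { virtual ~B(); };
//   struct D1 : B {};           // qualifies: single public non-virtual base
//   struct D2 : virtual B {};   // virtual base: needs __vmi_class_type_info
//   struct D3 : private B {};   // non-public base: needs __vmi_class_type_info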
3821
3822void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
3823 // abi::__class_type_info.
3824 static const char * const ClassTypeInfo =
3825 "_ZTVN10__cxxabiv117__class_type_infoE";
3826 // abi::__si_class_type_info.
3827 static const char * const SIClassTypeInfo =
3828 "_ZTVN10__cxxabiv120__si_class_type_infoE";
3829 // abi::__vmi_class_type_info.
3830 static const char * const VMIClassTypeInfo =
3831 "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3832
3833 const char *VTableName = nullptr;
3834
3835 switch (Ty->getTypeClass()) {
3836#define TYPE(Class, Base)
3837#define ABSTRACT_TYPE(Class, Base)
3838#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3839#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3840#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3841#include "clang/AST/TypeNodes.inc"
3842 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3843
3844 case Type::LValueReference:
3845 case Type::RValueReference:
3846 llvm_unreachable("References shouldn't get here");
3847
3848 case Type::Auto:
3849 case Type::DeducedTemplateSpecialization:
3850 llvm_unreachable("Undeduced type shouldn't get here");
3851
3852 case Type::Pipe:
3853 llvm_unreachable("Pipe types shouldn't get here");
3854
3855 case Type::ArrayParameter:
3856 llvm_unreachable("Array Parameter types should not get here.");
3857
3858 case Type::Builtin:
3859 case Type::BitInt:
3860 // GCC treats vector and complex types as fundamental types.
3861 case Type::Vector:
3862 case Type::ExtVector:
3863 case Type::ConstantMatrix:
3864 case Type::Complex:
3865 case Type::Atomic:
3866 // FIXME: GCC treats block pointers as fundamental types?!
3867 case Type::BlockPointer:
3868 // abi::__fundamental_type_info.
3869 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3870 break;
3871
3872 case Type::ConstantArray:
3873 case Type::IncompleteArray:
3874 case Type::VariableArray:
3875 // abi::__array_type_info.
3876 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3877 break;
3878
3879 case Type::FunctionNoProto:
3880 case Type::FunctionProto:
3881 // abi::__function_type_info.
3882 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3883 break;
3884
3885 case Type::Enum:
3886 // abi::__enum_type_info.
3887 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3888 break;
3889
3890 case Type::Record: {
3891 const CXXRecordDecl *RD =
3892 cast<CXXRecordDecl>(Val: cast<RecordType>(Val: Ty)->getDecl());
3893
3894 if (!RD->hasDefinition() || !RD->getNumBases()) {
3895 VTableName = ClassTypeInfo;
3896 } else if (CanUseSingleInheritance(RD)) {
3897 VTableName = SIClassTypeInfo;
3898 } else {
3899 VTableName = VMIClassTypeInfo;
3900 }
3901
3902 break;
3903 }
3904
3905 case Type::ObjCObject:
3906 // Ignore protocol qualifiers.
3907 Ty = cast<ObjCObjectType>(Val: Ty)->getBaseType().getTypePtr();
3908
3909 // Handle id and Class.
3910 if (isa<BuiltinType>(Val: Ty)) {
3911 VTableName = ClassTypeInfo;
3912 break;
3913 }
3914
3915 assert(isa<ObjCInterfaceType>(Ty));
3916 [[fallthrough]];
3917
3918 case Type::ObjCInterface:
3919 if (cast<ObjCInterfaceType>(Val: Ty)->getDecl()->getSuperClass()) {
3920 VTableName = SIClassTypeInfo;
3921 } else {
3922 VTableName = ClassTypeInfo;
3923 }
3924 break;
3925
3926 case Type::ObjCObjectPointer:
3927 case Type::Pointer:
3928 // abi::__pointer_type_info.
3929 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3930 break;
3931
3932 case Type::MemberPointer:
3933 // abi::__pointer_to_member_type_info.
3934 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3935 break;
3936 }
3937
3938 llvm::Constant *VTable = nullptr;
3939
3940 // Check if the alias exists. If it doesn't, then get or create the global.
3941 if (CGM.getItaniumVTableContext().isRelativeLayout())
3942 VTable = CGM.getModule().getNamedAlias(Name: VTableName);
3943 if (!VTable) {
3944 llvm::Type *Ty = llvm::ArrayType::get(ElementType: CGM.GlobalsInt8PtrTy, NumElements: 0);
3945 VTable = CGM.getModule().getOrInsertGlobal(Name: VTableName, Ty);
3946 }
3947
3948 CGM.setDSOLocal(cast<llvm::GlobalValue>(Val: VTable->stripPointerCasts()));
3949
3950 llvm::Type *PtrDiffTy =
3951 CGM.getTypes().ConvertType(T: CGM.getContext().getPointerDiffType());
3952
  // The vtable address point comes after the offset-to-top and RTTI entries.
3954 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
3955 // The vtable address point is 8 bytes after its start:
3956 // 4 for the offset to top + 4 for the relative offset to rtti.
3957 llvm::Constant *Eight = llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: 8);
3958 VTable =
3959 llvm::ConstantExpr::getInBoundsGetElementPtr(Ty: CGM.Int8Ty, C: VTable, Idx: Eight);
3960 } else {
3961 llvm::Constant *Two = llvm::ConstantInt::get(Ty: PtrDiffTy, V: 2);
3962 VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(Ty: CGM.GlobalsInt8PtrTy,
3963 C: VTable, Idx: Two);
3964 }
3965
3966 if (auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXTypeInfoVTablePointer)
3967 VTable = CGM.getConstantSignedPointer(Pointer: VTable, Schema, StorageAddress: nullptr, SchemaDecl: GlobalDecl(),
3968 SchemaType: QualType(Ty, 0));
3969
3970 Fields.push_back(Elt: VTable);
3971}
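
// For instance, for a class described by abi::__class_type_info the field
// emitted above is, in the classic layout,
//
//   _ZTVN10__cxxabiv117__class_type_infoE + 2 * sizeof(void *)
//
// i.e. the vtable's address point, or the same symbol + 8 bytes in the
// relative-vtable layout.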
3972
3973/// Return the linkage that the type info and type info name constants
3974/// should have for the given type.
3975static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3976 QualType Ty) {
3977 // Itanium C++ ABI 2.9.5p7:
3978 // In addition, it and all of the intermediate abi::__pointer_type_info
3979 // structs in the chain down to the abi::__class_type_info for the
3980 // incomplete class type must be prevented from resolving to the
3981 // corresponding type_info structs for the complete class type, possibly
3982 // by making them local static objects. Finally, a dummy class RTTI is
3983 // generated for the incomplete type that will not resolve to the final
3984 // complete class RTTI (because the latter need not exist), possibly by
3985 // making it a local static object.
3986 if (ContainsIncompleteClassType(Ty))
3987 return llvm::GlobalValue::InternalLinkage;
3988
3989 switch (Ty->getLinkage()) {
3990 case Linkage::Invalid:
3991 llvm_unreachable("Linkage hasn't been computed!");
3992
3993 case Linkage::None:
3994 case Linkage::Internal:
3995 case Linkage::UniqueExternal:
3996 return llvm::GlobalValue::InternalLinkage;
3997
3998 case Linkage::VisibleNone:
3999 case Linkage::Module:
4000 case Linkage::External:
4001 // RTTI is not enabled, which means that this type info struct is going
4002 // to be used for exception handling. Give it linkonce_odr linkage.
4003 if (!CGM.getLangOpts().RTTI)
4004 return llvm::GlobalValue::LinkOnceODRLinkage;
4005
4006 if (const RecordType *Record = dyn_cast<RecordType>(Val&: Ty)) {
4007 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: Record->getDecl());
4008 if (RD->hasAttr<WeakAttr>())
4009 return llvm::GlobalValue::WeakODRLinkage;
4010 if (CGM.getTriple().isWindowsItaniumEnvironment())
4011 if (RD->hasAttr<DLLImportAttr>() &&
4012 ShouldUseExternalRTTIDescriptor(CGM, Ty))
4013 return llvm::GlobalValue::ExternalLinkage;
4014 // MinGW always uses LinkOnceODRLinkage for type info.
4015 if (RD->isDynamicClass() &&
4016 !CGM.getContext()
4017 .getTargetInfo()
4018 .getTriple()
4019 .isWindowsGNUEnvironment())
4020 return CGM.getVTableLinkage(RD);
4021 }
4022
4023 return llvm::GlobalValue::LinkOnceODRLinkage;
4024 }
4025
4026 llvm_unreachable("Invalid linkage!");
4027}
4028
4029llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
4030 // We want to operate on the canonical type.
4031 Ty = Ty.getCanonicalType();
4032
4033 // Check if we've already emitted an RTTI descriptor for this type.
4034 SmallString<256> Name;
4035 llvm::raw_svector_ostream Out(Name);
4036 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T: Ty, Out);
4037
4038 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
4039 if (OldGV && !OldGV->isDeclaration()) {
4040 assert(!OldGV->hasAvailableExternallyLinkage() &&
4041 "available_externally typeinfos not yet implemented");
4042
4043 return OldGV;
4044 }
4045
4046 // Check if there is already an external RTTI descriptor for this type.
4047 if (IsStandardLibraryRTTIDescriptor(Ty) ||
4048 ShouldUseExternalRTTIDescriptor(CGM, Ty))
4049 return GetAddrOfExternalRTTIDescriptor(Ty);
4050
  // Determine the linkage to give the RTTI data and its name.
4052 llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
4053
4054 // Give the type_info object and name the formal visibility of the
4055 // type itself.
4056 llvm::GlobalValue::VisibilityTypes llvmVisibility;
4057 if (llvm::GlobalValue::isLocalLinkage(Linkage))
4058 // If the linkage is local, only default visibility makes sense.
4059 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
4060 else if (CXXABI.classifyRTTIUniqueness(CanTy: Ty, Linkage) ==
4061 ItaniumCXXABI::RUK_NonUniqueHidden)
4062 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
4063 else
4064 llvmVisibility = CodeGenModule::GetLLVMVisibility(V: Ty->getVisibility());
4065
4066 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4067 llvm::GlobalValue::DefaultStorageClass;
4068 if (auto RD = Ty->getAsCXXRecordDecl()) {
4069 if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
4070 RD->hasAttr<DLLExportAttr>()) ||
4071 (CGM.shouldMapVisibilityToDLLExport(D: RD) &&
4072 !llvm::GlobalValue::isLocalLinkage(Linkage) &&
4073 llvmVisibility == llvm::GlobalValue::DefaultVisibility))
4074 DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
4075 }
4076 return BuildTypeInfo(Ty, Linkage, Visibility: llvmVisibility, DLLStorageClass);
4077}
4078
4079llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
4080 QualType Ty,
4081 llvm::GlobalVariable::LinkageTypes Linkage,
4082 llvm::GlobalValue::VisibilityTypes Visibility,
4083 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
4084 // Add the vtable pointer.
4085 BuildVTablePointer(Ty: cast<Type>(Val&: Ty));
4086
4087 // And the name.
4088 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
4089 llvm::Constant *TypeNameField;
4090
4091 // If we're supposed to demote the visibility, be sure to set a flag
4092 // to use a string comparison for type_info comparisons.
4093 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
4094 CXXABI.classifyRTTIUniqueness(CanTy: Ty, Linkage);
4095 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
4096 // The flag is the sign bit, which on ARM64 is defined to be clear
4097 // for global pointers. This is very ARM64-specific.
4098 TypeNameField = llvm::ConstantExpr::getPtrToInt(C: TypeName, Ty: CGM.Int64Ty);
4099 llvm::Constant *flag =
4100 llvm::ConstantInt::get(Ty: CGM.Int64Ty, V: ((uint64_t)1) << 63);
4101 TypeNameField = llvm::ConstantExpr::getAdd(C1: TypeNameField, C2: flag);
4102 TypeNameField =
4103 llvm::ConstantExpr::getIntToPtr(C: TypeNameField, Ty: CGM.GlobalsInt8PtrTy);
4104 } else {
4105 TypeNameField = TypeName;
4106 }
4107 Fields.push_back(Elt: TypeNameField);
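  // Illustration of the non-unique encoding above: the stored name field is
  // the address of the _ZTS... string with bit 63 set, i.e.
  //
  //   (uintptr_t)&_ZTS... | (1ULL << 63)
  //
  // and a consumer seeing that bit set must compare type names by string
  // contents rather than by pointer identity.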
4108
4109 switch (Ty->getTypeClass()) {
4110#define TYPE(Class, Base)
4111#define ABSTRACT_TYPE(Class, Base)
4112#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
4113#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
4114#define DEPENDENT_TYPE(Class, Base) case Type::Class:
4115#include "clang/AST/TypeNodes.inc"
4116 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
4117
4118 // GCC treats vector types as fundamental types.
4119 case Type::Builtin:
4120 case Type::Vector:
4121 case Type::ExtVector:
4122 case Type::ConstantMatrix:
4123 case Type::Complex:
4124 case Type::BlockPointer:
4125 // Itanium C++ ABI 2.9.5p4:
4126 // abi::__fundamental_type_info adds no data members to std::type_info.
4127 break;
4128
4129 case Type::LValueReference:
4130 case Type::RValueReference:
4131 llvm_unreachable("References shouldn't get here");
4132
4133 case Type::Auto:
4134 case Type::DeducedTemplateSpecialization:
4135 llvm_unreachable("Undeduced type shouldn't get here");
4136
4137 case Type::Pipe:
4138 break;
4139
4140 case Type::BitInt:
4141 break;
4142
4143 case Type::ConstantArray:
4144 case Type::IncompleteArray:
4145 case Type::VariableArray:
4146 case Type::ArrayParameter:
4147 // Itanium C++ ABI 2.9.5p5:
4148 // abi::__array_type_info adds no data members to std::type_info.
4149 break;
4150
4151 case Type::FunctionNoProto:
4152 case Type::FunctionProto:
4153 // Itanium C++ ABI 2.9.5p5:
4154 // abi::__function_type_info adds no data members to std::type_info.
4155 break;
4156
4157 case Type::Enum:
4158 // Itanium C++ ABI 2.9.5p5:
4159 // abi::__enum_type_info adds no data members to std::type_info.
4160 break;
4161
4162 case Type::Record: {
4163 const CXXRecordDecl *RD =
4164 cast<CXXRecordDecl>(Val: cast<RecordType>(Val&: Ty)->getDecl());
4165 if (!RD->hasDefinition() || !RD->getNumBases()) {
4166 // We don't need to emit any fields.
4167 break;
4168 }
4169
4170 if (CanUseSingleInheritance(RD))
4171 BuildSIClassTypeInfo(RD);
4172 else
4173 BuildVMIClassTypeInfo(RD);
4174
4175 break;
4176 }
4177
4178 case Type::ObjCObject:
4179 case Type::ObjCInterface:
4180 BuildObjCObjectTypeInfo(Ty: cast<ObjCObjectType>(Val&: Ty));
4181 break;
4182
4183 case Type::ObjCObjectPointer:
4184 BuildPointerTypeInfo(PointeeTy: cast<ObjCObjectPointerType>(Val&: Ty)->getPointeeType());
4185 break;
4186
4187 case Type::Pointer:
4188 BuildPointerTypeInfo(PointeeTy: cast<PointerType>(Val&: Ty)->getPointeeType());
4189 break;
4190
4191 case Type::MemberPointer:
4192 BuildPointerToMemberTypeInfo(Ty: cast<MemberPointerType>(Val&: Ty));
4193 break;
4194
4195 case Type::Atomic:
4196 // No fields, at least for the moment.
4197 break;
4198 }
4199
4200 llvm::Constant *Init = llvm::ConstantStruct::getAnon(V: Fields);
4201
4202 SmallString<256> Name;
4203 llvm::raw_svector_ostream Out(Name);
4204 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T: Ty, Out);
4205 llvm::Module &M = CGM.getModule();
4206 llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
4207 llvm::GlobalVariable *GV =
4208 new llvm::GlobalVariable(M, Init->getType(),
4209 /*isConstant=*/true, Linkage, Init, Name);
4210
4211 // Export the typeinfo in the same circumstances as the vtable is exported.
4212 auto GVDLLStorageClass = DLLStorageClass;
4213 if (CGM.getTarget().hasPS4DLLImportExport() &&
4214 GVDLLStorageClass != llvm::GlobalVariable::DLLExportStorageClass) {
4215 if (const RecordType *RecordTy = dyn_cast<RecordType>(Val&: Ty)) {
4216 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: RecordTy->getDecl());
4217 if (RD->hasAttr<DLLExportAttr>() ||
4218 CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
4219 GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
4220 }
4221 }
4222
4223 // If there's already an old global variable, replace it with the new one.
4224 if (OldGV) {
4225 GV->takeName(V: OldGV);
4226 OldGV->replaceAllUsesWith(V: GV);
4227 OldGV->eraseFromParent();
4228 }
4229
4230 if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
4231 GV->setComdat(M.getOrInsertComdat(Name: GV->getName()));
4232
4233 CharUnits Align = CGM.getContext().toCharUnitsFromBits(
4234 BitSize: CGM.getTarget().getPointerAlign(AddrSpace: CGM.GetGlobalVarAddressSpace(D: nullptr)));
4235 GV->setAlignment(Align.getAsAlign());
4236
4237 // The Itanium ABI specifies that type_info objects must be globally
4238 // unique, with one exception: if the type is an incomplete class
4239 // type or a (possibly indirect) pointer to one. That exception
4240 // affects the general case of comparing type_info objects produced
4241 // by the typeid operator, which is why the comparison operators on
4242 // std::type_info generally use the type_info name pointers instead
4243 // of the object addresses. However, the language's built-in uses
4244 // of RTTI generally require class types to be complete, even when
4245 // manipulating pointers to those class types. This allows the
4246 // implementation of dynamic_cast to rely on address equality tests,
4247 // which is much faster.
4248
4249 // All of this is to say that it's important that both the type_info
4250 // object and the type_info name be uniqued when weakly emitted.
4251
4252 TypeName->setVisibility(Visibility);
4253 CGM.setDSOLocal(TypeName);
4254
4255 GV->setVisibility(Visibility);
4256 CGM.setDSOLocal(GV);
4257
4258 TypeName->setDLLStorageClass(DLLStorageClass);
4259 GV->setDLLStorageClass(GVDLLStorageClass);
4260
4261 TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
4262 GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
4263
4264 return GV;
4265}
4266
4267/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
4268/// for the given Objective-C object type.
4269void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
4270 // Drop qualifiers.
4271 const Type *T = OT->getBaseType().getTypePtr();
4272 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
4273
4274 // The builtin types are abi::__class_type_infos and don't require
4275 // extra fields.
4276 if (isa<BuiltinType>(Val: T)) return;
4277
4278 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(Val: T)->getDecl();
4279 ObjCInterfaceDecl *Super = Class->getSuperClass();
4280
4281 // Root classes are also __class_type_info.
4282 if (!Super) return;
4283
4284 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Decl: Super);
4285
4286 // Everything else is single inheritance.
4287 llvm::Constant *BaseTypeInfo =
4288 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: SuperTy);
4289 Fields.push_back(Elt: BaseTypeInfo);
4290}
4291
4292/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
4293/// inheritance, according to the Itanium C++ ABI, 2.95p6b.
4294void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
4295 // Itanium C++ ABI 2.9.5p6b:
4296 // It adds to abi::__class_type_info a single member pointing to the
4297 // type_info structure for the base type,
4298 llvm::Constant *BaseTypeInfo =
4299 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Ty: RD->bases_begin()->getType());
4300 Fields.push_back(Elt: BaseTypeInfo);
4301}
4302
namespace {
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
  /// a class hierarchy.
  struct SeenBases {
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
  };
}

/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
/// abi::__vmi_class_type_info.
///
static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
                                             SeenBases &Bases) {

  unsigned Flags = 0;

  auto *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());

  if (Base->isVirtual()) {
    // Mark the virtual base as seen.
    if (!Bases.VirtualBases.insert(BaseDecl).second) {
      // If this virtual base has been seen before, then the class is diamond
      // shaped.
      Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
    } else {
      if (Bases.NonVirtualBases.count(BaseDecl))
        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    }
  } else {
    // Mark the non-virtual base as seen.
    if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
      // If this non-virtual base has been seen before, then the class has non-
      // diamond shaped repeated inheritance.
      Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    } else {
      if (Bases.VirtualBases.count(BaseDecl))
        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    }
  }

  // Walk all bases.
  for (const auto &I : BaseDecl->bases())
    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);

  return Flags;
}

static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
  unsigned Flags = 0;
  SeenBases Bases;

  // Walk all bases.
  for (const auto &I : RD->bases())
    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);

  return Flags;
}
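
// Worked example (illustrative only): in the classic diamond
//
//   struct A { virtual ~A(); };
//   struct B : virtual A { };
//   struct C : virtual A { };
//   struct D : B, C { };
//
// walking D's bases reaches the virtual base A through both B and C; the
// second insertion into SeenBases::VirtualBases fails, so the flags for D
// include VMI_DiamondShaped. If A were instead a repeated non-virtual base,
// the second NonVirtualBases insertion would fail and the result would be
// VMI_NonDiamondRepeat.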

/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
/// classes with bases that do not satisfy the abi::__si_class_type_info
/// constraints, according to the Itanium C++ ABI, 2.9.5p6c.
void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
  llvm::Type *UnsignedIntLTy =
      CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);

  // Itanium C++ ABI 2.9.5p6c:
  //   __flags is a word with flags describing details about the class
  //   structure, which may be referenced by using the __flags_masks
  //   enumeration. These flags refer to both direct and indirect bases.
  unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));

  // Itanium C++ ABI 2.9.5p6c:
  //   __base_count is a word with the number of direct proper base class
  //   descriptions that follow.
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));

  if (!RD->getNumBases())
    return;

  // Now add the base class descriptions.

  // Itanium C++ ABI 2.9.5p6c:
  //   __base_info[] is an array of base class descriptions -- one for every
  //   direct proper base. Each description is of the type:
  //
  //   struct abi::__base_class_type_info {
  //   public:
  //     const __class_type_info *__base_type;
  //     long __offset_flags;
  //
  //     enum __offset_flags_masks {
  //       __virtual_mask = 0x1,
  //       __public_mask = 0x2,
  //       __offset_shift = 8
  //     };
  //   };

  // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
  // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
  // LLP64 platforms.
  // FIXME: Consider updating libc++abi to match, and extend this logic to all
  // LLP64 platforms.
  QualType OffsetFlagsTy = CGM.getContext().LongTy;
  const TargetInfo &TI = CGM.getContext().getTargetInfo();
  if (TI.getTriple().isOSCygMing() &&
      TI.getPointerWidth(LangAS::Default) > TI.getLongWidth())
    OffsetFlagsTy = CGM.getContext().LongLongTy;
  llvm::Type *OffsetFlagsLTy = CGM.getTypes().ConvertType(OffsetFlagsTy);

  for (const auto &Base : RD->bases()) {
    // The __base_type member points to the RTTI for the base type.
    Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));

    auto *BaseDecl =
        cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());

    int64_t OffsetFlags = 0;

    // All but the lower 8 bits of __offset_flags are a signed offset.
    // For a non-virtual base, this is the offset in the object of the base
    // subobject. For a virtual base, this is the offset in the virtual table
    // of the virtual base offset for the virtual base referenced (negative).
    CharUnits Offset;
    if (Base.isVirtual())
      Offset = CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(
          RD, BaseDecl);
    else {
      const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
      Offset = Layout.getBaseClassOffset(BaseDecl);
    }

    OffsetFlags = uint64_t(Offset.getQuantity()) << 8;

    // The low-order byte of __offset_flags contains flags, as given by the
    // masks from the enumeration __offset_flags_masks.
    if (Base.isVirtual())
      OffsetFlags |= BCTI_Virtual;
    if (Base.getAccessSpecifier() == AS_public)
      OffsetFlags |= BCTI_Public;

    Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
  }
}
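
// Worked example of the encoding above (illustrative only): for
//
//   struct Base1 { int x; };
//   struct Base2 { int y; };
//   struct Derived : public Base1, public Base2 { };
//
// Base2 typically lands at offset 4 within Derived (assuming a 4-byte int
// and an Itanium record layout with no vptr), so its __offset_flags value
// is (4 << 8) | __public_mask == 0x402. A public virtual base instead
// stores the (negative) vtable offset of its virtual-base-offset slot,
// shifted left by 8, with __virtual_mask | __public_mask in the low byte.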

/// Compute the flags for a __pbase_type_info, and remove the corresponding
/// pieces from \p Type.
static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
  unsigned Flags = 0;

  if (Type.isConstQualified())
    Flags |= ItaniumRTTIBuilder::PTI_Const;
  if (Type.isVolatileQualified())
    Flags |= ItaniumRTTIBuilder::PTI_Volatile;
  if (Type.isRestrictQualified())
    Flags |= ItaniumRTTIBuilder::PTI_Restrict;
  Type = Type.getUnqualifiedType();

  // Itanium C++ ABI 2.9.5p7:
  //   When the abi::__pbase_type_info is for a direct or indirect pointer to
  //   an incomplete class type, the incomplete target type flag is set.
  if (ContainsIncompleteClassType(Type))
    Flags |= ItaniumRTTIBuilder::PTI_Incomplete;

  if (auto *Proto = Type->getAs<FunctionProtoType>()) {
    if (Proto->isNothrow()) {
      Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
      Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
    }
  }

  return Flags;
}
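
// For example (illustrative only): for 'const volatile int *', the pointee
// handed to this function is 'const volatile int', so the result is
// PTI_Const | PTI_Volatile and Type is stripped down to plain 'int', whose
// type_info then becomes the __pointee field of the descriptor.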

/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
/// used for pointer types.
void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
  // Itanium C++ ABI 2.9.5p7:
  //   __flags is a flag word describing the cv-qualification and other
  //   attributes of the type pointed to
  unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);

  llvm::Type *UnsignedIntLTy =
      CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));

  // Itanium C++ ABI 2.9.5p7:
  //   __pointee is a pointer to the std::type_info derivation for the
  //   unqualified type being pointed to.
  llvm::Constant *PointeeTypeInfo =
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
  Fields.push_back(PointeeTypeInfo);
}

/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
/// struct, used for member pointer types.
void
ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
  QualType PointeeTy = Ty->getPointeeType();

  // Itanium C++ ABI 2.9.5p7:
  //   __flags is a flag word describing the cv-qualification and other
  //   attributes of the type pointed to.
  unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);

  const RecordType *ClassType = cast<RecordType>(Ty->getClass());
  if (IsIncompleteClassType(ClassType))
    Flags |= PTI_ContainingClassIncomplete;

  llvm::Type *UnsignedIntLTy =
      CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));

  // Itanium C++ ABI 2.9.5p7:
  //   __pointee is a pointer to the std::type_info derivation for the
  //   unqualified type being pointed to.
  llvm::Constant *PointeeTypeInfo =
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
  Fields.push_back(PointeeTypeInfo);

  // Itanium C++ ABI 2.9.5p9:
  //   __context is a pointer to an abi::__class_type_info corresponding to the
  //   class type containing the member pointed to
  //   (e.g., the "A" in "int A::*").
  Fields.push_back(
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
}

llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
  return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
}

void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
  // Types added here must also be added to TypeInfoIsInStandardLibrary.
  QualType FundamentalTypes[] = {
      getContext().VoidTy,             getContext().NullPtrTy,
      getContext().BoolTy,             getContext().WCharTy,
      getContext().CharTy,             getContext().UnsignedCharTy,
      getContext().SignedCharTy,       getContext().ShortTy,
      getContext().UnsignedShortTy,    getContext().IntTy,
      getContext().UnsignedIntTy,      getContext().LongTy,
      getContext().UnsignedLongTy,     getContext().LongLongTy,
      getContext().UnsignedLongLongTy, getContext().Int128Ty,
      getContext().UnsignedInt128Ty,   getContext().HalfTy,
      getContext().FloatTy,            getContext().DoubleTy,
      getContext().LongDoubleTy,       getContext().Float128Ty,
      getContext().Char8Ty,            getContext().Char16Ty,
      getContext().Char32Ty
  };
  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
      RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD)
          ? llvm::GlobalValue::DLLExportStorageClass
          : llvm::GlobalValue::DefaultStorageClass;
  llvm::GlobalValue::VisibilityTypes Visibility =
      CodeGenModule::GetLLVMVisibility(RD->getVisibility());
  for (const QualType &FundamentalType : FundamentalTypes) {
    QualType PointerType = getContext().getPointerType(FundamentalType);
    QualType PointerTypeConst =
        getContext().getPointerType(FundamentalType.withConst());
    for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
      ItaniumRTTIBuilder(*this).BuildTypeInfo(
          Type, llvm::GlobalValue::ExternalLinkage, Visibility,
          DLLStorageClass);
  }
}

/// What sort of uniqueness rules should we use for the RTTI for the
/// given type?
ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
    QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
  if (shouldRTTIBeUnique())
    return RUK_Unique;

  // It's only necessary for linkonce_odr or weak_odr linkage.
  if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
      Linkage != llvm::GlobalValue::WeakODRLinkage)
    return RUK_Unique;

  // It's only necessary with default visibility.
  if (CanTy->getVisibility() != DefaultVisibility)
    return RUK_Unique;

  // If we're not required to publish this symbol, hide it.
  if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
    return RUK_NonUniqueHidden;

  // If we're required to publish this symbol, as we might be under an
  // explicit instantiation, leave it with default visibility but
  // enable string-comparisons.
  assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
  return RUK_NonUniqueVisible;
}
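
// Putting the classification together (an illustrative sketch, assuming a
// target where shouldRTTIBeUnique() is false, such as ARM64 Apple
// platforms): the linkonce_odr type_info of an implicitly instantiated
// class template specialization with default visibility is classified
// RUK_NonUniqueHidden and gets hidden visibility, while an explicit
// instantiation emitted weak_odr is classified RUK_NonUniqueVisible and
// keeps default visibility; both get the string-comparison name bit.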

// Find out how to codegen the complete destructor and constructor
namespace {
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
}
static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
                                       const CXXMethodDecl *MD) {
  if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
    return StructorCodegen::Emit;

  // The complete and base structors are not equivalent if there are any
  // virtual bases, so emit separate functions.
  if (MD->getParent()->getNumVBases())
    return StructorCodegen::Emit;

  GlobalDecl AliasDecl;
  if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
    AliasDecl = GlobalDecl(DD, Dtor_Complete);
  } else {
    const auto *CD = cast<CXXConstructorDecl>(MD);
    AliasDecl = GlobalDecl(CD, Ctor_Complete);
  }
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
    return StructorCodegen::RAUW;

  // FIXME: Should we allow available_externally aliases?
  if (!llvm::GlobalAlias::isValidLinkage(Linkage))
    return StructorCodegen::RAUW;

  if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
    // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
    if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
        CGM.getTarget().getTriple().isOSBinFormatWasm())
      return StructorCodegen::COMDAT;
    return StructorCodegen::Emit;
  }

  return StructorCodegen::Alias;
}
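
// To make the strategies concrete (illustrative only): for
//
//   struct S { ~S(); };   // no virtual bases
//
// the Itanium ABI mangles the complete and base destructors separately
// (_ZN1SD1Ev and _ZN1SD2Ev), yet their bodies are identical. With
// -mconstructor-aliases, Alias emits D1 as a true GlobalAlias of D2,
// COMDAT places both symbols in one comdat group keyed by the D5 "comdat"
// mangling, and RAUW simply redirects references to D1 at the D2 function
// without emitting a second symbol.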

static void emitConstructorDestructorAlias(CodeGenModule &CGM,
                                           GlobalDecl AliasDecl,
                                           GlobalDecl TargetDecl) {
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  StringRef MangledName = CGM.getMangledName(AliasDecl);
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));

  // Create the alias with no name.
  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);

  // Constructors and destructors are always unnamed_addr.
  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Switch any previous uses to the alias.
  if (Entry) {
    assert(Entry->getType() == Aliasee->getType() &&
           "declaration exists with different type");
    Alias->takeName(Entry);
    Entry->replaceAllUsesWith(Alias);
    Entry->eraseFromParent();
  } else {
    Alias->setName(MangledName);
  }

  // Finally, set up the alias with its proper name and attributes.
  CGM.SetCommonAttributes(AliasDecl, Alias);
}

void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);

  StructorCodegen CGType = getCodegenToUse(CGM, MD);

  if (CD ? GD.getCtorType() == Ctor_Complete
         : GD.getDtorType() == Dtor_Complete) {
    GlobalDecl BaseDecl;
    if (CD)
      BaseDecl = GD.getWithCtorType(Ctor_Base);
    else
      BaseDecl = GD.getWithDtorType(Dtor_Base);

    if (CGType == StructorCodegen::Alias ||
        CGType == StructorCodegen::COMDAT) {
      emitConstructorDestructorAlias(CGM, GD, BaseDecl);
      return;
    }

    if (CGType == StructorCodegen::RAUW) {
      StringRef MangledName = CGM.getMangledName(GD);
      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
      CGM.addReplacement(MangledName, Aliasee);
      return;
    }
  }

  // The base destructor is equivalent to the base destructor of its
  // base class if there is exactly one non-virtual base class with a
  // non-trivial destructor, there are no fields with a non-trivial
  // destructor, and the body of the destructor is trivial.
  if (DD && GD.getDtorType() == Dtor_Base &&
      CGType != StructorCodegen::COMDAT &&
      !CGM.TryEmitBaseDestructorAsAlias(DD))
    return;

  // FIXME: The deleting destructor is equivalent to the selected operator
  // delete if:
  //  * either the delete is a destroying operator delete or the destructor
  //    would be trivial if it weren't virtual,
  //  * the conversion from the 'this' parameter to the first parameter of the
  //    destructor is equivalent to a bitcast,
  //  * the destructor does not have an implicit "this" return, and
  //  * the operator delete has the same calling convention and IR function
  //    type as the destructor.
  // In such cases we should try to emit the deleting dtor as an alias to the
  // selected 'operator delete'.

  llvm::Function *Fn = CGM.codegenCXXStructor(GD);

  if (CGType == StructorCodegen::COMDAT) {
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    if (DD)
      getMangleContext().mangleCXXDtorComdat(DD, Out);
    else
      getMangleContext().mangleCXXCtorComdat(CD, Out);
    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
    Fn->setComdat(C);
  } else {
    CGM.maybeSetTrivialComdat(*MD, *Fn);
  }
}

static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
  // void *__cxa_begin_catch(void*);
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
}

static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
  // void __cxa_end_catch();
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
}

static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
  // void *__cxa_get_exception_ptr(void*);
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
}

namespace {
  /// A cleanup to call __cxa_end_catch.  In many cases, the caught
  /// exception type lets us state definitively that the thrown exception
  /// type does not have a destructor.  In particular:
  ///   - Catch-alls tell us nothing, so we have to conservatively
  ///     assume that the thrown exception might have a destructor.
  ///   - Catches by reference behave according to their base types.
  ///   - Catches of non-record types will only trigger for exceptions
  ///     of non-record types, which never have destructors.
  ///   - Catches of record types can trigger for arbitrary subclasses
  ///     of the caught type, so we have to assume the actual thrown
  ///     exception type might have a throwing destructor, even if the
  ///     caught type's destructor is trivial or nothrow.
  struct CallEndCatch final : EHScopeStack::Cleanup {
    CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
    bool MightThrow;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      if (!MightThrow) {
        CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
        return;
      }

      CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
    }
  };
}

/// Emits a call to __cxa_begin_catch and enters a cleanup to call
/// __cxa_end_catch. If -fassume-nothrow-exception-dtor is specified, we assume
/// that the exception object's dtor is nothrow, therefore the __cxa_end_catch
/// call can be marked as nounwind even if EndMightThrow is true.
///
/// \param EndMightThrow - true if __cxa_end_catch might throw
static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
                                   llvm::Value *Exn,
                                   bool EndMightThrow) {
  llvm::CallInst *call =
      CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);

  CGF.EHStack.pushCleanup<CallEndCatch>(
      NormalAndEHCleanup,
      EndMightThrow && !CGF.CGM.getLangOpts().AssumeNothrowExceptionDtor);

  return call;
}

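// The net effect (an illustrative sketch) for a handler like 'catch (T &t)'
// is IR along the lines of:
//
//   %exn.adj = call ptr @__cxa_begin_catch(ptr %exn)   ; nounwind
//   ; ...handler body...
//   call void @__cxa_end_catch()                       ; from the cleanup
//
// with the __cxa_end_catch call emitted as an invoke when the caught type
// suggests the exception's destructor might throw.
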
/// A "special initializer" callback for initializing a catch
/// parameter during catch initialization.
static void InitCatchParam(CodeGenFunction &CGF,
                           const VarDecl &CatchParam,
                           Address ParamAddr,
                           SourceLocation Loc) {
  // Load the exception from where the landing pad saved it.
  llvm::Value *Exn = CGF.getExceptionFromSlot();

  CanQualType CatchType =
      CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);

  // If we're catching by reference, we can just cast the object
  // pointer to the appropriate pointer.
  if (isa<ReferenceType>(CatchType)) {
    QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
    bool EndCatchMightThrow = CaughtType->isRecordType();

    // __cxa_begin_catch returns the adjusted object pointer.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);

    // We have no way to tell the personality function that we're
    // catching by reference, so if we're catching a pointer,
    // __cxa_begin_catch will actually return that pointer by value.
    if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
      QualType PointeeType = PT->getPointeeType();

      // When catching by reference, generally we should just ignore
      // this by-value pointer and use the exception object instead.
      if (!PointeeType->isRecordType()) {

        // Exn points to the struct _Unwind_Exception header, which
        // we have to skip past in order to reach the exception data.
        unsigned HeaderSize =
            CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
        AdjustedExn =
            CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);

      // However, if we're catching a pointer-to-record type, that won't
      // work, because the personality function might have adjusted
      // the pointer.  There's actually no way for us to fully satisfy
      // the language/ABI contract here:  we can't use Exn because it
      // might have the wrong adjustment, but we can't use the by-value
      // pointer because it's off by a level of abstraction.
      //
      // The current solution is to dump the adjusted pointer into an
      // alloca, which breaks language semantics (because changing the
      // pointer doesn't change the exception) but at least works.
      // The better solution would be to filter out non-exact matches
      // and rethrow them, but this is tricky because the rethrow
      // really needs to be catchable by other sites at this landing
      // pad.  The best solution is to fix the personality function.
      } else {
        // Pull the pointer for the reference type off.
        llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType);

        // Create the temporary and write the adjusted pointer into it.
        Address ExnPtrTmp =
            CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
        llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
        CGF.Builder.CreateStore(Casted, ExnPtrTmp);

        // Bind the reference to the temporary.
        AdjustedExn = ExnPtrTmp.emitRawPointer(CGF);
      }
    }

    llvm::Value *ExnCast =
        CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
    CGF.Builder.CreateStore(ExnCast, ParamAddr);
    return;
  }

  // Scalars and complexes.
  TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
  if (TEK != TEK_Aggregate) {
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);

    // If the catch type is a pointer type, __cxa_begin_catch returns
    // the pointer by value.
    if (CatchType->hasPointerRepresentation()) {
      llvm::Value *CastExn =
          CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");

      switch (CatchType.getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
        [[fallthrough]];

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(CastExn, ParamAddr);
        return;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(ParamAddr, CastExn);
        return;
      }
      llvm_unreachable("bad ownership qualifier!");
    }

    // Otherwise, it returns a pointer into the exception object.

    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(AdjustedExn, CatchType);
    LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
    switch (TEK) {
    case TEK_Complex:
      CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
                             /*init*/ true);
      return;
    case TEK_Scalar: {
      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
      CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
      return;
    }
    case TEK_Aggregate:
      llvm_unreachable("evaluation kind filtered out!");
    }
    llvm_unreachable("bad evaluation kind");
  }

  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
  auto catchRD = CatchType->getAsCXXRecordDecl();
  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);

  llvm::Type *PtrTy = CGF.UnqualPtrTy; // addrspace 0 ok

  // Check for a copy expression.  If we don't have a copy expression,
  // that means a trivial copy is okay.
  const Expr *copyExpr = CatchParam.getInit();
  if (!copyExpr) {
    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
    Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                        LLVMCatchTy, caughtExnAlignment);
    LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
    LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
    CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
    return;
  }

  // We have to call __cxa_get_exception_ptr to get the adjusted
  // pointer before copying.
  llvm::CallInst *rawAdjustedExn =
      CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);

  // Cast that to the appropriate type.
  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                      LLVMCatchTy, caughtExnAlignment);

  // The copy expression is defined in terms of an OpaqueValueExpr.
  // Find it and map it to the adjusted expression.
  CodeGenFunction::OpaqueValueMapping
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));

  // Call the copy ctor in a terminate scope.
  CGF.EHStack.pushTerminate();

  // Perform the copy construction.
  CGF.EmitAggExpr(copyExpr,
                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        AggValueSlot::DoesNotOverlap));

  // Leave the terminate scope.
  CGF.EHStack.popTerminate();

  // Undo the opaque value mapping.
  opaque.pop();

  // Finally we can call __cxa_begin_catch.
  CallBeginCatch(CGF, Exn, true);
}

/// Begins a catch statement by initializing the catch variable and
/// calling __cxa_begin_catch.
void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                   const CXXCatchStmt *S) {
  // We have to be very careful with the ordering of cleanups here:
  //   C++ [except.throw]p4:
  //     The destruction [of the exception temporary] occurs
  //     immediately after the destruction of the object declared in
  //     the exception-declaration in the handler.
  //
  // So the precise ordering is:
  //   1.  Construct catch variable.
  //   2.  __cxa_begin_catch
  //   3.  Enter __cxa_end_catch cleanup
  //   4.  Enter dtor cleanup
  //
  // We do this by using a slightly abnormal initialization process.
  // Delegation sequence:
  //   - ExitCXXTryStmt opens a RunCleanupsScope
  //   - EmitAutoVarAlloca creates the variable and debug info
  //   - InitCatchParam initializes the variable from the exception
  //   - CallBeginCatch calls __cxa_begin_catch
  //   - CallBeginCatch enters the __cxa_end_catch cleanup
  //   - EmitAutoVarCleanups enters the variable destructor cleanup
  //   - EmitCXXTryStmt emits the code for the catch body
  //   - EmitCXXTryStmt closes the RunCleanupsScope

  VarDecl *CatchParam = S->getExceptionDecl();
  if (!CatchParam) {
    llvm::Value *Exn = CGF.getExceptionFromSlot();
    CallBeginCatch(CGF, Exn, true);
    return;
  }

  // Emit the local.
  CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
  InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF),
                 S->getBeginLoc());
  CGF.EmitAutoVarCleanups(var);
}

/// Get or define the following function:
///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
/// This code is used only in C++.
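///
/// Conceptually, the function defined below behaves like this C++ sketch
/// (illustrative only; the real definition is built directly as IR):
///
///   extern "C" void __clang_call_terminate(void *exn) {
///     __cxa_begin_catch(exn);   // mark the exception as handled
///     std::terminate();         // never returns
///   }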
static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
  ASTContext &C = CGM.getContext();
  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      C.VoidTy, {C.getPointerType(C.CharTy)});
  llvm::FunctionType *fnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
      fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
  llvm::Function *fn =
      cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
  if (fn->empty()) {
    CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, fn, /*IsThunk=*/false);
    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, fn);
    fn->setDoesNotThrow();
    fn->setDoesNotReturn();

    // What we really want is to massively penalize inlining without
    // forbidding it completely.  The difference between that and
    // 'noinline' is negligible.
    fn->addFnAttr(llvm::Attribute::NoInline);

    // Allow this function to be shared across translation units, but
    // we don't want it to turn into an exported symbol.
    fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
    fn->setVisibility(llvm::Function::HiddenVisibility);
    if (CGM.supportsCOMDAT())
      fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));

    // Set up the function.
    llvm::BasicBlock *entry =
        llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
    CGBuilderTy builder(CGM, entry);

    // Pull the exception pointer out of the parameter list.
    llvm::Value *exn = &*fn->arg_begin();

    // Call __cxa_begin_catch(exn).
    llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
    catchCall->setDoesNotThrow();
    catchCall->setCallingConv(CGM.getRuntimeCC());

    // Call std::terminate().
    llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
    termCall->setDoesNotThrow();
    termCall->setDoesNotReturn();
    termCall->setCallingConv(CGM.getRuntimeCC());

    // std::terminate cannot return.
    builder.CreateUnreachable();
  }
  return fnRef;
}

llvm::CallInst *
ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                                   llvm::Value *Exn) {
  // In C++, we want to call __cxa_begin_catch() before terminating.
  if (Exn) {
    assert(CGF.CGM.getLangOpts().CPlusPlus);
    return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
  }
  return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
}

std::pair<llvm::Value *, const CXXRecordDecl *>
ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
                             const CXXRecordDecl *RD) {
  return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
}

llvm::Constant *
ItaniumCXXABI::getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD) {
  const CXXMethodDecl *origMD =
      cast<CXXMethodDecl>(CGM.getItaniumVTableContext()
                              .findOriginalMethod(MD->getCanonicalDecl())
                              .getDecl());
  llvm::Constant *thunk = getOrCreateVirtualFunctionPointerThunk(origMD);
  QualType funcType = CGM.getContext().getMemberPointerType(
      MD->getType(), MD->getParent()->getTypeForDecl());
  return CGM.getMemberFunctionPointer(thunk, funcType);
}

void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                       const CXXCatchStmt *C) {
  if (CGF.getTarget().hasFeature("exception-handling"))
    CGF.EHStack.pushCleanup<CatchRetScope>(
        NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
  ItaniumCXXABI::emitBeginCatch(CGF, C);
}

llvm::CallInst *
WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                                       llvm::Value *Exn) {
  // The Itanium ABI calls __clang_call_terminate(), which runs
  // __cxa_begin_catch() on the violating exception to mark it handled.
  // Because that is currently hard to express with the wasm EH instruction
  // structure (catch/catch_all), we just call std::terminate and ignore the
  // violating exception, as in CGCXXABI.
  // TODO Consider code transformation that makes calling
  // __clang_call_terminate possible.
  return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
}

/// Register a global destructor as best as we know how.
void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                                  llvm::FunctionCallee Dtor,
                                  llvm::Constant *Addr) {
  if (D.getTLSKind() != VarDecl::TLS_None) {
    llvm::PointerType *PtrTy = CGF.UnqualPtrTy;

    // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
    llvm::FunctionType *AtExitTy =
        llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, PtrTy}, true);

    // Fetch the actual function.
    llvm::FunctionCallee AtExit =
        CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");

    // Create __dtor function for the var decl.
    llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);

    // Register the above __dtor with atexit(). The first parameter is flags
    // and must be 0; the second is the function pointer.
    llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
    CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});

    // A TLS __dtor cannot be unregistered, so we are done.
    return;
  }

  // Create __dtor function for the var decl.
  llvm::Function *DtorStub =
      cast<llvm::Function>(CGF.createAtExitStub(D, Dtor, Addr));

  // Register above __dtor with atexit().
  CGF.registerGlobalDtorWithAtExit(DtorStub);

  // Emit __finalize function to unregister __dtor and (as appropriate) call
  // __dtor.
  emitCXXStermFinalizer(D, DtorStub, Addr);
}

void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                                     llvm::Constant *addr) {
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getMangleContext().mangleDynamicStermFinalizer(&D, Out);
  }

  // Create the finalization action associated with a variable.
  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, FnName.str(), FI, D.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
                    FunctionArgList(), D.getLocation(),
                    D.getInit()->getExprLoc());

  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // unatexit returns 0, meaning that the cleanup is still pending (and we
  // should call the __dtor function).
  llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);

  llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");

  llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");

  // Check if unatexit returns a value of 0. If it does, jump to
  // DestructCallBlock; otherwise jump to EndBlock directly.
  CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

  CGF.EmitBlock(DestructCallBlock);

  // Emit the call to dtorStub.
  llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);

  // Make sure the call and the callee agree on calling convention.
  CI->setCallingConv(dtorStub->getCallingConv());

  CGF.EmitBlock(EndBlock);

  CGF.FinishFunction();

  if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
    CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
                                             IPA->getPriority());
  } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
    // According to C++ [basic.start.init]p2, class template static data
    // members (i.e., implicitly or explicitly instantiated specializations)
    // have unordered initialization. As a consequence, we can put them into
    // their own llvm.global_dtors entry.
    CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
  } else {
    CGM.AddCXXStermFinalizerEntry(StermFinalizer);
  }
}