//===--- CGVTables.cpp - Emit LLVM Code for C++ vtables ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of virtual tables.
//
//===----------------------------------------------------------------------===//
12
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Format.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <algorithm>
#include <cstdio>
#include <utility>

using namespace clang;
using namespace CodeGen;
31
CodeGenVTables::CodeGenVTables(CodeGenModule &CGM)
    : CGM(CGM), VTContext(CGM.getContext().getVTableContext()) {}

llvm::Constant *CodeGenModule::GetAddrOfThunk(StringRef Name, llvm::Type *FnTy,
                                              GlobalDecl GD) {
  return GetOrCreateLLVMFunction(Name, FnTy, GD, /*ForVTable=*/true,
                                 /*DontDefer=*/true, /*IsThunk=*/true);
}
40
static void setThunkProperties(CodeGenModule &CGM, const ThunkInfo &Thunk,
                               llvm::Function *ThunkFn, bool ForVTable,
                               GlobalDecl GD) {
  CGM.setFunctionLinkage(GD, ThunkFn);
  CGM.getCXXABI().setThunkLinkage(ThunkFn, ForVTable, GD,
                                  !Thunk.Return.isEmpty());

  // Set the right visibility.
  CGM.setGVProperties(ThunkFn, GD);

  if (!CGM.getCXXABI().exportThunk()) {
    ThunkFn->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
    ThunkFn->setDSOLocal(true);
  }

  if (CGM.supportsCOMDAT() && ThunkFn->isWeakForLinker())
    ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName()));
}
59
#ifndef NDEBUG
static bool similar(const ABIArgInfo &infoL, CanQualType typeL,
                    const ABIArgInfo &infoR, CanQualType typeR) {
  return (infoL.getKind() == infoR.getKind() &&
          (typeL == typeR ||
           (isa<PointerType>(typeL) && isa<PointerType>(typeR)) ||
           (isa<ReferenceType>(typeL) && isa<ReferenceType>(typeR))));
}
#endif
69
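// A minimal illustration (hypothetical types, shown only as a sketch) of when
// a return adjustment is needed: D::clone covariantly overrides B::clone, and
// B is a non-primary base of D, so the thunk emitted for the B-in-D vtable
// slot must convert the returned D* into a B* by adding the offset of the B
// subobject, skipping the adjustment when the returned pointer is null:
//
//   struct A { virtual ~A(); };
//   struct B { virtual B *clone(); };
//   struct D : A, B { D *clone() override; };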
70static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
71 QualType ResultType, RValue RV,
72 const ThunkInfo &Thunk) {
73 // Emit the return adjustment.
74 bool NullCheckValue = !ResultType->isReferenceType();
75
76 llvm::BasicBlock *AdjustNull = nullptr;
77 llvm::BasicBlock *AdjustNotNull = nullptr;
78 llvm::BasicBlock *AdjustEnd = nullptr;
79
80 llvm::Value *ReturnValue = RV.getScalarVal();
81
82 if (NullCheckValue) {
83 AdjustNull = CGF.createBasicBlock(name: "adjust.null");
84 AdjustNotNull = CGF.createBasicBlock(name: "adjust.notnull");
85 AdjustEnd = CGF.createBasicBlock(name: "adjust.end");
86
87 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Arg: ReturnValue);
88 CGF.Builder.CreateCondBr(Cond: IsNull, True: AdjustNull, False: AdjustNotNull);
89 CGF.EmitBlock(BB: AdjustNotNull);
90 }
91
92 auto ClassDecl = ResultType->getPointeeType()->getAsCXXRecordDecl();
93 auto ClassAlign = CGF.CGM.getClassPointerAlignment(CD: ClassDecl);
94 ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(
95 CGF,
96 Ret: Address(ReturnValue, CGF.ConvertTypeForMem(T: ResultType->getPointeeType()),
97 ClassAlign),
98 UnadjustedClass: ClassDecl, RA: Thunk.Return);
99
100 if (NullCheckValue) {
101 CGF.Builder.CreateBr(Dest: AdjustEnd);
102 CGF.EmitBlock(BB: AdjustNull);
103 CGF.Builder.CreateBr(Dest: AdjustEnd);
104 CGF.EmitBlock(BB: AdjustEnd);
105
106 llvm::PHINode *PHI = CGF.Builder.CreatePHI(Ty: ReturnValue->getType(), NumReservedValues: 2);
107 PHI->addIncoming(V: ReturnValue, BB: AdjustNotNull);
108 PHI->addIncoming(V: llvm::Constant::getNullValue(Ty: ReturnValue->getType()),
109 BB: AdjustNull);
110 ReturnValue = PHI;
111 }
112
113 return RValue::get(V: ReturnValue);
114}
115
116/// This function clones a function's DISubprogram node and enters it into
117/// a value map with the intent that the map can be utilized by the cloner
118/// to short-circuit Metadata node mapping.
/// Furthermore, the function resolves any DILocalVariable nodes referenced
/// by debug variable records and intrinsics so they can be properly mapped
/// during cloning.
121static void resolveTopLevelMetadata(llvm::Function *Fn,
122 llvm::ValueToValueMapTy &VMap) {
123 // Clone the DISubprogram node and put it into the Value map.
124 auto *DIS = Fn->getSubprogram();
125 if (!DIS)
126 return;
127 auto *NewDIS = DIS->replaceWithDistinct(N: DIS->clone());
128 VMap.MD()[DIS].reset(MD: NewDIS);
129
  // Find all debug variable records and llvm.dbg.* intrinsics and resolve the
  // DILocalVariable nodes they are referencing.
132 for (auto &BB : *Fn) {
133 for (auto &I : BB) {
134 for (llvm::DbgVariableRecord &DVR :
135 llvm::filterDbgVars(R: I.getDbgRecordRange())) {
136 auto *DILocal = DVR.getVariable();
137 if (!DILocal->isResolved())
138 DILocal->resolve();
139 }
140 if (auto *DII = dyn_cast<llvm::DbgVariableIntrinsic>(Val: &I)) {
141 auto *DILocal = DII->getVariable();
142 if (!DILocal->isResolved())
143 DILocal->resolve();
144 }
145 }
146 }
147}
148
149// This function does roughly the same thing as GenerateThunk, but in a
150// very different way, so that va_start and va_end work correctly.
151// FIXME: This function assumes "this" is the first non-sret LLVM argument of
152// a function, and that there is an alloca built in the entry block
153// for all accesses to "this".
154// FIXME: This function assumes there is only one "ret" statement per function.
155// FIXME: Cloning isn't correct in the presence of indirect goto!
156// FIXME: This implementation of thunks bloats codesize by duplicating the
157// function definition. There are alternatives:
158// 1. Add some sort of stub support to LLVM for cases where we can
159// do a this adjustment, then a sibcall.
160// 2. We could transform the definition to take a va_list instead of an
161// actual variable argument list, then have the thunks (including a
162// no-op thunk for the regular definition) call va_start/va_end.
163// There's a bit of per-call overhead for this solution, but it's
164// better for codesize if the definition is long.
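//
// As a sketch of the kind of code that reaches this path (the types here are
// illustrative only): D::log overrides a variadic virtual function from a
// non-primary base, so its thunk must adjust "this" but cannot portably
// forward the variadic arguments; where the musttail forwarding used by
// maybeEmitThunk is unavailable, the body of D::log is cloned instead.
//
//   struct A { virtual ~A(); };
//   struct B { virtual void log(const char *fmt, ...); };
//   struct D : A, B { void log(const char *fmt, ...) override; };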
165llvm::Function *
166CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn,
167 const CGFunctionInfo &FnInfo,
168 GlobalDecl GD, const ThunkInfo &Thunk) {
169 const CXXMethodDecl *MD = cast<CXXMethodDecl>(Val: GD.getDecl());
170 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
171 QualType ResultType = FPT->getReturnType();
172
173 // Get the original function
174 assert(FnInfo.isVariadic());
175 llvm::Type *Ty = CGM.getTypes().GetFunctionType(Info: FnInfo);
176 llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
177 llvm::Function *BaseFn = cast<llvm::Function>(Val: Callee);
178
179 // Cloning can't work if we don't have a definition. The Microsoft ABI may
180 // require thunks when a definition is not available. Emit an error in these
181 // cases.
182 if (!MD->isDefined()) {
183 CGM.ErrorUnsupported(D: MD, Type: "return-adjusting thunk with variadic arguments");
184 return Fn;
185 }
186 assert(!BaseFn->isDeclaration() && "cannot clone undefined variadic method");
187
188 // Clone to thunk.
189 llvm::ValueToValueMapTy VMap;
190
191 // We are cloning a function while some Metadata nodes are still unresolved.
192 // Ensure that the value mapper does not encounter any of them.
193 resolveTopLevelMetadata(Fn: BaseFn, VMap);
194 llvm::Function *NewFn = llvm::CloneFunction(F: BaseFn, VMap);
195 Fn->replaceAllUsesWith(V: NewFn);
196 NewFn->takeName(V: Fn);
197 Fn->eraseFromParent();
198 Fn = NewFn;
199
200 // "Initialize" CGF (minimally).
201 CurFn = Fn;
202
203 // Get the "this" value
204 llvm::Function::arg_iterator AI = Fn->arg_begin();
205 if (CGM.ReturnTypeUsesSRet(FI: FnInfo))
206 ++AI;
207
208 // Find the first store of "this", which will be to the alloca associated
209 // with "this".
210 Address ThisPtr = makeNaturalAddressForPointer(
211 Ptr: &*AI, T: MD->getFunctionObjectParameterType(),
212 Alignment: CGM.getClassPointerAlignment(CD: MD->getParent()));
213 llvm::BasicBlock *EntryBB = &Fn->front();
214 llvm::BasicBlock::iterator ThisStore =
215 llvm::find_if(Range&: *EntryBB, P: [&](llvm::Instruction &I) {
216 return isa<llvm::StoreInst>(Val: I) && I.getOperand(i: 0) == &*AI;
217 });
218 assert(ThisStore != EntryBB->end() &&
219 "Store of this should be in entry block?");
220 // Adjust "this", if necessary.
221 Builder.SetInsertPoint(&*ThisStore);
222
223 const CXXRecordDecl *ThisValueClass = Thunk.ThisType->getPointeeCXXRecordDecl();
224 llvm::Value *AdjustedThisPtr = CGM.getCXXABI().performThisAdjustment(
225 CGF&: *this, This: ThisPtr, UnadjustedClass: ThisValueClass, TI: Thunk);
226 AdjustedThisPtr = Builder.CreateBitCast(V: AdjustedThisPtr,
227 DestTy: ThisStore->getOperand(i: 0)->getType());
228 ThisStore->setOperand(i: 0, Val: AdjustedThisPtr);
229
230 if (!Thunk.Return.isEmpty()) {
231 // Fix up the returned value, if necessary.
232 for (llvm::BasicBlock &BB : *Fn) {
233 llvm::Instruction *T = BB.getTerminator();
234 if (isa<llvm::ReturnInst>(Val: T)) {
235 RValue RV = RValue::get(V: T->getOperand(i: 0));
236 T->eraseFromParent();
237 Builder.SetInsertPoint(&BB);
238 RV = PerformReturnAdjustment(CGF&: *this, ResultType, RV, Thunk);
239 Builder.CreateRet(V: RV.getScalarVal());
240 break;
241 }
242 }
243 }
244
245 return Fn;
246}
247
248void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD,
249 const CGFunctionInfo &FnInfo,
250 bool IsUnprototyped) {
251 assert(!CurGD.getDecl() && "CurGD was already set!");
252 CurGD = GD;
253 CurFuncIsThunk = true;
254
255 // Build FunctionArgs.
256 const CXXMethodDecl *MD = cast<CXXMethodDecl>(Val: GD.getDecl());
257 QualType ThisType = MD->getThisType();
258 QualType ResultType;
259 if (IsUnprototyped)
260 ResultType = CGM.getContext().VoidTy;
261 else if (CGM.getCXXABI().HasThisReturn(GD))
262 ResultType = ThisType;
263 else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
264 ResultType = CGM.getContext().VoidPtrTy;
265 else
266 ResultType = MD->getType()->castAs<FunctionProtoType>()->getReturnType();
267 FunctionArgList FunctionArgs;
268
269 // Create the implicit 'this' parameter declaration.
270 CGM.getCXXABI().buildThisParam(CGF&: *this, Params&: FunctionArgs);
271
272 // Add the rest of the parameters, if we have a prototype to work with.
273 if (!IsUnprototyped) {
274 FunctionArgs.append(in_start: MD->param_begin(), in_end: MD->param_end());
275
276 if (isa<CXXDestructorDecl>(Val: MD))
277 CGM.getCXXABI().addImplicitStructorParams(CGF&: *this, ResTy&: ResultType,
278 Params&: FunctionArgs);
279 }
280
281 // Start defining the function.
282 auto NL = ApplyDebugLocation::CreateEmpty(CGF&: *this);
283 StartFunction(GD: GlobalDecl(), RetTy: ResultType, Fn, FnInfo, Args: FunctionArgs,
284 Loc: MD->getLocation());
285 // Create a scope with an artificial location for the body of this function.
286 auto AL = ApplyDebugLocation::CreateArtificial(CGF&: *this);
287
288 // Since we didn't pass a GlobalDecl to StartFunction, do this ourselves.
289 CGM.getCXXABI().EmitInstanceFunctionProlog(CGF&: *this);
290 CXXThisValue = CXXABIThisValue;
291 CurCodeDecl = MD;
292 CurFuncDecl = MD;
293}
294
295void CodeGenFunction::FinishThunk() {
296 // Clear these to restore the invariants expected by
297 // StartFunction/FinishFunction.
298 CurCodeDecl = nullptr;
299 CurFuncDecl = nullptr;
300
301 FinishFunction();
302}
303
304void CodeGenFunction::EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
305 const ThunkInfo *Thunk,
306 bool IsUnprototyped) {
307 assert(isa<CXXMethodDecl>(CurGD.getDecl()) &&
308 "Please use a new CGF for this thunk");
309 const CXXMethodDecl *MD = cast<CXXMethodDecl>(Val: CurGD.getDecl());
310
311 // Adjust the 'this' pointer if necessary
312 const CXXRecordDecl *ThisValueClass =
313 MD->getThisType()->getPointeeCXXRecordDecl();
314 if (Thunk)
315 ThisValueClass = Thunk->ThisType->getPointeeCXXRecordDecl();
316
317 llvm::Value *AdjustedThisPtr =
318 Thunk ? CGM.getCXXABI().performThisAdjustment(CGF&: *this, This: LoadCXXThisAddress(),
319 UnadjustedClass: ThisValueClass, TI: *Thunk)
320 : LoadCXXThis();
321
  // If perfect forwarding is required for a variadic method, a method using
  // inalloca, or an unprototyped thunk, use musttail. Emit an error if this
  // thunk requires a return adjustment, since that is impossible with musttail.
325 if (CurFnInfo->usesInAlloca() || CurFnInfo->isVariadic() || IsUnprototyped) {
326 if (Thunk && !Thunk->Return.isEmpty()) {
327 if (IsUnprototyped)
328 CGM.ErrorUnsupported(
329 D: MD, Type: "return-adjusting thunk with incomplete parameter type");
330 else if (CurFnInfo->isVariadic())
331 llvm_unreachable("shouldn't try to emit musttail return-adjusting "
332 "thunks for variadic functions");
333 else
334 CGM.ErrorUnsupported(
335 D: MD, Type: "non-trivial argument copy for return-adjusting thunk");
336 }
337 EmitMustTailThunk(GD: CurGD, AdjustedThisPtr, Callee);
338 return;
339 }
340
341 // Start building CallArgs.
342 CallArgList CallArgs;
343 QualType ThisType = MD->getThisType();
344 CallArgs.add(rvalue: RValue::get(V: AdjustedThisPtr), type: ThisType);
345
346 if (isa<CXXDestructorDecl>(Val: MD))
347 CGM.getCXXABI().adjustCallArgsForDestructorThunk(CGF&: *this, GD: CurGD, CallArgs);
348
349#ifndef NDEBUG
350 unsigned PrefixArgs = CallArgs.size() - 1;
351#endif
352 // Add the rest of the arguments.
353 for (const ParmVarDecl *PD : MD->parameters())
354 EmitDelegateCallArg(args&: CallArgs, param: PD, loc: SourceLocation());
355
356 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
357
358#ifndef NDEBUG
359 const CGFunctionInfo &CallFnInfo = CGM.getTypes().arrangeCXXMethodCall(
360 CallArgs, FPT, RequiredArgs::forPrototypePlus(FPT, 1), PrefixArgs);
361 assert(CallFnInfo.getRegParm() == CurFnInfo->getRegParm() &&
362 CallFnInfo.isNoReturn() == CurFnInfo->isNoReturn() &&
363 CallFnInfo.getCallingConvention() == CurFnInfo->getCallingConvention());
364 assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
365 similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
366 CurFnInfo->getReturnInfo(), CurFnInfo->getReturnType()));
367 assert(CallFnInfo.arg_size() == CurFnInfo->arg_size());
368 for (unsigned i = 0, e = CurFnInfo->arg_size(); i != e; ++i)
369 assert(similar(CallFnInfo.arg_begin()[i].info,
370 CallFnInfo.arg_begin()[i].type,
371 CurFnInfo->arg_begin()[i].info,
372 CurFnInfo->arg_begin()[i].type));
373#endif
374
375 // Determine whether we have a return value slot to use.
376 QualType ResultType = CGM.getCXXABI().HasThisReturn(GD: CurGD)
377 ? ThisType
378 : CGM.getCXXABI().hasMostDerivedReturn(GD: CurGD)
379 ? CGM.getContext().VoidPtrTy
380 : FPT->getReturnType();
381 ReturnValueSlot Slot;
382 if (!ResultType->isVoidType() &&
383 (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect ||
384 hasAggregateEvaluationKind(T: ResultType)))
385 Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified(),
386 /*IsUnused=*/false, /*IsExternallyDestructed=*/true);
387
388 // Now emit our call.
389 llvm::CallBase *CallOrInvoke;
390 RValue RV = EmitCall(CallInfo: *CurFnInfo, Callee: CGCallee::forDirect(functionPtr: Callee, abstractInfo: CurGD), ReturnValue: Slot,
391 Args: CallArgs, callOrInvoke: &CallOrInvoke);
392
393 // Consider return adjustment if we have ThunkInfo.
394 if (Thunk && !Thunk->Return.isEmpty())
395 RV = PerformReturnAdjustment(CGF&: *this, ResultType, RV, Thunk: *Thunk);
396 else if (llvm::CallInst* Call = dyn_cast<llvm::CallInst>(Val: CallOrInvoke))
397 Call->setTailCallKind(llvm::CallInst::TCK_Tail);
398
399 // Emit return.
400 if (!ResultType->isVoidType() && Slot.isNull())
401 CGM.getCXXABI().EmitReturnFromThunk(CGF&: *this, RV, ResultType);
402
403 // Disable the final ARC autorelease.
404 AutoreleaseResult = false;
405
406 FinishThunk();
407}
408
409void CodeGenFunction::EmitMustTailThunk(GlobalDecl GD,
410 llvm::Value *AdjustedThisPtr,
411 llvm::FunctionCallee Callee) {
412 // Emitting a musttail call thunk doesn't use any of the CGCall.cpp machinery
413 // to translate AST arguments into LLVM IR arguments. For thunks, we know
414 // that the caller prototype more or less matches the callee prototype with
415 // the exception of 'this'.
416 SmallVector<llvm::Value *, 8> Args(llvm::make_pointer_range(Range: CurFn->args()));
417
418 // Set the adjusted 'this' pointer.
419 const ABIArgInfo &ThisAI = CurFnInfo->arg_begin()->info;
420 if (ThisAI.isDirect()) {
421 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
422 int ThisArgNo = RetAI.isIndirect() && !RetAI.isSRetAfterThis() ? 1 : 0;
423 llvm::Type *ThisType = Args[ThisArgNo]->getType();
424 if (ThisType != AdjustedThisPtr->getType())
425 AdjustedThisPtr = Builder.CreateBitCast(V: AdjustedThisPtr, DestTy: ThisType);
426 Args[ThisArgNo] = AdjustedThisPtr;
427 } else {
428 assert(ThisAI.isInAlloca() && "this is passed directly or inalloca");
429 Address ThisAddr = GetAddrOfLocalVar(VD: CXXABIThisDecl);
430 llvm::Type *ThisType = ThisAddr.getElementType();
431 if (ThisType != AdjustedThisPtr->getType())
432 AdjustedThisPtr = Builder.CreateBitCast(V: AdjustedThisPtr, DestTy: ThisType);
433 Builder.CreateStore(Val: AdjustedThisPtr, Addr: ThisAddr);
434 }
435
436 // Emit the musttail call manually. Even if the prologue pushed cleanups, we
437 // don't actually want to run them.
438 llvm::CallInst *Call = Builder.CreateCall(Callee, Args);
439 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
440
441 // Apply the standard set of call attributes.
442 unsigned CallingConv;
443 llvm::AttributeList Attrs;
444 CGM.ConstructAttributeList(Name: Callee.getCallee()->getName(), Info: *CurFnInfo, CalleeInfo: GD,
445 Attrs, CallingConv, /*AttrOnCallSite=*/true,
446 /*IsThunk=*/false);
447 Call->setAttributes(Attrs);
448 Call->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
449
450 if (Call->getType()->isVoidTy())
451 Builder.CreateRetVoid();
452 else
453 Builder.CreateRet(V: Call);
454
455 // Finish the function to maintain CodeGenFunction invariants.
456 // FIXME: Don't emit unreachable code.
457 EmitBlock(BB: createBasicBlock());
458
459 FinishThunk();
460}
461
462void CodeGenFunction::generateThunk(llvm::Function *Fn,
463 const CGFunctionInfo &FnInfo, GlobalDecl GD,
464 const ThunkInfo &Thunk,
465 bool IsUnprototyped) {
466 StartThunk(Fn, GD, FnInfo, IsUnprototyped);
467 // Create a scope with an artificial location for the body of this function.
468 auto AL = ApplyDebugLocation::CreateArtificial(CGF&: *this);
469
470 // Get our callee. Use a placeholder type if this method is unprototyped so
471 // that CodeGenModule doesn't try to set attributes.
472 llvm::Type *Ty;
473 if (IsUnprototyped)
474 Ty = llvm::StructType::get(Context&: getLLVMContext());
475 else
476 Ty = CGM.getTypes().GetFunctionType(Info: FnInfo);
477
478 llvm::Constant *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
479
480 // Make the call and return the result.
481 EmitCallAndReturnForThunk(Callee: llvm::FunctionCallee(Fn->getFunctionType(), Callee),
482 Thunk: &Thunk, IsUnprototyped);
483}
484
485static bool shouldEmitVTableThunk(CodeGenModule &CGM, const CXXMethodDecl *MD,
486 bool IsUnprototyped, bool ForVTable) {
487 // Always emit thunks in the MS C++ ABI. We cannot rely on other TUs to
488 // provide thunks for us.
489 if (CGM.getTarget().getCXXABI().isMicrosoft())
490 return true;
491
492 // In the Itanium C++ ABI, vtable thunks are provided by TUs that provide
493 // definitions of the main method. Therefore, emitting thunks with the vtable
494 // is purely an optimization. Emit the thunk if optimizations are enabled and
495 // all of the parameter types are complete.
496 if (ForVTable)
497 return CGM.getCodeGenOpts().OptimizationLevel && !IsUnprototyped;
498
499 // Always emit thunks along with the method definition.
500 return true;
501}
502
503llvm::Constant *CodeGenVTables::maybeEmitThunk(GlobalDecl GD,
504 const ThunkInfo &TI,
505 bool ForVTable) {
506 const CXXMethodDecl *MD = cast<CXXMethodDecl>(Val: GD.getDecl());
507
508 // First, get a declaration. Compute the mangled name. Don't worry about
509 // getting the function prototype right, since we may only need this
510 // declaration to fill in a vtable slot.
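  // For instance (illustrative, Itanium mangling on a typical 64-bit target),
  // a thunk that only applies a non-virtual "this" adjustment of -8 bytes
  // before calling D::f would be named something like "_ZThn8_N1D1fEv".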
511 SmallString<256> Name;
512 MangleContext &MCtx = CGM.getCXXABI().getMangleContext();
513 llvm::raw_svector_ostream Out(Name);
514
515 if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(Val: MD)) {
516 MCtx.mangleCXXDtorThunk(DD, Type: GD.getDtorType(), Thunk: TI,
517 /* elideOverrideInfo */ ElideOverrideInfo: false, Out);
518 } else
519 MCtx.mangleThunk(MD, Thunk: TI, /* elideOverrideInfo */ ElideOverrideInfo: false, Out);
520
521 if (CGM.getContext().useAbbreviatedThunkName(VirtualMethodDecl: GD, MangledName: Name.str())) {
522 Name = "";
523 if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(Val: MD))
524 MCtx.mangleCXXDtorThunk(DD, Type: GD.getDtorType(), Thunk: TI,
525 /* elideOverrideInfo */ ElideOverrideInfo: true, Out);
526 else
527 MCtx.mangleThunk(MD, Thunk: TI, /* elideOverrideInfo */ ElideOverrideInfo: true, Out);
528 }
529
530 llvm::Type *ThunkVTableTy = CGM.getTypes().GetFunctionTypeForVTable(GD);
531 llvm::Constant *Thunk = CGM.GetAddrOfThunk(Name, FnTy: ThunkVTableTy, GD);
532
533 // If we don't need to emit a definition, return this declaration as is.
534 bool IsUnprototyped = !CGM.getTypes().isFuncTypeConvertible(
535 FT: MD->getType()->castAs<FunctionType>());
536 if (!shouldEmitVTableThunk(CGM, MD, IsUnprototyped, ForVTable))
537 return Thunk;
538
539 // Arrange a function prototype appropriate for a function definition. In some
540 // cases in the MS ABI, we may need to build an unprototyped musttail thunk.
541 const CGFunctionInfo &FnInfo =
542 IsUnprototyped ? CGM.getTypes().arrangeUnprototypedMustTailThunk(MD)
543 : CGM.getTypes().arrangeGlobalDeclaration(GD);
544 llvm::FunctionType *ThunkFnTy = CGM.getTypes().GetFunctionType(Info: FnInfo);
545
546 // If the type of the underlying GlobalValue is wrong, we'll have to replace
547 // it. It should be a declaration.
548 llvm::Function *ThunkFn = cast<llvm::Function>(Val: Thunk->stripPointerCasts());
549 if (ThunkFn->getFunctionType() != ThunkFnTy) {
550 llvm::GlobalValue *OldThunkFn = ThunkFn;
551
552 assert(OldThunkFn->isDeclaration() && "Shouldn't replace non-declaration");
553
554 // Remove the name from the old thunk function and get a new thunk.
555 OldThunkFn->setName(StringRef());
556 ThunkFn = llvm::Function::Create(Ty: ThunkFnTy, Linkage: llvm::Function::ExternalLinkage,
557 N: Name.str(), M: &CGM.getModule());
558 CGM.SetLLVMFunctionAttributes(GD: MD, Info: FnInfo, F: ThunkFn, /*IsThunk=*/false);
559
560 if (!OldThunkFn->use_empty()) {
561 OldThunkFn->replaceAllUsesWith(V: ThunkFn);
562 }
563
564 // Remove the old thunk.
565 OldThunkFn->eraseFromParent();
566 }
567
568 bool ABIHasKeyFunctions = CGM.getTarget().getCXXABI().hasKeyFunctions();
569 bool UseAvailableExternallyLinkage = ForVTable && ABIHasKeyFunctions;
570
571 if (!ThunkFn->isDeclaration()) {
572 if (!ABIHasKeyFunctions || UseAvailableExternallyLinkage) {
573 // There is already a thunk emitted for this function, do nothing.
574 return ThunkFn;
575 }
576
577 setThunkProperties(CGM, Thunk: TI, ThunkFn, ForVTable, GD);
578 return ThunkFn;
579 }
580
  // If this will be unprototyped, add the "thunk" attribute so that LLVM knows
  // that the return type is meaningless. These thunks can be used to call
  // functions with differing return types, and the caller is required to cast
  // the callee to the appropriate prototype to extract the correct value.
585 if (IsUnprototyped)
586 ThunkFn->addFnAttr(Kind: "thunk");
587
588 CGM.SetLLVMFunctionAttributesForDefinition(D: GD.getDecl(), F: ThunkFn);
589
590 // Thunks for variadic methods are special because in general variadic
591 // arguments cannot be perfectly forwarded. In the general case, clang
592 // implements such thunks by cloning the original function body. However, for
593 // thunks with no return adjustment on targets that support musttail, we can
594 // use musttail to perfectly forward the variadic arguments.
595 bool ShouldCloneVarArgs = false;
596 if (!IsUnprototyped && ThunkFn->isVarArg()) {
597 ShouldCloneVarArgs = true;
598 if (TI.Return.isEmpty()) {
599 switch (CGM.getTriple().getArch()) {
600 case llvm::Triple::x86_64:
601 case llvm::Triple::x86:
602 case llvm::Triple::aarch64:
603 ShouldCloneVarArgs = false;
604 break;
605 default:
606 break;
607 }
608 }
609 }
610
611 if (ShouldCloneVarArgs) {
612 if (UseAvailableExternallyLinkage)
613 return ThunkFn;
614 ThunkFn =
615 CodeGenFunction(CGM).GenerateVarArgsThunk(Fn: ThunkFn, FnInfo, GD, Thunk: TI);
616 } else {
617 // Normal thunk body generation.
618 CodeGenFunction(CGM).generateThunk(Fn: ThunkFn, FnInfo, GD, Thunk: TI, IsUnprototyped);
619 }
620
621 setThunkProperties(CGM, Thunk: TI, ThunkFn, ForVTable, GD);
622 return ThunkFn;
623}
624
625void CodeGenVTables::EmitThunks(GlobalDecl GD) {
626 const CXXMethodDecl *MD =
627 cast<CXXMethodDecl>(Val: GD.getDecl())->getCanonicalDecl();
628
629 // We don't need to generate thunks for the base destructor.
630 if (isa<CXXDestructorDecl>(Val: MD) && GD.getDtorType() == Dtor_Base)
631 return;
632
633 const VTableContextBase::ThunkInfoVectorTy *ThunkInfoVector =
634 VTContext->getThunkInfo(GD);
635
636 if (!ThunkInfoVector)
637 return;
638
639 for (const ThunkInfo& Thunk : *ThunkInfoVector)
640 maybeEmitThunk(GD, TI: Thunk, /*ForVTable=*/false);
641}
642
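// Rough sketch of what a relative component amounts to (illustrative only):
// rather than a pointer-width slot holding the address of the target, the
// slot holds an i32 offset measured from the vtable's address point,
// conceptually
//   trunc(ptrtoint(target) - ptrtoint(address_point)) to i32
// where the target is a dso_local_equivalent of the function, or the
// ".rtti_proxy" stub created below for the RTTI component.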
643void CodeGenVTables::addRelativeComponent(ConstantArrayBuilder &builder,
644 llvm::Constant *component,
645 unsigned vtableAddressPoint,
646 bool vtableHasLocalLinkage,
647 bool isCompleteDtor) const {
648 // No need to get the offset of a nullptr.
649 if (component->isNullValue())
650 return builder.add(value: llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: 0));
651
652 auto *globalVal =
653 cast<llvm::GlobalValue>(Val: component->stripPointerCastsAndAliases());
654 llvm::Module &module = CGM.getModule();
655
656 // We don't want to copy the linkage of the vtable exactly because we still
657 // want the stub/proxy to be emitted for properly calculating the offset.
658 // Examples where there would be no symbol emitted are available_externally
659 // and private linkages.
  //
  // `internal` linkage results in an STB_LOCAL ELF binding while still
  // manifesting a local symbol.
  //
  // `linkonce_odr` linkage results in an STB_WEAK ELF binding but also allows
  // the rtti_proxy to be transparently replaced with a GOTPCREL relocation by
  // a target that supports this replacement.
667 auto stubLinkage = vtableHasLocalLinkage
668 ? llvm::GlobalValue::InternalLinkage
669 : llvm::GlobalValue::LinkOnceODRLinkage;
670
671 llvm::Constant *target;
672 if (auto *func = dyn_cast<llvm::Function>(Val: globalVal)) {
673 target = llvm::DSOLocalEquivalent::get(GV: func);
674 } else {
675 llvm::SmallString<16> rttiProxyName(globalVal->getName());
676 rttiProxyName.append(RHS: ".rtti_proxy");
677
678 // The RTTI component may not always be emitted in the same linkage unit as
679 // the vtable. As a general case, we can make a dso_local proxy to the RTTI
680 // that points to the actual RTTI struct somewhere. This will result in a
681 // GOTPCREL relocation when taking the relative offset to the proxy.
682 llvm::GlobalVariable *proxy = module.getNamedGlobal(Name: rttiProxyName);
683 if (!proxy) {
684 proxy = new llvm::GlobalVariable(module, globalVal->getType(),
685 /*isConstant=*/true, stubLinkage,
686 globalVal, rttiProxyName);
687 proxy->setDSOLocal(true);
688 proxy->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
689 if (!proxy->hasLocalLinkage()) {
690 proxy->setVisibility(llvm::GlobalValue::HiddenVisibility);
691 proxy->setComdat(module.getOrInsertComdat(Name: rttiProxyName));
692 }
      // Do not instrument the rtti proxies with hwasan to avoid a duplicate
      // symbol error. Aliases generated by hwasan will retain the same name,
      // but the addresses they are set to may have different tags from
      // different compilation units. We don't run into this without hwasan
      // because the proxies are in comdat groups, but those aren't propagated
      // to the alias.
698 RemoveHwasanMetadata(GV: proxy);
699 }
700 target = proxy;
701 }
702
703 builder.addRelativeOffsetToPosition(type: CGM.Int32Ty, target,
704 /*position=*/vtableAddressPoint);
705}
706
static bool UseRelativeLayout(const CodeGenModule &CGM) {
  return CGM.getTarget().getCXXABI().isItaniumFamily() &&
         CGM.getItaniumVTableContext().isRelativeLayout();
}

bool CodeGenVTables::useRelativeLayout() const {
  return UseRelativeLayout(CGM);
}

llvm::Type *CodeGenModule::getVTableComponentType() const {
  if (UseRelativeLayout(*this))
    return Int32Ty;
  return GlobalsInt8PtrTy;
}

llvm::Type *CodeGenVTables::getVTableComponentType() const {
  return CGM.getVTableComponentType();
}
725
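// For example (values illustrative), an offset-to-top of -16 bytes becomes
// `inttoptr (i64 -16 to ptr)` in the pointer-based layout below, and simply
// the i32 constant -16 in the relative layout.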
static void AddPointerLayoutOffset(const CodeGenModule &CGM,
                                   ConstantArrayBuilder &builder,
                                   CharUnits offset) {
  builder.add(llvm::ConstantExpr::getIntToPtr(
      llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()),
      CGM.GlobalsInt8PtrTy));
}

static void AddRelativeLayoutOffset(const CodeGenModule &CGM,
                                    ConstantArrayBuilder &builder,
                                    CharUnits offset) {
  builder.add(llvm::ConstantInt::get(CGM.Int32Ty, offset.getQuantity()));
}
739
740void CodeGenVTables::addVTableComponent(ConstantArrayBuilder &builder,
741 const VTableLayout &layout,
742 unsigned componentIndex,
743 llvm::Constant *rtti,
744 unsigned &nextVTableThunkIndex,
745 unsigned vtableAddressPoint,
746 bool vtableHasLocalLinkage) {
747 auto &component = layout.vtable_components()[componentIndex];
748
749 auto addOffsetConstant =
750 useRelativeLayout() ? AddRelativeLayoutOffset : AddPointerLayoutOffset;
751
752 switch (component.getKind()) {
753 case VTableComponent::CK_VCallOffset:
754 return addOffsetConstant(CGM, builder, component.getVCallOffset());
755
756 case VTableComponent::CK_VBaseOffset:
757 return addOffsetConstant(CGM, builder, component.getVBaseOffset());
758
759 case VTableComponent::CK_OffsetToTop:
760 return addOffsetConstant(CGM, builder, component.getOffsetToTop());
761
762 case VTableComponent::CK_RTTI:
763 if (useRelativeLayout())
764 return addRelativeComponent(builder, component: rtti, vtableAddressPoint,
765 vtableHasLocalLinkage,
766 /*isCompleteDtor=*/false);
767 else
768 return builder.add(value: rtti);
769
770 case VTableComponent::CK_FunctionPointer:
771 case VTableComponent::CK_CompleteDtorPointer:
772 case VTableComponent::CK_DeletingDtorPointer: {
773 GlobalDecl GD = component.getGlobalDecl();
774
775 if (CGM.getLangOpts().CUDA) {
      // Emit NULL for methods we can't codegen on this side. Otherwise we'd
      // end up with a vtable containing unresolved references.
779 const CXXMethodDecl *MD = cast<CXXMethodDecl>(Val: GD.getDecl());
780 // OK on device side: functions w/ __device__ attribute
781 // OK on host side: anything except __device__-only functions.
782 bool CanEmitMethod =
783 CGM.getLangOpts().CUDAIsDevice
784 ? MD->hasAttr<CUDADeviceAttr>()
785 : (MD->hasAttr<CUDAHostAttr>() || !MD->hasAttr<CUDADeviceAttr>());
786 if (!CanEmitMethod)
787 return builder.add(
788 value: llvm::ConstantExpr::getNullValue(Ty: CGM.GlobalsInt8PtrTy));
789 // Method is acceptable, continue processing as usual.
790 }
791
792 auto getSpecialVirtualFn = [&](StringRef name) -> llvm::Constant * {
793 // FIXME(PR43094): When merging comdat groups, lld can select a local
794 // symbol as the signature symbol even though it cannot be accessed
795 // outside that symbol's TU. The relative vtables ABI would make
796 // __cxa_pure_virtual and __cxa_deleted_virtual local symbols, and
797 // depending on link order, the comdat groups could resolve to the one
798 // with the local symbol. As a temporary solution, fill these components
799 // with zero. We shouldn't be calling these in the first place anyway.
800 if (useRelativeLayout())
801 return llvm::ConstantPointerNull::get(T: CGM.GlobalsInt8PtrTy);
802
      // For NVPTX devices in OpenMP, emit special functions as null pointers;
      // otherwise linking ends up with unresolved references.
805 if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPIsTargetDevice &&
806 CGM.getTriple().isNVPTX())
807 return llvm::ConstantPointerNull::get(T: CGM.GlobalsInt8PtrTy);
808 llvm::FunctionType *fnTy =
809 llvm::FunctionType::get(Result: CGM.VoidTy, /*isVarArg=*/false);
810 llvm::Constant *fn = cast<llvm::Constant>(
811 Val: CGM.CreateRuntimeFunction(Ty: fnTy, Name: name).getCallee());
812 if (auto f = dyn_cast<llvm::Function>(Val: fn))
813 f->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
814 return fn;
815 };
816
817 llvm::Constant *fnPtr;
818
819 // Pure virtual member functions.
820 if (cast<CXXMethodDecl>(Val: GD.getDecl())->isPureVirtual()) {
821 if (!PureVirtualFn)
822 PureVirtualFn =
823 getSpecialVirtualFn(CGM.getCXXABI().GetPureVirtualCallName());
824 fnPtr = PureVirtualFn;
825
826 // Deleted virtual member functions.
827 } else if (cast<CXXMethodDecl>(Val: GD.getDecl())->isDeleted()) {
828 if (!DeletedVirtualFn)
829 DeletedVirtualFn =
830 getSpecialVirtualFn(CGM.getCXXABI().GetDeletedVirtualCallName());
831 fnPtr = DeletedVirtualFn;
832
833 // Thunks.
834 } else if (nextVTableThunkIndex < layout.vtable_thunks().size() &&
835 layout.vtable_thunks()[nextVTableThunkIndex].first ==
836 componentIndex) {
837 auto &thunkInfo = layout.vtable_thunks()[nextVTableThunkIndex].second;
838
839 nextVTableThunkIndex++;
840 fnPtr = maybeEmitThunk(GD, TI: thunkInfo, /*ForVTable=*/true);
841 if (CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers) {
842 assert(thunkInfo.Method && "Method not set");
843 GD = GD.getWithDecl(D: thunkInfo.Method);
844 }
845
846 // Otherwise we can use the method definition directly.
847 } else {
848 llvm::Type *fnTy = CGM.getTypes().GetFunctionTypeForVTable(GD);
849 fnPtr = CGM.GetAddrOfFunction(GD, Ty: fnTy, /*ForVTable=*/true);
850 if (CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers)
851 GD = getItaniumVTableContext().findOriginalMethod(GD);
852 }
853
854 if (useRelativeLayout()) {
855 return addRelativeComponent(
856 builder, component: fnPtr, vtableAddressPoint, vtableHasLocalLinkage,
857 isCompleteDtor: component.getKind() == VTableComponent::CK_CompleteDtorPointer);
858 } else {
      // TODO: this is icky and only exists due to functions being in the
      // generic address space, rather than the global one, even though they
      // are globals; fixing said issue might be intrusive, and will be done
      // later.
863 unsigned FnAS = fnPtr->getType()->getPointerAddressSpace();
864 unsigned GVAS = CGM.GlobalsInt8PtrTy->getPointerAddressSpace();
865
866 if (FnAS != GVAS)
867 fnPtr =
868 llvm::ConstantExpr::getAddrSpaceCast(C: fnPtr, Ty: CGM.GlobalsInt8PtrTy);
869 if (const auto &Schema =
870 CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers)
871 return builder.addSignedPointer(Pointer: fnPtr, Schema, CalleeDecl: GD, CalleeType: QualType());
872 return builder.add(value: fnPtr);
873 }
874 }
875
876 case VTableComponent::CK_UnusedFunctionPointer:
877 if (useRelativeLayout())
878 return builder.add(value: llvm::ConstantExpr::getNullValue(Ty: CGM.Int32Ty));
879 else
880 return builder.addNullPointer(ptrTy: CGM.GlobalsInt8PtrTy);
881 }
882
883 llvm_unreachable("Unexpected vtable component kind");
884}
885
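// As an illustration, a layout containing two embedded vtables (say a primary
// vtable with 5 components and a virtual-base vtable with 4) yields a type
// such as { [5 x ptr], [4 x ptr] }, or { [5 x i32], [4 x i32] } under the
// relative layout; the component counts here are purely hypothetical.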
886llvm::Type *CodeGenVTables::getVTableType(const VTableLayout &layout) {
887 SmallVector<llvm::Type *, 4> tys;
888 llvm::Type *componentType = getVTableComponentType();
889 for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i)
890 tys.push_back(Elt: llvm::ArrayType::get(ElementType: componentType, NumElements: layout.getVTableSize(i)));
891
892 return llvm::StructType::get(Context&: CGM.getLLVMContext(), Elements: tys);
893}
894
895void CodeGenVTables::createVTableInitializer(ConstantStructBuilder &builder,
896 const VTableLayout &layout,
897 llvm::Constant *rtti,
898 bool vtableHasLocalLinkage) {
899 llvm::Type *componentType = getVTableComponentType();
900
901 const auto &addressPoints = layout.getAddressPointIndices();
902 unsigned nextVTableThunkIndex = 0;
903 for (unsigned vtableIndex = 0, endIndex = layout.getNumVTables();
904 vtableIndex != endIndex; ++vtableIndex) {
905 auto vtableElem = builder.beginArray(eltTy: componentType);
906
907 size_t vtableStart = layout.getVTableOffset(i: vtableIndex);
908 size_t vtableEnd = vtableStart + layout.getVTableSize(i: vtableIndex);
909 for (size_t componentIndex = vtableStart; componentIndex < vtableEnd;
910 ++componentIndex) {
911 addVTableComponent(builder&: vtableElem, layout, componentIndex, rtti,
912 nextVTableThunkIndex, vtableAddressPoint: addressPoints[vtableIndex],
913 vtableHasLocalLinkage);
914 }
915 vtableElem.finishAndAddTo(parent&: builder);
916 }
917}
918
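// Construction vtables are used while constructing or destructing base-class
// subobjects of a most-derived object with virtual bases. As a sketch (types
// illustrative), for
//   struct VB { virtual ~VB(); };
//   struct Base : virtual VB {};
//   struct Derived : Base {};
// the vtable for Base-in-Derived is emitted under an Itanium name of the form
// "_ZTC7Derived0_4Base" and is referenced from Derived's VTT.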
919llvm::GlobalVariable *CodeGenVTables::GenerateConstructionVTable(
920 const CXXRecordDecl *RD, const BaseSubobject &Base, bool BaseIsVirtual,
921 llvm::GlobalVariable::LinkageTypes Linkage,
922 VTableAddressPointsMapTy &AddressPoints) {
923 if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
924 DI->completeClassData(RD: Base.getBase());
925
926 std::unique_ptr<VTableLayout> VTLayout(
927 getItaniumVTableContext().createConstructionVTableLayout(
928 MostDerivedClass: Base.getBase(), MostDerivedClassOffset: Base.getBaseOffset(), MostDerivedClassIsVirtual: BaseIsVirtual, LayoutClass: RD));
929
930 // Add the address points.
931 AddressPoints = VTLayout->getAddressPoints();
932
933 // Get the mangled construction vtable name.
934 SmallString<256> OutName;
935 llvm::raw_svector_ostream Out(OutName);
936 cast<ItaniumMangleContext>(Val&: CGM.getCXXABI().getMangleContext())
937 .mangleCXXCtorVTable(RD, Offset: Base.getBaseOffset().getQuantity(),
938 Type: Base.getBase(), Out);
939 SmallString<256> Name(OutName);
940
941 bool UsingRelativeLayout = getItaniumVTableContext().isRelativeLayout();
942 bool VTableAliasExists =
943 UsingRelativeLayout && CGM.getModule().getNamedAlias(Name);
944 if (VTableAliasExists) {
945 // We previously made the vtable hidden and changed its name.
946 Name.append(RHS: ".local");
947 }
948
949 llvm::Type *VTType = getVTableType(layout: *VTLayout);
950
951 // Construction vtable symbols are not part of the Itanium ABI, so we cannot
952 // guarantee that they actually will be available externally. Instead, when
953 // emitting an available_externally VTT, we provide references to an internal
954 // linkage construction vtable. The ABI only requires complete-object vtables
955 // to be the same for all instances of a type, not construction vtables.
956 if (Linkage == llvm::GlobalVariable::AvailableExternallyLinkage)
957 Linkage = llvm::GlobalVariable::InternalLinkage;
958
959 llvm::Align Align = CGM.getDataLayout().getABITypeAlign(Ty: VTType);
960
961 // Create the variable that will hold the construction vtable.
962 llvm::GlobalVariable *VTable =
963 CGM.CreateOrReplaceCXXRuntimeVariable(Name, Ty: VTType, Linkage, Alignment: Align);
964
965 // V-tables are always unnamed_addr.
966 VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
967
968 llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(
969 Ty: CGM.getContext().getTagDeclType(Decl: Base.getBase()));
970
971 // Create and set the initializer.
972 ConstantInitBuilder builder(CGM);
973 auto components = builder.beginStruct();
974 createVTableInitializer(builder&: components, layout: *VTLayout, rtti: RTTI,
975 vtableHasLocalLinkage: VTable->hasLocalLinkage());
976 components.finishAndSetAsInitializer(global: VTable);
977
978 // Set properties only after the initializer has been set to ensure that the
979 // GV is treated as definition and not declaration.
980 assert(!VTable->isDeclaration() && "Shouldn't set properties on declaration");
981 CGM.setGVProperties(GV: VTable, D: RD);
982
983 CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout: *VTLayout.get());
984
985 if (UsingRelativeLayout) {
986 RemoveHwasanMetadata(GV: VTable);
987 if (!VTable->isDSOLocal())
988 GenerateRelativeVTableAlias(VTable, AliasNameRef: OutName);
989 }
990
991 return VTable;
992}
993
// Ensure this vtable is not instrumented by hwasan. That is, a global alias is
// not generated for it. This is mainly used by the relative-vtables ABI, where
// vtables instead contain 32-bit offsets between the vtable and function
// pointers. Hwasan is disabled for these vtables for now because the tag in a
// vtable pointer may fail the overflow check when resolving 32-bit PLT
// relocations. A future alternative would be to identify which uses of the
// vtable can keep working with the untagged value without losing any hwasan
// coverage.
void CodeGenVTables::RemoveHwasanMetadata(llvm::GlobalValue *GV) const {
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::HWAddress)) {
    llvm::GlobalValue::SanitizerMetadata Meta;
    if (GV->hasSanitizerMetadata())
      Meta = GV->getSanitizerMetadata();
    Meta.NoHWAddress = true;
    GV->setSanitizerMetadata(Meta);
  }
}
1011
// If the VTable is not dso_local, then we will not be able to indicate that
// the VTable does not need a relocation and move it into rodata. A frequent
// case where this occurs is for classes that should be made public from a DSO
// (like in libc++). For cases like these, we can make the vtable hidden or
// private and create a public alias with the same visibility and linkage as
// the original vtable type.
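//
// As a sketch (symbol names illustrative): a vtable originally emitted as
// _ZTV1S with default visibility is renamed to "_ZTV1S.local" and given
// private (or hidden) linkage below, while a public alias named _ZTV1S with
// the original linkage and visibility is created to point at it, so in-DSO
// references bind to the dso_local definition and the original name remains
// available to other DSOs.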
1018void CodeGenVTables::GenerateRelativeVTableAlias(llvm::GlobalVariable *VTable,
1019 llvm::StringRef AliasNameRef) {
1020 assert(getItaniumVTableContext().isRelativeLayout() &&
1021 "Can only use this if the relative vtable ABI is used");
1022 assert(!VTable->isDSOLocal() && "This should be called only if the vtable is "
1023 "not guaranteed to be dso_local");
1024
  // If the vtable is available_externally, we shouldn't (nor need to) generate
  // an alias for it in the first place since the vtable won't actually be
  // emitted in this compilation unit.
1028 if (VTable->hasAvailableExternallyLinkage())
1029 return;
1030
  // Create a new string in the event the alias is already the name of the
  // vtable. Using the reference directly could lead to use of an invalidated
  // value in the module's StringMap.
1034 llvm::SmallString<256> AliasName(AliasNameRef);
1035 VTable->setName(AliasName + ".local");
1036
1037 auto Linkage = VTable->getLinkage();
1038 assert(llvm::GlobalAlias::isValidLinkage(Linkage) &&
1039 "Invalid vtable alias linkage");
1040
1041 llvm::GlobalAlias *VTableAlias = CGM.getModule().getNamedAlias(Name: AliasName);
1042 if (!VTableAlias) {
1043 VTableAlias = llvm::GlobalAlias::create(Ty: VTable->getValueType(),
1044 AddressSpace: VTable->getAddressSpace(), Linkage,
1045 Name: AliasName, Parent: &CGM.getModule());
1046 } else {
1047 assert(VTableAlias->getValueType() == VTable->getValueType());
1048 assert(VTableAlias->getLinkage() == Linkage);
1049 }
1050 VTableAlias->setVisibility(VTable->getVisibility());
1051 VTableAlias->setUnnamedAddr(VTable->getUnnamedAddr());
1052
1053 // Both of these imply dso_local for the vtable.
1054 if (!VTable->hasComdat()) {
    // If this is in a comdat, then we shouldn't make the linkage private due
    // to an issue in lld where private symbols can be used as the key symbol
    // when choosing the prevalent group. This leads to "relocation refers to
    // a symbol in a discarded section".
1059 VTable->setLinkage(llvm::GlobalValue::PrivateLinkage);
1060 } else {
1061 // We should at least make this hidden since we don't want to expose it.
1062 VTable->setVisibility(llvm::GlobalValue::HiddenVisibility);
1063 }
1064
1065 VTableAlias->setAliasee(VTable);
1066}
1067
static bool shouldEmitAvailableExternallyVTable(const CodeGenModule &CGM,
                                                const CXXRecordDecl *RD) {
  return CGM.getCodeGenOpts().OptimizationLevel > 0 &&
         CGM.getCXXABI().canSpeculativelyEmitVTable(RD);
}
1073
1074/// Compute the required linkage of the vtable for the given class.
1075///
1076/// Note that we only call this at the end of the translation unit.
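///
/// For example, given `struct S { virtual void f(); };` with S::f (the key
/// function) defined out of line in exactly one TU, that TU emits the vtable
/// with external linkage, and every other TU treats it as defined elsewhere
/// (or emits it available_externally when optimizing).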
1077llvm::GlobalVariable::LinkageTypes
1078CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
1079 if (!RD->isExternallyVisible())
1080 return llvm::GlobalVariable::InternalLinkage;
1081
  // On Windows, the linkage of the vtable is not related to modules.
1083 bool IsInNamedModule = !getTarget().getCXXABI().isMicrosoft() &&
1084 RD->isInNamedModule();
1085 // If the CXXRecordDecl is not in a module unit, we need to get
1086 // its key function. We're at the end of the translation unit, so the current
1087 // key function is fully correct.
1088 const CXXMethodDecl *keyFunction =
1089 IsInNamedModule ? nullptr : Context.getCurrentKeyFunction(RD);
1090 if (IsInNamedModule || (keyFunction && !RD->hasAttr<DLLImportAttr>())) {
1091 // If this class has a key function, use that to determine the
1092 // linkage of the vtable.
1093 const FunctionDecl *def = nullptr;
1094 if (keyFunction && keyFunction->hasBody(Definition&: def))
1095 keyFunction = cast<CXXMethodDecl>(Val: def);
1096
1097 bool IsExternalDefinition =
1098 IsInNamedModule ? RD->shouldEmitInExternalSource() : !def;
1099
1100 TemplateSpecializationKind Kind =
1101 IsInNamedModule ? RD->getTemplateSpecializationKind()
1102 : keyFunction->getTemplateSpecializationKind();
1103
1104 switch (Kind) {
1105 case TSK_Undeclared:
1106 case TSK_ExplicitSpecialization:
1107 assert(
1108 (IsInNamedModule || def || CodeGenOpts.OptimizationLevel > 0 ||
1109 CodeGenOpts.getDebugInfo() != llvm::codegenoptions::NoDebugInfo) &&
1110 "Shouldn't query vtable linkage without the class in module units, "
1111 "key function, optimizations, or debug info");
1112 if (IsExternalDefinition && CodeGenOpts.OptimizationLevel > 0)
1113 return llvm::GlobalVariable::AvailableExternallyLinkage;
1114
1115 if (keyFunction && keyFunction->isInlined())
1116 return !Context.getLangOpts().AppleKext
1117 ? llvm::GlobalVariable::LinkOnceODRLinkage
1118 : llvm::Function::InternalLinkage;
1119
1120 return llvm::GlobalVariable::ExternalLinkage;
1121
1122 case TSK_ImplicitInstantiation:
1123 return !Context.getLangOpts().AppleKext ?
1124 llvm::GlobalVariable::LinkOnceODRLinkage :
1125 llvm::Function::InternalLinkage;
1126
1127 case TSK_ExplicitInstantiationDefinition:
1128 return !Context.getLangOpts().AppleKext ?
1129 llvm::GlobalVariable::WeakODRLinkage :
1130 llvm::Function::InternalLinkage;
1131
1132 case TSK_ExplicitInstantiationDeclaration:
1133 llvm_unreachable("Should not have been asked to emit this");
1134 }
1135 }
1136
1137 // -fapple-kext mode does not support weak linkage, so we must use
1138 // internal linkage.
1139 if (Context.getLangOpts().AppleKext)
1140 return llvm::Function::InternalLinkage;
1141
1142 llvm::GlobalVariable::LinkageTypes DiscardableODRLinkage =
1143 llvm::GlobalValue::LinkOnceODRLinkage;
1144 llvm::GlobalVariable::LinkageTypes NonDiscardableODRLinkage =
1145 llvm::GlobalValue::WeakODRLinkage;
1146 if (RD->hasAttr<DLLExportAttr>()) {
1147 // Cannot discard exported vtables.
1148 DiscardableODRLinkage = NonDiscardableODRLinkage;
1149 } else if (RD->hasAttr<DLLImportAttr>()) {
1150 // Imported vtables are available externally.
1151 DiscardableODRLinkage = llvm::GlobalVariable::AvailableExternallyLinkage;
1152 NonDiscardableODRLinkage = llvm::GlobalVariable::AvailableExternallyLinkage;
1153 }
1154
1155 switch (RD->getTemplateSpecializationKind()) {
1156 case TSK_Undeclared:
1157 case TSK_ExplicitSpecialization:
1158 case TSK_ImplicitInstantiation:
1159 return DiscardableODRLinkage;
1160
1161 case TSK_ExplicitInstantiationDeclaration:
1162 // Explicit instantiations in MSVC do not provide vtables, so we must emit
1163 // our own.
1164 if (getTarget().getCXXABI().isMicrosoft())
1165 return DiscardableODRLinkage;
1166 return shouldEmitAvailableExternallyVTable(CGM: *this, RD)
1167 ? llvm::GlobalVariable::AvailableExternallyLinkage
1168 : llvm::GlobalVariable::ExternalLinkage;
1169
1170 case TSK_ExplicitInstantiationDefinition:
1171 return NonDiscardableODRLinkage;
1172 }
1173
1174 llvm_unreachable("Invalid TemplateSpecializationKind!");
1175}
1176
1177/// This is a callback from Sema to tell us that a particular vtable is
1178/// required to be emitted in this translation unit.
1179///
1180/// This is only called for vtables that _must_ be emitted (mainly due to key
1181/// functions). For weak vtables, CodeGen tracks when they are needed and
1182/// emits them as-needed.
void CodeGenModule::EmitVTable(CXXRecordDecl *theClass) {
  VTables.GenerateClassData(theClass);
}
1186
void
CodeGenVTables::GenerateClassData(const CXXRecordDecl *RD) {
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeClassData(RD);

  if (RD->getNumVBases())
    CGM.getCXXABI().emitVirtualInheritanceTables(RD);

  CGM.getCXXABI().emitVTableDefinitions(*this, RD);
}
1197
/// At this point in the translation unit, does it appear that we can
/// rely on the vtable being defined elsewhere in the program?
1200///
1201/// The response is really only definitive when called at the end of
1202/// the translation unit.
1203///
1204/// The only semantic restriction here is that the object file should
1205/// not contain a vtable definition when that vtable is defined
1206/// strongly elsewhere. Otherwise, we'd just like to avoid emitting
1207/// vtables when unnecessary.
1208bool CodeGenVTables::isVTableExternal(const CXXRecordDecl *RD) {
1209 assert(RD->isDynamicClass() && "Non-dynamic classes have no VTable.");
1210
1211 // We always synthesize vtables if they are needed in the MS ABI. MSVC doesn't
1212 // emit them even if there is an explicit template instantiation.
1213 if (CGM.getTarget().getCXXABI().isMicrosoft())
1214 return false;
1215
1216 // If we have an explicit instantiation declaration (and not a
1217 // definition), the vtable is defined elsewhere.
1218 TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
1219 if (TSK == TSK_ExplicitInstantiationDeclaration)
1220 return true;
1221
1222 // Otherwise, if the class is an instantiated template, the
1223 // vtable must be defined here.
1224 if (TSK == TSK_ImplicitInstantiation ||
1225 TSK == TSK_ExplicitInstantiationDefinition)
1226 return false;
1227
1228 // Otherwise, if the class is attached to a module, the tables are uniquely
1229 // emitted in the object for the module unit in which it is defined.
1230 if (RD->isInNamedModule())
1231 return RD->shouldEmitInExternalSource();
1232
1233 // Otherwise, if the class doesn't have a key function (possibly
1234 // anymore), the vtable must be defined here.
1235 const CXXMethodDecl *keyFunction = CGM.getContext().getCurrentKeyFunction(RD);
1236 if (!keyFunction)
1237 return false;
1238
1239 // Otherwise, if we don't have a definition of the key function, the
1240 // vtable must be defined somewhere else.
1241 return !keyFunction->hasBody();
1242}
1243
1244/// Given that we're currently at the end of the translation unit, and
1245/// we've emitted a reference to the vtable for this class, should
1246/// we define that vtable?
static bool shouldEmitVTableAtEndOfTranslationUnit(CodeGenModule &CGM,
                                                   const CXXRecordDecl *RD) {
  // If the vtable is internal then it has to be emitted in this TU.
  if (!CGM.getVTables().isVTableExternal(RD))
    return true;

  // If it's external then maybe we will need it as available_externally.
  return shouldEmitAvailableExternallyVTable(CGM, RD);
}
1256
1257/// Given that at some point we emitted a reference to one or more
1258/// vtables, and that we are now at the end of the translation unit,
1259/// decide whether we should emit them.
1260void CodeGenModule::EmitDeferredVTables() {
1261#ifndef NDEBUG
1262 // Remember the size of DeferredVTables, because we're going to assume
1263 // that this entire operation doesn't modify it.
1264 size_t savedSize = DeferredVTables.size();
1265#endif
1266
1267 for (const CXXRecordDecl *RD : DeferredVTables)
1268 if (shouldEmitVTableAtEndOfTranslationUnit(CGM&: *this, RD))
1269 VTables.GenerateClassData(RD);
1270 else if (shouldOpportunisticallyEmitVTables())
1271 OpportunisticVTables.push_back(x: RD);
1272
1273 assert(savedSize == DeferredVTables.size() &&
1274 "deferred extra vtables during vtable emission?");
1275 DeferredVTables.clear();
1276}
1277
1278bool CodeGenModule::AlwaysHasLTOVisibilityPublic(const CXXRecordDecl *RD) {
1279 if (RD->hasAttr<LTOVisibilityPublicAttr>() || RD->hasAttr<UuidAttr>() ||
1280 RD->hasAttr<DLLExportAttr>() || RD->hasAttr<DLLImportAttr>())
1281 return true;
1282
1283 if (!getCodeGenOpts().LTOVisibilityPublicStd)
1284 return false;
1285
1286 const DeclContext *DC = RD;
1287 while (true) {
1288 auto *D = cast<Decl>(Val: DC);
1289 DC = DC->getParent();
1290 if (isa<TranslationUnitDecl>(Val: DC->getRedeclContext())) {
1291 if (auto *ND = dyn_cast<NamespaceDecl>(Val: D))
1292 if (const IdentifierInfo *II = ND->getIdentifier())
1293 if (II->isStr(Str: "std") || II->isStr(Str: "stdext"))
1294 return true;
1295 break;
1296 }
1297 }
1298
1299 return false;
1300}
1301
1302bool CodeGenModule::HasHiddenLTOVisibility(const CXXRecordDecl *RD) {
1303 LinkageInfo LV = RD->getLinkageAndVisibility();
1304 if (!isExternallyVisible(L: LV.getLinkage()))
1305 return true;
1306
1307 if (!getTriple().isOSBinFormatCOFF() &&
1308 LV.getVisibility() != HiddenVisibility)
1309 return false;
1310
1311 return !AlwaysHasLTOVisibilityPublic(RD);
1312}
1313
1314llvm::GlobalObject::VCallVisibility CodeGenModule::GetVCallVisibilityLevel(
1315 const CXXRecordDecl *RD, llvm::DenseSet<const CXXRecordDecl *> &Visited) {
1316 // If we have already visited this RD (which means this is a recursive call
1317 // since the initial call should have an empty Visited set), return the max
1318 // visibility. The recursive calls below compute the min between the result
1319 // of the recursive call and the current TypeVis, so returning the max here
1320 // ensures that it will have no effect on the current TypeVis.
1321 if (!Visited.insert(V: RD).second)
1322 return llvm::GlobalObject::VCallVisibilityTranslationUnit;
1323
1324 LinkageInfo LV = RD->getLinkageAndVisibility();
1325 llvm::GlobalObject::VCallVisibility TypeVis;
1326 if (!isExternallyVisible(L: LV.getLinkage()))
1327 TypeVis = llvm::GlobalObject::VCallVisibilityTranslationUnit;
1328 else if (HasHiddenLTOVisibility(RD))
1329 TypeVis = llvm::GlobalObject::VCallVisibilityLinkageUnit;
1330 else
1331 TypeVis = llvm::GlobalObject::VCallVisibilityPublic;
1332
1333 for (const auto &B : RD->bases())
1334 if (B.getType()->getAsCXXRecordDecl()->isDynamicClass())
1335 TypeVis = std::min(
1336 a: TypeVis,
1337 b: GetVCallVisibilityLevel(RD: B.getType()->getAsCXXRecordDecl(), Visited));
1338
1339 for (const auto &B : RD->vbases())
1340 if (B.getType()->getAsCXXRecordDecl()->isDynamicClass())
1341 TypeVis = std::min(
1342 a: TypeVis,
1343 b: GetVCallVisibilityLevel(RD: B.getType()->getAsCXXRecordDecl(), Visited));
1344
1345 return TypeVis;
1346}
1347
1348void CodeGenModule::EmitVTableTypeMetadata(const CXXRecordDecl *RD,
1349 llvm::GlobalVariable *VTable,
1350 const VTableLayout &VTLayout) {
1351 // Emit type metadata on vtables with LTO or IR instrumentation.
1352 // In IR instrumentation, the type metadata is used to find out vtable
1353 // definitions (for type profiling) among all global variables.
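  // Roughly (illustrative, 64-bit Itanium layout), the vtable for
  // `struct S { virtual void f(); };` would carry an attachment such as
  //   !type !{i64 16, !"_ZTS1S"}
  // where 16 is the byte offset of S's address point within the vtable global.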
1354 if (!getCodeGenOpts().LTOUnit && !getCodeGenOpts().hasProfileIRInstr())
1355 return;
1356
1357 CharUnits ComponentWidth = GetTargetTypeStoreSize(Ty: getVTableComponentType());
1358
1359 struct AddressPoint {
1360 const CXXRecordDecl *Base;
1361 size_t Offset;
1362 std::string TypeName;
1363 bool operator<(const AddressPoint &RHS) const {
1364 int D = TypeName.compare(str: RHS.TypeName);
1365 return D < 0 || (D == 0 && Offset < RHS.Offset);
1366 }
1367 };
1368 std::vector<AddressPoint> AddressPoints;
1369 for (auto &&AP : VTLayout.getAddressPoints()) {
1370 AddressPoint N{.Base: AP.first.getBase(),
1371 .Offset: VTLayout.getVTableOffset(i: AP.second.VTableIndex) +
1372 AP.second.AddressPointIndex,
1373 .TypeName: {}};
1374 llvm::raw_string_ostream Stream(N.TypeName);
1375 getCXXABI().getMangleContext().mangleCanonicalTypeName(
1376 T: QualType(N.Base->getTypeForDecl(), 0), Stream);
1377 AddressPoints.push_back(x: std::move(N));
1378 }
1379
1380 // Sort the address points for determinism.
1381 llvm::sort(C&: AddressPoints);
1382
1383 ArrayRef<VTableComponent> Comps = VTLayout.vtable_components();
1384 for (auto AP : AddressPoints) {
1385 // Create type metadata for the address point.
1386 AddVTableTypeMetadata(VTable, Offset: ComponentWidth * AP.Offset, RD: AP.Base);
1387
1388 // The class associated with each address point could also potentially be
1389 // used for indirect calls via a member function pointer, so we need to
1390 // annotate the address of each function pointer with the appropriate member
1391 // function pointer type.
1392 for (unsigned I = 0; I != Comps.size(); ++I) {
1393 if (Comps[I].getKind() != VTableComponent::CK_FunctionPointer)
1394 continue;
1395 llvm::Metadata *MD = CreateMetadataIdentifierForVirtualMemPtrType(
1396 T: Context.getMemberPointerType(
1397 T: Comps[I].getFunctionDecl()->getType(),
1398 Cls: Context.getRecordType(Decl: AP.Base).getTypePtr()));
1399 VTable->addTypeMetadata(Offset: (ComponentWidth * I).getQuantity(), TypeID: MD);
1400 }
1401 }
1402
1403 if (getCodeGenOpts().VirtualFunctionElimination ||
1404 getCodeGenOpts().WholeProgramVTables) {
1405 llvm::DenseSet<const CXXRecordDecl *> Visited;
1406 llvm::GlobalObject::VCallVisibility TypeVis =
1407 GetVCallVisibilityLevel(RD, Visited);
1408 if (TypeVis != llvm::GlobalObject::VCallVisibilityPublic)
1409 VTable->setVCallVisibilityMetadata(TypeVis);
1410 }
1411}
1412