1 | //===--- CGCall.cpp - Encapsulate calling convention details --------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // These classes wrap the information about a call or function |
10 | // definition used to handle ABI compliance. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "CGCall.h" |
15 | #include "ABIInfo.h" |
16 | #include "ABIInfoImpl.h" |
17 | #include "CGBlocks.h" |
18 | #include "CGCXXABI.h" |
19 | #include "CGCleanup.h" |
20 | #include "CGRecordLayout.h" |
21 | #include "CodeGenFunction.h" |
22 | #include "CodeGenModule.h" |
23 | #include "TargetInfo.h" |
24 | #include "clang/AST/Attr.h" |
25 | #include "clang/AST/Decl.h" |
26 | #include "clang/AST/DeclCXX.h" |
27 | #include "clang/AST/DeclObjC.h" |
28 | #include "clang/Basic/CodeGenOptions.h" |
29 | #include "clang/Basic/TargetInfo.h" |
30 | #include "clang/CodeGen/CGFunctionInfo.h" |
31 | #include "clang/CodeGen/SwiftCallingConv.h" |
32 | #include "llvm/ADT/StringExtras.h" |
33 | #include "llvm/Analysis/ValueTracking.h" |
34 | #include "llvm/IR/Assumptions.h" |
35 | #include "llvm/IR/AttributeMask.h" |
36 | #include "llvm/IR/Attributes.h" |
37 | #include "llvm/IR/CallingConv.h" |
38 | #include "llvm/IR/DataLayout.h" |
39 | #include "llvm/IR/InlineAsm.h" |
40 | #include "llvm/IR/IntrinsicInst.h" |
41 | #include "llvm/IR/Intrinsics.h" |
42 | #include "llvm/IR/Type.h" |
43 | #include "llvm/Transforms/Utils/Local.h" |
44 | #include <optional> |
45 | using namespace clang; |
46 | using namespace CodeGen; |
47 | |
48 | /***/ |
49 | |
50 | unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) { |
51 | switch (CC) { |
52 | default: return llvm::CallingConv::C; |
53 | case CC_X86StdCall: return llvm::CallingConv::X86_StdCall; |
54 | case CC_X86FastCall: return llvm::CallingConv::X86_FastCall; |
55 | case CC_X86RegCall: return llvm::CallingConv::X86_RegCall; |
56 | case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall; |
57 | case CC_Win64: return llvm::CallingConv::Win64; |
58 | case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV; |
59 | case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS; |
60 | case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; |
61 | case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI; |
62 | // TODO: Add support for __pascal to LLVM. |
63 | case CC_X86Pascal: return llvm::CallingConv::C; |
64 | // TODO: Add support for __vectorcall to LLVM. |
65 | case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall; |
66 | case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall; |
67 | case CC_AArch64SVEPCS: return llvm::CallingConv::AArch64_SVE_VectorCall; |
68 | case CC_AMDGPUKernelCall: return llvm::CallingConv::AMDGPU_KERNEL; |
69 | case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC; |
70 | case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv(); |
71 | case CC_PreserveMost: return llvm::CallingConv::PreserveMost; |
72 | case CC_PreserveAll: return llvm::CallingConv::PreserveAll; |
73 | case CC_Swift: return llvm::CallingConv::Swift; |
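| // The Swift async convention maps onto LLVM's swifttailcc (SwiftTail), the |
| // tail-callable variant of swiftcc used for async resume functions. |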
74 | case CC_SwiftAsync: return llvm::CallingConv::SwiftTail; |
75 | case CC_M68kRTD: return llvm::CallingConv::M68k_RTD; |
76 | case CC_PreserveNone: return llvm::CallingConv::PreserveNone; |
77 | // clang-format off |
78 | case CC_RISCVVectorCall: return llvm::CallingConv::RISCV_VectorCall; |
79 | // clang-format on |
80 | } |
81 | } |
82 | |
83 | /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR |
84 | /// qualification. Either or both of RD and MD may be null. A null RD indicates |
85 | /// that there is no meaningful 'this' type, and a null MD can occur when |
86 | /// calling a method pointer. |
87 | CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD, |
88 | const CXXMethodDecl *MD) { |
89 | QualType RecTy; |
90 | if (RD) |
91 | RecTy = Context.getTagDeclType(Decl: RD)->getCanonicalTypeInternal(); |
92 | else |
93 | RecTy = Context.VoidTy; |
94 | |
95 | if (MD) |
96 | RecTy = Context.getAddrSpaceQualType(T: RecTy, AddressSpace: MD->getMethodQualifiers().getAddressSpace()); |
97 | return Context.getPointerType(T: CanQualType::CreateUnsafe(Other: RecTy)); |
98 | } |
99 | |
100 | /// Returns the canonical formal type of the given C++ method. |
101 | static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) { |
102 | return MD->getType()->getCanonicalTypeUnqualified() |
103 | .getAs<FunctionProtoType>(); |
104 | } |
105 | |
106 | /// Returns the "extra-canonicalized" return type, which discards |
107 | /// qualifiers on the return type. Codegen doesn't care about them, |
108 | /// and it makes ABI code a little easier to be able to assume that |
109 | /// all parameter and return types are top-level unqualified. |
110 | static CanQualType GetReturnType(QualType RetTy) { |
111 | return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); |
112 | } |
113 | |
114 | /// Arrange the argument and result information for a value of the given |
115 | /// unprototyped freestanding function type. |
116 | const CGFunctionInfo & |
117 | CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) { |
118 | // When translating an unprototyped function type, always use a |
119 | // variadic type. |
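| // RequiredArgs(0): no formal parameters are required, so every argument |
| // supplied at a call site is treated as variadic. |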
120 | return arrangeLLVMFunctionInfo(returnType: FTNP->getReturnType().getUnqualifiedType(), |
121 | opts: FnInfoOpts::None, argTypes: std::nullopt, |
122 | info: FTNP->getExtInfo(), paramInfos: {}, args: RequiredArgs(0)); |
123 | } |
124 | |
125 | static void addExtParameterInfosForCall( |
126 | llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> ¶mInfos, |
127 | const FunctionProtoType *proto, |
128 | unsigned prefixArgs, |
129 | unsigned totalArgs) { |
130 | assert(proto->hasExtParameterInfos()); |
131 | assert(paramInfos.size() <= prefixArgs); |
132 | assert(proto->getNumParams() + prefixArgs <= totalArgs); |
133 | |
134 | paramInfos.reserve(N: totalArgs); |
135 | |
136 | // Add default infos for any prefix args that don't already have infos. |
137 | paramInfos.resize(N: prefixArgs); |
138 | |
139 | // Add infos for the prototype. |
140 | for (const auto &ParamInfo : proto->getExtParameterInfos()) { |
141 | paramInfos.push_back(Elt: ParamInfo); |
142 | // pass_object_size params have no parameter info. |
143 | if (ParamInfo.hasPassObjectSize()) |
144 | paramInfos.emplace_back(); |
145 | } |
146 | |
147 | assert(paramInfos.size() <= totalArgs && |
148 | "Did we forget to insert pass_object_size args?" ); |
149 | // Add default infos for the variadic and/or suffix arguments. |
150 | paramInfos.resize(N: totalArgs); |
151 | } |
152 | |
153 | /// Adds the formal parameters in FPT to the given prefix. If any parameter in |
154 | /// FPT has pass_object_size attrs, then we'll add parameters for those, too. |
155 | static void appendParameterTypes(const CodeGenTypes &CGT, |
156 | SmallVectorImpl<CanQualType> &prefix, |
157 | SmallVectorImpl<FunctionProtoType::ExtParameterInfo> ¶mInfos, |
158 | CanQual<FunctionProtoType> FPT) { |
159 | // Fast path: don't touch param info if we don't need to. |
160 | if (!FPT->hasExtParameterInfos()) { |
161 | assert(paramInfos.empty() && |
162 | "We have paramInfos, but the prototype doesn't?" ); |
163 | prefix.append(in_start: FPT->param_type_begin(), in_end: FPT->param_type_end()); |
164 | return; |
165 | } |
166 | |
167 | unsigned PrefixSize = prefix.size(); |
168 | // In the vast majority of cases, we'll have precisely FPT->getNumParams() |
169 | // parameters; the only thing that can change this is the presence of |
170 | // pass_object_size. So, we preallocate for the common case. |
171 | prefix.reserve(N: prefix.size() + FPT->getNumParams()); |
172 | |
173 | auto ExtInfos = FPT->getExtParameterInfos(); |
174 | assert(ExtInfos.size() == FPT->getNumParams()); |
175 | for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) { |
176 | prefix.push_back(Elt: FPT->getParamType(i: I)); |
177 | if (ExtInfos[I].hasPassObjectSize()) |
178 | prefix.push_back(Elt: CGT.getContext().getSizeType()); |
179 | } |
180 | |
181 | addExtParameterInfosForCall(paramInfos, proto: FPT.getTypePtr(), prefixArgs: PrefixSize, |
182 | totalArgs: prefix.size()); |
183 | } |
184 | |
185 | /// Arrange the LLVM function layout for a value of the given function |
186 | /// type, on top of any implicit parameters already stored. |
187 | static const CGFunctionInfo & |
188 | arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, |
189 | SmallVectorImpl<CanQualType> &prefix, |
190 | CanQual<FunctionProtoType> FTP) { |
191 | SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; |
192 | RequiredArgs Required = RequiredArgs::forPrototypePlus(prototype: FTP, additional: prefix.size()); |
193 | // FIXME: Kill copy. |
194 | appendParameterTypes(CGT, prefix, paramInfos, FPT: FTP); |
195 | CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); |
196 | |
197 | FnInfoOpts opts = |
198 | instanceMethod ? FnInfoOpts::IsInstanceMethod : FnInfoOpts::None; |
199 | return CGT.arrangeLLVMFunctionInfo(returnType: resultType, opts, argTypes: prefix, |
200 | info: FTP->getExtInfo(), paramInfos, args: Required); |
201 | } |
202 | |
203 | /// Arrange the argument and result information for a value of the |
204 | /// given freestanding function type. |
205 | const CGFunctionInfo & |
206 | CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) { |
207 | SmallVector<CanQualType, 16> argTypes; |
208 | return ::arrangeLLVMFunctionInfo(CGT&: *this, /*instanceMethod=*/false, prefix&: argTypes, |
209 | FTP); |
210 | } |
211 | |
212 | static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, |
213 | bool IsWindows) { |
214 | // Set the appropriate calling convention for the Function. |
215 | if (D->hasAttr<StdCallAttr>()) |
216 | return CC_X86StdCall; |
217 | |
218 | if (D->hasAttr<FastCallAttr>()) |
219 | return CC_X86FastCall; |
220 | |
221 | if (D->hasAttr<RegCallAttr>()) |
222 | return CC_X86RegCall; |
223 | |
224 | if (D->hasAttr<ThisCallAttr>()) |
225 | return CC_X86ThisCall; |
226 | |
227 | if (D->hasAttr<VectorCallAttr>()) |
228 | return CC_X86VectorCall; |
229 | |
230 | if (D->hasAttr<PascalAttr>()) |
231 | return CC_X86Pascal; |
232 | |
233 | if (PcsAttr *PCS = D->getAttr<PcsAttr>()) |
234 | return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP); |
235 | |
236 | if (D->hasAttr<AArch64VectorPcsAttr>()) |
237 | return CC_AArch64VectorCall; |
238 | |
239 | if (D->hasAttr<AArch64SVEPcsAttr>()) |
240 | return CC_AArch64SVEPCS; |
241 | |
242 | if (D->hasAttr<AMDGPUKernelCallAttr>()) |
243 | return CC_AMDGPUKernelCall; |
244 | |
245 | if (D->hasAttr<IntelOclBiccAttr>()) |
246 | return CC_IntelOclBicc; |
247 | |
248 | if (D->hasAttr<MSABIAttr>()) |
249 | return IsWindows ? CC_C : CC_Win64; |
250 | |
251 | if (D->hasAttr<SysVABIAttr>()) |
252 | return IsWindows ? CC_X86_64SysV : CC_C; |
253 | |
254 | if (D->hasAttr<PreserveMostAttr>()) |
255 | return CC_PreserveMost; |
256 | |
257 | if (D->hasAttr<PreserveAllAttr>()) |
258 | return CC_PreserveAll; |
259 | |
260 | if (D->hasAttr<M68kRTDAttr>()) |
261 | return CC_M68kRTD; |
262 | |
263 | if (D->hasAttr<PreserveNoneAttr>()) |
264 | return CC_PreserveNone; |
265 | |
266 | if (D->hasAttr<RISCVVectorCCAttr>()) |
267 | return CC_RISCVVectorCall; |
268 | |
269 | return CC_C; |
270 | } |
271 | |
272 | /// Arrange the argument and result information for a call to an |
273 | /// unknown C++ non-static member function of the given abstract type. |
274 | /// (A null RD means we don't have any meaningful "this" argument type, |
275 | /// so fall back to a generic pointer type). |
276 | /// The member function must be an ordinary function, i.e. not a |
277 | /// constructor or destructor. |
278 | const CGFunctionInfo & |
279 | CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, |
280 | const FunctionProtoType *FTP, |
281 | const CXXMethodDecl *MD) { |
282 | SmallVector<CanQualType, 16> argTypes; |
283 | |
284 | // Add the 'this' pointer. |
285 | argTypes.push_back(Elt: DeriveThisType(RD, MD)); |
286 | |
287 | return ::arrangeLLVMFunctionInfo( |
288 | CGT&: *this, /*instanceMethod=*/true, prefix&: argTypes, |
289 | FTP: FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>()); |
290 | } |
291 | |
292 | /// Set calling convention for CUDA/HIP kernel. |
293 | static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, |
294 | const FunctionDecl *FD) { |
295 | if (FD->hasAttr<CUDAGlobalAttr>()) { |
296 | const FunctionType *FT = FTy->getAs<FunctionType>(); |
297 | CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT); |
298 | FTy = FT->getCanonicalTypeUnqualified(); |
299 | } |
300 | } |
301 | |
302 | /// Arrange the argument and result information for a declaration or |
303 | /// definition of the given C++ non-static member function. The |
304 | /// member function must be an ordinary function, i.e. not a |
305 | /// constructor or destructor. |
306 | const CGFunctionInfo & |
307 | CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) { |
308 | assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!" ); |
309 | assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!" ); |
310 | |
311 | CanQualType FT = GetFormalType(MD).getAs<Type>(); |
312 | setCUDAKernelCallingConvention(FTy&: FT, CGM, FD: MD); |
313 | auto prototype = FT.getAs<FunctionProtoType>(); |
314 | |
315 | if (MD->isImplicitObjectMemberFunction()) { |
316 | // The abstract case is perfectly fine. |
317 | const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(GD: MD); |
318 | return arrangeCXXMethodType(RD: ThisType, FTP: prototype.getTypePtr(), MD); |
319 | } |
320 | |
321 | return arrangeFreeFunctionType(FTP: prototype); |
322 | } |
323 | |
324 | bool CodeGenTypes::inheritingCtorHasParams( |
325 | const InheritedConstructor &Inherited, CXXCtorType Type) { |
326 | // Parameters are unnecessary if we're constructing a base class subobject |
327 | // and the inherited constructor lives in a virtual base. |
328 | return Type == Ctor_Complete || |
329 | !Inherited.getShadowDecl()->constructsVirtualBase() || |
330 | !Target.getCXXABI().hasConstructorVariants(); |
331 | } |
332 | |
333 | const CGFunctionInfo & |
334 | CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) { |
335 | auto *MD = cast<CXXMethodDecl>(Val: GD.getDecl()); |
336 | |
337 | SmallVector<CanQualType, 16> argTypes; |
338 | SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; |
339 | |
340 | const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(GD); |
341 | argTypes.push_back(Elt: DeriveThisType(RD: ThisType, MD)); |
342 | |
343 | bool PassParams = true; |
344 | |
345 | if (auto *CD = dyn_cast<CXXConstructorDecl>(Val: MD)) { |
346 | // A base class inheriting constructor doesn't get forwarded arguments |
347 | // needed to construct a virtual base (or base class thereof). |
348 | if (auto Inherited = CD->getInheritedConstructor()) |
349 | PassParams = inheritingCtorHasParams(Inherited, Type: GD.getCtorType()); |
350 | } |
351 | |
352 | CanQual<FunctionProtoType> FTP = GetFormalType(MD); |
353 | |
354 | // Add the formal parameters. |
355 | if (PassParams) |
356 | appendParameterTypes(CGT: *this, prefix&: argTypes, paramInfos, FPT: FTP); |
357 | |
358 | CGCXXABI::AddedStructorArgCounts AddedArgs = |
359 | TheCXXABI.buildStructorSignature(GD, ArgTys&: argTypes); |
360 | if (!paramInfos.empty()) { |
361 | // Note: prefix implies after the first param. |
362 | if (AddedArgs.Prefix) |
363 | paramInfos.insert(I: paramInfos.begin() + 1, NumToInsert: AddedArgs.Prefix, |
364 | Elt: FunctionProtoType::ExtParameterInfo{}); |
365 | if (AddedArgs.Suffix) |
366 | paramInfos.append(NumInputs: AddedArgs.Suffix, |
367 | Elt: FunctionProtoType::ExtParameterInfo{}); |
368 | } |
369 | |
370 | RequiredArgs required = |
371 | (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size()) |
372 | : RequiredArgs::All); |
373 | |
374 | FunctionType::ExtInfo extInfo = FTP->getExtInfo(); |
375 | CanQualType resultType = TheCXXABI.HasThisReturn(GD) |
376 | ? argTypes.front() |
377 | : TheCXXABI.hasMostDerivedReturn(GD) |
378 | ? CGM.getContext().VoidPtrTy |
379 | : Context.VoidTy; |
380 | return arrangeLLVMFunctionInfo(returnType: resultType, opts: FnInfoOpts::IsInstanceMethod, |
381 | argTypes, info: extInfo, paramInfos, args: required); |
382 | } |
383 | |
384 | static SmallVector<CanQualType, 16> |
385 | getArgTypesForCall(ASTContext &ctx, const CallArgList &args) { |
386 | SmallVector<CanQualType, 16> argTypes; |
387 | for (auto &arg : args) |
388 | argTypes.push_back(Elt: ctx.getCanonicalParamType(T: arg.Ty)); |
389 | return argTypes; |
390 | } |
391 | |
392 | static SmallVector<CanQualType, 16> |
393 | getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) { |
394 | SmallVector<CanQualType, 16> argTypes; |
395 | for (auto &arg : args) |
396 | argTypes.push_back(Elt: ctx.getCanonicalParamType(T: arg->getType())); |
397 | return argTypes; |
398 | } |
399 | |
400 | static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> |
401 | getExtParameterInfosForCall(const FunctionProtoType *proto, |
402 | unsigned prefixArgs, unsigned totalArgs) { |
403 | llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result; |
404 | if (proto->hasExtParameterInfos()) { |
405 | addExtParameterInfosForCall(paramInfos&: result, proto, prefixArgs, totalArgs); |
406 | } |
407 | return result; |
408 | } |
409 | |
410 | /// Arrange a call to a C++ method, passing the given arguments. |
411 | /// |
412 | /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this` |
413 | /// parameter. |
414 | /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of |
415 | /// args. |
416 | /// PassProtoArgs indicates whether `args` has args for the parameters in the |
417 | /// given CXXConstructorDecl. |
418 | const CGFunctionInfo & |
419 | CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args, |
420 | const CXXConstructorDecl *D, |
421 | CXXCtorType CtorKind, |
422 | unsigned ExtraPrefixArgs, |
423 | unsigned ExtraSuffixArgs, |
424 | bool PassProtoArgs) { |
425 | // FIXME: Kill copy. |
426 | SmallVector<CanQualType, 16> ArgTypes; |
427 | for (const auto &Arg : args) |
428 | ArgTypes.push_back(Elt: Context.getCanonicalParamType(T: Arg.Ty)); |
429 | |
430 | // +1 for implicit this, which should always be args[0]. |
431 | unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs; |
432 | |
433 | CanQual<FunctionProtoType> FPT = GetFormalType(MD: D); |
434 | RequiredArgs Required = PassProtoArgs |
435 | ? RequiredArgs::forPrototypePlus( |
436 | prototype: FPT, additional: TotalPrefixArgs + ExtraSuffixArgs) |
437 | : RequiredArgs::All; |
438 | |
439 | GlobalDecl GD(D, CtorKind); |
440 | CanQualType ResultType = TheCXXABI.HasThisReturn(GD) |
441 | ? ArgTypes.front() |
442 | : TheCXXABI.hasMostDerivedReturn(GD) |
443 | ? CGM.getContext().VoidPtrTy |
444 | : Context.VoidTy; |
445 | |
446 | FunctionType::ExtInfo Info = FPT->getExtInfo(); |
447 | llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos; |
448 | // If the prototype args are elided, we should only have ABI-specific args, |
449 | // which never have param info. |
450 | if (PassProtoArgs && FPT->hasExtParameterInfos()) { |
451 | // ABI-specific suffix arguments are treated the same as variadic arguments. |
452 | addExtParameterInfosForCall(paramInfos&: ParamInfos, proto: FPT.getTypePtr(), prefixArgs: TotalPrefixArgs, |
453 | totalArgs: ArgTypes.size()); |
454 | } |
455 | |
456 | return arrangeLLVMFunctionInfo(returnType: ResultType, opts: FnInfoOpts::IsInstanceMethod, |
457 | argTypes: ArgTypes, info: Info, paramInfos: ParamInfos, args: Required); |
458 | } |
459 | |
460 | /// Arrange the argument and result information for the declaration or |
461 | /// definition of the given function. |
462 | const CGFunctionInfo & |
463 | CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { |
464 | if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: FD)) |
465 | if (MD->isImplicitObjectMemberFunction()) |
466 | return arrangeCXXMethodDeclaration(MD); |
467 | |
468 | CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified(); |
469 | |
470 | assert(isa<FunctionType>(FTy)); |
471 | setCUDAKernelCallingConvention(FTy, CGM, FD); |
472 | |
473 | // When declaring a function without a prototype, always use a |
474 | // non-variadic type. |
475 | if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) { |
476 | return arrangeLLVMFunctionInfo(returnType: noProto->getReturnType(), opts: FnInfoOpts::None, |
477 | argTypes: std::nullopt, info: noProto->getExtInfo(), paramInfos: {}, |
478 | args: RequiredArgs::All); |
479 | } |
480 | |
481 | return arrangeFreeFunctionType(FTP: FTy.castAs<FunctionProtoType>()); |
482 | } |
483 | |
484 | /// Arrange the argument and result information for the declaration or |
485 | /// definition of an Objective-C method. |
486 | const CGFunctionInfo & |
487 | CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) { |
488 | // It happens that this is the same as a call with no optional |
489 | // arguments, except also using the formal 'self' type. |
490 | return arrangeObjCMessageSendSignature(MD, receiverType: MD->getSelfDecl()->getType()); |
491 | } |
492 | |
493 | /// Arrange the argument and result information for the function type |
494 | /// through which to perform a send to the given Objective-C method, |
495 | /// using the given receiver type. The receiver type is not always |
496 | /// the 'self' type of the method or even an Objective-C pointer type. |
497 | /// This is *not* the right method for actually performing such a |
498 | /// message send, due to the possibility of optional arguments. |
499 | const CGFunctionInfo & |
500 | CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, |
501 | QualType receiverType) { |
502 | SmallVector<CanQualType, 16> argTys; |
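| // Pre-size with default infos for the implicit receiver ('self') and, for |
| // non-direct methods, the selector ('_cmd'). |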
503 | SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos( |
504 | MD->isDirectMethod() ? 1 : 2); |
505 | argTys.push_back(Elt: Context.getCanonicalParamType(T: receiverType)); |
506 | if (!MD->isDirectMethod()) |
507 | argTys.push_back(Elt: Context.getCanonicalParamType(T: Context.getObjCSelType())); |
508 | // FIXME: Kill copy? |
509 | for (const auto *I : MD->parameters()) { |
510 | argTys.push_back(Elt: Context.getCanonicalParamType(T: I->getType())); |
511 | auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape( |
512 | NoEscape: I->hasAttr<NoEscapeAttr>()); |
513 | extParamInfos.push_back(Elt: extParamInfo); |
514 | } |
515 | |
516 | FunctionType::ExtInfo einfo; |
517 | bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows(); |
518 | einfo = einfo.withCallingConv(cc: getCallingConventionForDecl(D: MD, IsWindows)); |
519 | |
520 | if (getContext().getLangOpts().ObjCAutoRefCount && |
521 | MD->hasAttr<NSReturnsRetainedAttr>()) |
522 | einfo = einfo.withProducesResult(producesResult: true); |
523 | |
524 | RequiredArgs required = |
525 | (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All); |
526 | |
527 | return arrangeLLVMFunctionInfo(returnType: GetReturnType(RetTy: MD->getReturnType()), |
528 | opts: FnInfoOpts::None, argTypes: argTys, info: einfo, paramInfos: extParamInfos, |
529 | args: required); |
530 | } |
531 | |
532 | const CGFunctionInfo & |
533 | CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType, |
534 | const CallArgList &args) { |
535 | auto argTypes = getArgTypesForCall(ctx&: Context, args); |
536 | FunctionType::ExtInfo einfo; |
537 | |
538 | return arrangeLLVMFunctionInfo(returnType: GetReturnType(RetTy: returnType), opts: FnInfoOpts::None, |
539 | argTypes, info: einfo, paramInfos: {}, args: RequiredArgs::All); |
540 | } |
541 | |
542 | const CGFunctionInfo & |
543 | CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { |
544 | // FIXME: Do we need to handle ObjCMethodDecl? |
545 | const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
546 | |
547 | if (isa<CXXConstructorDecl>(Val: GD.getDecl()) || |
548 | isa<CXXDestructorDecl>(Val: GD.getDecl())) |
549 | return arrangeCXXStructorDeclaration(GD); |
550 | |
551 | return arrangeFunctionDeclaration(FD); |
552 | } |
553 | |
554 | /// Arrange a thunk that takes 'this' as the first parameter followed by |
555 | /// varargs. Return a void pointer, regardless of the actual return type. |
556 | /// The body of the thunk will end in a musttail call to a function of the |
557 | /// correct type, and the caller will bitcast the function to the correct |
558 | /// prototype. |
559 | const CGFunctionInfo & |
560 | CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) { |
561 | assert(MD->isVirtual() && "only methods have thunks" ); |
562 | CanQual<FunctionProtoType> FTP = GetFormalType(MD); |
563 | CanQualType ArgTys[] = {DeriveThisType(RD: MD->getParent(), MD)}; |
564 | return arrangeLLVMFunctionInfo(returnType: Context.VoidTy, opts: FnInfoOpts::None, argTypes: ArgTys, |
565 | info: FTP->getExtInfo(), paramInfos: {}, args: RequiredArgs(1)); |
566 | } |
567 | |
568 | const CGFunctionInfo & |
569 | CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD, |
570 | CXXCtorType CT) { |
571 | assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure); |
572 | |
573 | CanQual<FunctionProtoType> FTP = GetFormalType(MD: CD); |
574 | SmallVector<CanQualType, 2> ArgTys; |
575 | const CXXRecordDecl *RD = CD->getParent(); |
576 | ArgTys.push_back(Elt: DeriveThisType(RD, MD: CD)); |
577 | if (CT == Ctor_CopyingClosure) |
578 | ArgTys.push_back(Elt: *FTP->param_type_begin()); |
579 | if (RD->getNumVBases() > 0) |
580 | ArgTys.push_back(Elt: Context.IntTy); |
581 | CallingConv CC = Context.getDefaultCallingConvention( |
582 | /*IsVariadic=*/false, /*IsCXXMethod=*/true); |
583 | return arrangeLLVMFunctionInfo(returnType: Context.VoidTy, opts: FnInfoOpts::IsInstanceMethod, |
584 | argTypes: ArgTys, info: FunctionType::ExtInfo(CC), paramInfos: {}, |
585 | args: RequiredArgs::All); |
586 | } |
587 | |
588 | /// Arrange a call as unto a free function, except possibly with an |
589 | /// additional number of formal parameters considered required. |
590 | static const CGFunctionInfo & |
591 | arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, |
592 | CodeGenModule &CGM, |
593 | const CallArgList &args, |
594 | const FunctionType *fnType, |
595 | unsigned numExtraRequiredArgs, |
596 | bool chainCall) { |
597 | assert(args.size() >= numExtraRequiredArgs); |
598 | |
599 | llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; |
600 | |
601 | // In most cases, there are no optional arguments. |
602 | RequiredArgs required = RequiredArgs::All; |
603 | |
604 | // If we have a variadic prototype, the required arguments are the |
605 | // extra prefix plus the arguments in the prototype. |
606 | if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(Val: fnType)) { |
607 | if (proto->isVariadic()) |
608 | required = RequiredArgs::forPrototypePlus(prototype: proto, additional: numExtraRequiredArgs); |
609 | |
610 | if (proto->hasExtParameterInfos()) |
611 | addExtParameterInfosForCall(paramInfos, proto, prefixArgs: numExtraRequiredArgs, |
612 | totalArgs: args.size()); |
613 | |
614 | // If we don't have a prototype at all, but we're supposed to |
615 | // explicitly use the variadic convention for unprototyped calls, |
616 | // treat all of the arguments as required but preserve the nominal |
617 | // possibility of variadics. |
618 | } else if (CGM.getTargetCodeGenInfo() |
619 | .isNoProtoCallVariadic(args, |
620 | fnType: cast<FunctionNoProtoType>(Val: fnType))) { |
621 | required = RequiredArgs(args.size()); |
622 | } |
623 | |
624 | // FIXME: Kill copy. |
625 | SmallVector<CanQualType, 16> argTypes; |
626 | for (const auto &arg : args) |
627 | argTypes.push_back(Elt: CGT.getContext().getCanonicalParamType(T: arg.Ty)); |
628 | FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None; |
629 | return CGT.arrangeLLVMFunctionInfo(returnType: GetReturnType(RetTy: fnType->getReturnType()), |
630 | opts, argTypes, info: fnType->getExtInfo(), |
631 | paramInfos, args: required); |
632 | } |
633 | |
634 | /// Figure out the rules for calling a function with the given formal |
635 | /// type using the given arguments. The arguments are necessary |
636 | /// because the function might be unprototyped, in which case it's |
637 | /// target-dependent in crazy ways. |
638 | const CGFunctionInfo & |
639 | CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args, |
640 | const FunctionType *fnType, |
641 | bool chainCall) { |
642 | return arrangeFreeFunctionLikeCall(CGT&: *this, CGM, args, fnType, |
643 | numExtraRequiredArgs: chainCall ? 1 : 0, chainCall); |
644 | } |
645 | |
646 | /// A block function is essentially a free function with an |
647 | /// extra implicit argument. |
648 | const CGFunctionInfo & |
649 | CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args, |
650 | const FunctionType *fnType) { |
651 | return arrangeFreeFunctionLikeCall(CGT&: *this, CGM, args, fnType, numExtraRequiredArgs: 1, |
652 | /*chainCall=*/false); |
653 | } |
654 | |
655 | const CGFunctionInfo & |
656 | CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto, |
657 | const FunctionArgList ¶ms) { |
658 | auto paramInfos = getExtParameterInfosForCall(proto, prefixArgs: 1, totalArgs: params.size()); |
659 | auto argTypes = getArgTypesForDeclaration(ctx&: Context, args: params); |
660 | |
661 | return arrangeLLVMFunctionInfo(returnType: GetReturnType(RetTy: proto->getReturnType()), |
662 | opts: FnInfoOpts::None, argTypes, |
663 | info: proto->getExtInfo(), paramInfos, |
664 | args: RequiredArgs::forPrototypePlus(prototype: proto, additional: 1)); |
665 | } |
666 | |
667 | const CGFunctionInfo & |
668 | CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType, |
669 | const CallArgList &args) { |
670 | // FIXME: Kill copy. |
671 | SmallVector<CanQualType, 16> argTypes; |
672 | for (const auto &Arg : args) |
673 | argTypes.push_back(Elt: Context.getCanonicalParamType(T: Arg.Ty)); |
674 | return arrangeLLVMFunctionInfo(returnType: GetReturnType(RetTy: resultType), opts: FnInfoOpts::None, |
675 | argTypes, info: FunctionType::ExtInfo(), |
676 | /*paramInfos=*/{}, args: RequiredArgs::All); |
677 | } |
678 | |
679 | const CGFunctionInfo & |
680 | CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType, |
681 | const FunctionArgList &args) { |
682 | auto argTypes = getArgTypesForDeclaration(ctx&: Context, args); |
683 | |
684 | return arrangeLLVMFunctionInfo(returnType: GetReturnType(RetTy: resultType), opts: FnInfoOpts::None, |
685 | argTypes, info: FunctionType::ExtInfo(), paramInfos: {}, |
686 | args: RequiredArgs::All); |
687 | } |
688 | |
689 | const CGFunctionInfo & |
690 | CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType, |
691 | ArrayRef<CanQualType> argTypes) { |
692 | return arrangeLLVMFunctionInfo(returnType: resultType, opts: FnInfoOpts::None, argTypes, |
693 | info: FunctionType::ExtInfo(), paramInfos: {}, |
694 | args: RequiredArgs::All); |
695 | } |
696 | |
697 | /// Arrange a call to a C++ method, passing the given arguments. |
698 | /// |
699 | /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It |
700 | /// does not count `this`. |
701 | const CGFunctionInfo & |
702 | CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args, |
703 | const FunctionProtoType *proto, |
704 | RequiredArgs required, |
705 | unsigned numPrefixArgs) { |
706 | assert(numPrefixArgs + 1 <= args.size() && |
707 | "Emitting a call with less args than the required prefix?" ); |
708 | // Add one to account for `this`. It's a bit awkward here, but we don't count |
709 | // `this` in similar places elsewhere. |
710 | auto paramInfos = |
711 | getExtParameterInfosForCall(proto, prefixArgs: numPrefixArgs + 1, totalArgs: args.size()); |
712 | |
713 | // FIXME: Kill copy. |
714 | auto argTypes = getArgTypesForCall(ctx&: Context, args); |
715 | |
716 | FunctionType::ExtInfo info = proto->getExtInfo(); |
717 | return arrangeLLVMFunctionInfo(returnType: GetReturnType(RetTy: proto->getReturnType()), |
718 | opts: FnInfoOpts::IsInstanceMethod, argTypes, info, |
719 | paramInfos, args: required); |
720 | } |
721 | |
722 | const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() { |
723 | return arrangeLLVMFunctionInfo(returnType: getContext().VoidTy, opts: FnInfoOpts::None, |
724 | argTypes: std::nullopt, info: FunctionType::ExtInfo(), paramInfos: {}, |
725 | args: RequiredArgs::All); |
726 | } |
727 | |
728 | const CGFunctionInfo & |
729 | CodeGenTypes::arrangeCall(const CGFunctionInfo &signature, |
730 | const CallArgList &args) { |
731 | assert(signature.arg_size() <= args.size()); |
732 | if (signature.arg_size() == args.size()) |
733 | return signature; |
734 | |
735 | SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; |
736 | auto sigParamInfos = signature.getExtParameterInfos(); |
737 | if (!sigParamInfos.empty()) { |
738 | paramInfos.append(in_start: sigParamInfos.begin(), in_end: sigParamInfos.end()); |
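| // Pad with default infos so there is one entry per actual argument, |
| // including the trailing variadic arguments. |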
739 | paramInfos.resize(N: args.size()); |
740 | } |
741 | |
742 | auto argTypes = getArgTypesForCall(ctx&: Context, args); |
743 | |
744 | assert(signature.getRequiredArgs().allowsOptionalArgs()); |
745 | FnInfoOpts opts = FnInfoOpts::None; |
746 | if (signature.isInstanceMethod()) |
747 | opts |= FnInfoOpts::IsInstanceMethod; |
748 | if (signature.isChainCall()) |
749 | opts |= FnInfoOpts::IsChainCall; |
750 | if (signature.isDelegateCall()) |
751 | opts |= FnInfoOpts::IsDelegateCall; |
752 | return arrangeLLVMFunctionInfo(returnType: signature.getReturnType(), opts, argTypes, |
753 | info: signature.getExtInfo(), paramInfos, |
754 | args: signature.getRequiredArgs()); |
755 | } |
756 | |
757 | namespace clang { |
758 | namespace CodeGen { |
759 | void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI); |
760 | } |
761 | } |
762 | |
763 | /// Arrange the argument and result information for an abstract value |
764 | /// of a given function type. This is the method which all of the |
765 | /// above functions ultimately defer to. |
766 | const CGFunctionInfo &CodeGenTypes::arrangeLLVMFunctionInfo( |
767 | CanQualType resultType, FnInfoOpts opts, ArrayRef<CanQualType> argTypes, |
768 | FunctionType::ExtInfo info, |
769 | ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos, |
770 | RequiredArgs required) { |
771 | assert(llvm::all_of(argTypes, |
772 | [](CanQualType T) { return T.isCanonicalAsParam(); })); |
773 | |
774 | // Lookup or create unique function info. |
775 | llvm::FoldingSetNodeID ID; |
776 | bool isInstanceMethod = |
777 | (opts & FnInfoOpts::IsInstanceMethod) == FnInfoOpts::IsInstanceMethod; |
778 | bool isChainCall = |
779 | (opts & FnInfoOpts::IsChainCall) == FnInfoOpts::IsChainCall; |
780 | bool isDelegateCall = |
781 | (opts & FnInfoOpts::IsDelegateCall) == FnInfoOpts::IsDelegateCall; |
782 | CGFunctionInfo::Profile(ID, InstanceMethod: isInstanceMethod, ChainCall: isChainCall, IsDelegateCall: isDelegateCall, |
783 | info, paramInfos, required, resultType, argTypes); |
784 | |
785 | void *insertPos = nullptr; |
786 | CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos&: insertPos); |
787 | if (FI) |
788 | return *FI; |
789 | |
790 | unsigned CC = ClangCallConvToLLVMCallConv(CC: info.getCC()); |
791 | |
792 | // Construct the function info. We co-allocate the ArgInfos. |
793 | FI = CGFunctionInfo::create(llvmCC: CC, instanceMethod: isInstanceMethod, chainCall: isChainCall, delegateCall: isDelegateCall, |
794 | extInfo: info, paramInfos, resultType, argTypes, required); |
795 | FunctionInfos.InsertNode(N: FI, InsertPos: insertPos); |
796 | |
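| // Guard against recursive processing of this function info while its ABI |
| // details are being computed. |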
797 | bool inserted = FunctionsBeingProcessed.insert(Ptr: FI).second; |
798 | (void)inserted; |
799 | assert(inserted && "Recursively being processed?" ); |
800 | |
801 | // Compute ABI information. |
802 | if (CC == llvm::CallingConv::SPIR_KERNEL) { |
803 | // Force target independent argument handling for the host visible |
804 | // kernel functions. |
805 | computeSPIRKernelABIInfo(CGM, FI&: *FI); |
806 | } else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) { |
807 | swiftcall::computeABIInfo(CGM, FI&: *FI); |
808 | } else { |
809 | getABIInfo().computeInfo(FI&: *FI); |
810 | } |
811 | |
812 | // Loop over all of the computed argument and return value info. If any of |
813 | // them are direct or extend without a specified coerce type, specify the |
814 | // default now. |
815 | ABIArgInfo &retInfo = FI->getReturnInfo(); |
816 | if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) |
817 | retInfo.setCoerceToType(ConvertType(T: FI->getReturnType())); |
818 | |
819 | for (auto &I : FI->arguments()) |
820 | if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr) |
821 | I.info.setCoerceToType(ConvertType(T: I.type)); |
822 | |
823 | bool erased = FunctionsBeingProcessed.erase(Ptr: FI); (void)erased; |
824 | assert(erased && "Not in set?" ); |
825 | |
826 | return *FI; |
827 | } |
828 | |
829 | CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, bool instanceMethod, |
830 | bool chainCall, bool delegateCall, |
831 | const FunctionType::ExtInfo &info, |
832 | ArrayRef<ExtParameterInfo> paramInfos, |
833 | CanQualType resultType, |
834 | ArrayRef<CanQualType> argTypes, |
835 | RequiredArgs required) { |
836 | assert(paramInfos.empty() || paramInfos.size() == argTypes.size()); |
837 | assert(!required.allowsOptionalArgs() || |
838 | required.getNumRequiredArgs() <= argTypes.size()); |
839 | |
840 | void *buffer = |
841 | operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>( |
842 | Counts: argTypes.size() + 1, Counts: paramInfos.size())); |
843 | |
844 | CGFunctionInfo *FI = new(buffer) CGFunctionInfo(); |
845 | FI->CallingConvention = llvmCC; |
846 | FI->EffectiveCallingConvention = llvmCC; |
847 | FI->ASTCallingConvention = info.getCC(); |
848 | FI->InstanceMethod = instanceMethod; |
849 | FI->ChainCall = chainCall; |
850 | FI->DelegateCall = delegateCall; |
851 | FI->CmseNSCall = info.getCmseNSCall(); |
852 | FI->NoReturn = info.getNoReturn(); |
853 | FI->ReturnsRetained = info.getProducesResult(); |
854 | FI->NoCallerSavedRegs = info.getNoCallerSavedRegs(); |
855 | FI->NoCfCheck = info.getNoCfCheck(); |
856 | FI->Required = required; |
857 | FI->HasRegParm = info.getHasRegParm(); |
858 | FI->RegParm = info.getRegParm(); |
859 | FI->ArgStruct = nullptr; |
860 | FI->ArgStructAlign = 0; |
861 | FI->NumArgs = argTypes.size(); |
862 | FI->HasExtParameterInfos = !paramInfos.empty(); |
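| // The trailing ArgInfo buffer stores the return type in slot 0, followed by |
| // one slot per argument type. |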
863 | FI->getArgsBuffer()[0].type = resultType; |
864 | FI->MaxVectorWidth = 0; |
865 | for (unsigned i = 0, e = argTypes.size(); i != e; ++i) |
866 | FI->getArgsBuffer()[i + 1].type = argTypes[i]; |
867 | for (unsigned i = 0, e = paramInfos.size(); i != e; ++i) |
868 | FI->getExtParameterInfosBuffer()[i] = paramInfos[i]; |
869 | return FI; |
870 | } |
871 | |
872 | /***/ |
873 | |
874 | namespace { |
875 | // ABIArgInfo::Expand implementation. |
876 | |
877 | // Specifies the way QualType passed as ABIArgInfo::Expand is expanded. |
878 | struct TypeExpansion { |
879 | enum TypeExpansionKind { |
880 | // Elements of constant arrays are expanded recursively. |
881 | TEK_ConstantArray, |
882 | // Record fields are expanded recursively (but if record is a union, only |
883 | // the field with the largest size is expanded). |
884 | TEK_Record, |
885 | // For complex types, real and imaginary parts are expanded recursively. |
886 | TEK_Complex, |
887 | // All other types are not expandable. |
888 | TEK_None |
889 | }; |
890 | |
891 | const TypeExpansionKind Kind; |
892 | |
893 | TypeExpansion(TypeExpansionKind K) : Kind(K) {} |
894 | virtual ~TypeExpansion() {} |
895 | }; |
896 | |
897 | struct ConstantArrayExpansion : TypeExpansion { |
898 | QualType EltTy; |
899 | uint64_t NumElts; |
900 | |
901 | ConstantArrayExpansion(QualType EltTy, uint64_t NumElts) |
902 | : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {} |
903 | static bool classof(const TypeExpansion *TE) { |
904 | return TE->Kind == TEK_ConstantArray; |
905 | } |
906 | }; |
907 | |
908 | struct RecordExpansion : TypeExpansion { |
909 | SmallVector<const CXXBaseSpecifier *, 1> Bases; |
910 | |
911 | SmallVector<const FieldDecl *, 1> Fields; |
912 | |
913 | RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases, |
914 | SmallVector<const FieldDecl *, 1> &&Fields) |
915 | : TypeExpansion(TEK_Record), Bases(std::move(Bases)), |
916 | Fields(std::move(Fields)) {} |
917 | static bool classof(const TypeExpansion *TE) { |
918 | return TE->Kind == TEK_Record; |
919 | } |
920 | }; |
921 | |
922 | struct ComplexExpansion : TypeExpansion { |
923 | QualType EltTy; |
924 | |
925 | ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {} |
926 | static bool classof(const TypeExpansion *TE) { |
927 | return TE->Kind == TEK_Complex; |
928 | } |
929 | }; |
930 | |
931 | struct NoExpansion : TypeExpansion { |
932 | NoExpansion() : TypeExpansion(TEK_None) {} |
933 | static bool classof(const TypeExpansion *TE) { |
934 | return TE->Kind == TEK_None; |
935 | } |
936 | }; |
937 | } // namespace |
938 | |
939 | static std::unique_ptr<TypeExpansion> |
940 | getTypeExpansion(QualType Ty, const ASTContext &Context) { |
941 | if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T: Ty)) { |
942 | return std::make_unique<ConstantArrayExpansion>(args: AT->getElementType(), |
943 | args: AT->getZExtSize()); |
944 | } |
945 | if (const RecordType *RT = Ty->getAs<RecordType>()) { |
946 | SmallVector<const CXXBaseSpecifier *, 1> Bases; |
947 | SmallVector<const FieldDecl *, 1> Fields; |
948 | const RecordDecl *RD = RT->getDecl(); |
949 | assert(!RD->hasFlexibleArrayMember() && |
950 | "Cannot expand structure with flexible array." ); |
951 | if (RD->isUnion()) { |
952 | // Unions can be here only in degenerate cases - all the fields are the same |
953 | // after flattening. Thus we have to use the "largest" field. |
954 | const FieldDecl *LargestFD = nullptr; |
955 | CharUnits UnionSize = CharUnits::Zero(); |
956 | |
957 | for (const auto *FD : RD->fields()) { |
958 | if (FD->isZeroLengthBitField(Ctx: Context)) |
959 | continue; |
960 | assert(!FD->isBitField() && |
961 | "Cannot expand structure with bit-field members." ); |
962 | CharUnits FieldSize = Context.getTypeSizeInChars(T: FD->getType()); |
963 | if (UnionSize < FieldSize) { |
964 | UnionSize = FieldSize; |
965 | LargestFD = FD; |
966 | } |
967 | } |
968 | if (LargestFD) |
969 | Fields.push_back(Elt: LargestFD); |
970 | } else { |
971 | if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) { |
972 | assert(!CXXRD->isDynamicClass() && |
973 | "cannot expand vtable pointers in dynamic classes" ); |
974 | llvm::append_range(C&: Bases, R: llvm::make_pointer_range(Range: CXXRD->bases())); |
975 | } |
976 | |
977 | for (const auto *FD : RD->fields()) { |
978 | if (FD->isZeroLengthBitField(Ctx: Context)) |
979 | continue; |
980 | assert(!FD->isBitField() && |
981 | "Cannot expand structure with bit-field members." ); |
982 | Fields.push_back(Elt: FD); |
983 | } |
984 | } |
985 | return std::make_unique<RecordExpansion>(args: std::move(Bases), |
986 | args: std::move(Fields)); |
987 | } |
988 | if (const ComplexType *CT = Ty->getAs<ComplexType>()) { |
989 | return std::make_unique<ComplexExpansion>(args: CT->getElementType()); |
990 | } |
991 | return std::make_unique<NoExpansion>(); |
992 | } |
993 | |
994 | static int getExpansionSize(QualType Ty, const ASTContext &Context) { |
995 | auto Exp = getTypeExpansion(Ty, Context); |
996 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Val: Exp.get())) { |
997 | return CAExp->NumElts * getExpansionSize(Ty: CAExp->EltTy, Context); |
998 | } |
999 | if (auto RExp = dyn_cast<RecordExpansion>(Val: Exp.get())) { |
1000 | int Res = 0; |
1001 | for (auto BS : RExp->Bases) |
1002 | Res += getExpansionSize(Ty: BS->getType(), Context); |
1003 | for (auto FD : RExp->Fields) |
1004 | Res += getExpansionSize(Ty: FD->getType(), Context); |
1005 | return Res; |
1006 | } |
1007 | if (isa<ComplexExpansion>(Val: Exp.get())) |
1008 | return 2; |
1009 | assert(isa<NoExpansion>(Exp.get())); |
1010 | return 1; |
1011 | } |
1012 | |
1013 | void |
1014 | CodeGenTypes::getExpandedTypes(QualType Ty, |
1015 | SmallVectorImpl<llvm::Type *>::iterator &TI) { |
1016 | auto Exp = getTypeExpansion(Ty, Context); |
1017 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Val: Exp.get())) { |
1018 | for (int i = 0, n = CAExp->NumElts; i < n; i++) { |
1019 | getExpandedTypes(Ty: CAExp->EltTy, TI); |
1020 | } |
1021 | } else if (auto RExp = dyn_cast<RecordExpansion>(Val: Exp.get())) { |
1022 | for (auto BS : RExp->Bases) |
1023 | getExpandedTypes(Ty: BS->getType(), TI); |
1024 | for (auto FD : RExp->Fields) |
1025 | getExpandedTypes(Ty: FD->getType(), TI); |
1026 | } else if (auto CExp = dyn_cast<ComplexExpansion>(Val: Exp.get())) { |
1027 | llvm::Type *EltTy = ConvertType(T: CExp->EltTy); |
1028 | *TI++ = EltTy; |
1029 | *TI++ = EltTy; |
1030 | } else { |
1031 | assert(isa<NoExpansion>(Exp.get())); |
1032 | *TI++ = ConvertType(T: Ty); |
1033 | } |
1034 | } |
1035 | |
1036 | static void forConstantArrayExpansion(CodeGenFunction &CGF, |
1037 | ConstantArrayExpansion *CAE, |
1038 | Address BaseAddr, |
1039 | llvm::function_ref<void(Address)> Fn) { |
1040 | for (int i = 0, n = CAE->NumElts; i < n; i++) { |
1041 | Address EltAddr = CGF.Builder.CreateConstGEP2_32(Addr: BaseAddr, Idx0: 0, Idx1: i); |
1042 | Fn(EltAddr); |
1043 | } |
1044 | } |
1045 | |
1046 | void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV, |
1047 | llvm::Function::arg_iterator &AI) { |
1048 | assert(LV.isSimple() && |
1049 | "Unexpected non-simple lvalue during struct expansion." ); |
1050 | |
1051 | auto Exp = getTypeExpansion(Ty, Context: getContext()); |
1052 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Val: Exp.get())) { |
1053 | forConstantArrayExpansion( |
1054 | CGF&: *this, CAE: CAExp, BaseAddr: LV.getAddress(), Fn: [&](Address EltAddr) { |
1055 | LValue LV = MakeAddrLValue(Addr: EltAddr, T: CAExp->EltTy); |
1056 | ExpandTypeFromArgs(Ty: CAExp->EltTy, LV, AI); |
1057 | }); |
1058 | } else if (auto RExp = dyn_cast<RecordExpansion>(Val: Exp.get())) { |
1059 | Address This = LV.getAddress(); |
1060 | for (const CXXBaseSpecifier *BS : RExp->Bases) { |
1061 | // Perform a single step derived-to-base conversion. |
1062 | Address Base = |
1063 | GetAddressOfBaseClass(Value: This, Derived: Ty->getAsCXXRecordDecl(), PathBegin: &BS, PathEnd: &BS + 1, |
1064 | /*NullCheckValue=*/false, Loc: SourceLocation()); |
1065 | LValue SubLV = MakeAddrLValue(Addr: Base, T: BS->getType()); |
1066 | |
1067 | // Recurse onto bases. |
1068 | ExpandTypeFromArgs(Ty: BS->getType(), LV: SubLV, AI); |
1069 | } |
1070 | for (auto FD : RExp->Fields) { |
1071 | // FIXME: What are the right qualifiers here? |
1072 | LValue SubLV = EmitLValueForFieldInitialization(Base: LV, Field: FD); |
1073 | ExpandTypeFromArgs(Ty: FD->getType(), LV: SubLV, AI); |
1074 | } |
1075 | } else if (isa<ComplexExpansion>(Val: Exp.get())) { |
1076 | auto realValue = &*AI++; |
1077 | auto imagValue = &*AI++; |
1078 | EmitStoreOfComplex(V: ComplexPairTy(realValue, imagValue), dest: LV, /*init*/ isInit: true); |
1079 | } else { |
1080 | // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a |
1081 | // primitive store. |
1082 | assert(isa<NoExpansion>(Exp.get())); |
1083 | llvm::Value *Arg = &*AI++; |
1084 | if (LV.isBitField()) { |
1085 | EmitStoreThroughLValue(Src: RValue::get(V: Arg), Dst: LV); |
1086 | } else { |
1087 | // TODO: currently there are some places are inconsistent in what LLVM |
1088 | // pointer type they use (see D118744). Once clang uses opaque pointers |
1089 | // all LLVM pointer types will be the same and we can remove this check. |
1090 | if (Arg->getType()->isPointerTy()) { |
1091 | Address Addr = LV.getAddress(); |
1092 | Arg = Builder.CreateBitCast(V: Arg, DestTy: Addr.getElementType()); |
1093 | } |
1094 | EmitStoreOfScalar(value: Arg, lvalue: LV); |
1095 | } |
1096 | } |
1097 | } |
1098 | |
1099 | void CodeGenFunction::ExpandTypeToArgs( |
1100 | QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy, |
1101 | SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) { |
1102 | auto Exp = getTypeExpansion(Ty, Context: getContext()); |
1103 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Val: Exp.get())) { |
1104 | Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress() |
1105 | : Arg.getKnownRValue().getAggregateAddress(); |
1106 | forConstantArrayExpansion( |
1107 | CGF&: *this, CAE: CAExp, BaseAddr: Addr, Fn: [&](Address EltAddr) { |
1108 | CallArg EltArg = CallArg( |
1109 | convertTempToRValue(addr: EltAddr, type: CAExp->EltTy, Loc: SourceLocation()), |
1110 | CAExp->EltTy); |
1111 | ExpandTypeToArgs(Ty: CAExp->EltTy, Arg: EltArg, IRFuncTy, IRCallArgs, |
1112 | IRCallArgPos); |
1113 | }); |
1114 | } else if (auto RExp = dyn_cast<RecordExpansion>(Val: Exp.get())) { |
1115 | Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress() |
1116 | : Arg.getKnownRValue().getAggregateAddress(); |
1117 | for (const CXXBaseSpecifier *BS : RExp->Bases) { |
1118 | // Perform a single step derived-to-base conversion. |
1119 | Address Base = |
1120 | GetAddressOfBaseClass(Value: This, Derived: Ty->getAsCXXRecordDecl(), PathBegin: &BS, PathEnd: &BS + 1, |
1121 | /*NullCheckValue=*/false, Loc: SourceLocation()); |
1122 | CallArg BaseArg = CallArg(RValue::getAggregate(addr: Base), BS->getType()); |
1123 | |
1124 | // Recurse onto bases. |
1125 | ExpandTypeToArgs(Ty: BS->getType(), Arg: BaseArg, IRFuncTy, IRCallArgs, |
1126 | IRCallArgPos); |
1127 | } |
1128 | |
1129 | LValue LV = MakeAddrLValue(Addr: This, T: Ty); |
1130 | for (auto FD : RExp->Fields) { |
1131 | CallArg FldArg = |
1132 | CallArg(EmitRValueForField(LV, FD, Loc: SourceLocation()), FD->getType()); |
1133 | ExpandTypeToArgs(Ty: FD->getType(), Arg: FldArg, IRFuncTy, IRCallArgs, |
1134 | IRCallArgPos); |
1135 | } |
1136 | } else if (isa<ComplexExpansion>(Val: Exp.get())) { |
1137 | ComplexPairTy CV = Arg.getKnownRValue().getComplexVal(); |
1138 | IRCallArgs[IRCallArgPos++] = CV.first; |
1139 | IRCallArgs[IRCallArgPos++] = CV.second; |
1140 | } else { |
1141 | assert(isa<NoExpansion>(Exp.get())); |
1142 | auto RV = Arg.getKnownRValue(); |
1143 | assert(RV.isScalar() && |
1144 | "Unexpected non-scalar rvalue during struct expansion." ); |
1145 | |
1146 | // Insert a bitcast as needed. |
1147 | llvm::Value *V = RV.getScalarVal(); |
1148 | if (IRCallArgPos < IRFuncTy->getNumParams() && |
1149 | V->getType() != IRFuncTy->getParamType(i: IRCallArgPos)) |
1150 | V = Builder.CreateBitCast(V, DestTy: IRFuncTy->getParamType(i: IRCallArgPos)); |
1151 | |
1152 | IRCallArgs[IRCallArgPos++] = V; |
1153 | } |
1154 | } |
1155 | |
1156 | /// Create a temporary allocation for the purposes of coercion. |
1157 | static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF, |
1158 | llvm::Type *Ty, |
1159 | CharUnits MinAlign, |
1160 | const Twine &Name = "tmp" ) { |
1161 | // Don't use an alignment that's worse than what LLVM would prefer. |
1162 | auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(Ty); |
1163 | CharUnits Align = std::max(a: MinAlign, b: CharUnits::fromQuantity(Quantity: PrefAlign)); |
1164 | |
1165 | return CGF.CreateTempAlloca(Ty, align: Align, Name: Name + ".coerce" ); |
1166 | } |
1167 | |
1168 | /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are |
1169 | /// accessing some number of bytes out of it, try to gep into the struct to get |
1170 | /// at its inner goodness. Dive as deep as possible without entering an element |
1171 | /// with an in-memory size smaller than DstSize. |
1172 | static Address |
1173 | EnterStructPointerForCoercedAccess(Address SrcPtr, |
1174 | llvm::StructType *SrcSTy, |
1175 | uint64_t DstSize, CodeGenFunction &CGF) { |
1176 | // We can't dive into a zero-element struct. |
1177 | if (SrcSTy->getNumElements() == 0) return SrcPtr; |
1178 | |
1179 | llvm::Type *FirstElt = SrcSTy->getElementType(N: 0); |
1180 | |
1181 | // If the first elt is at least as large as what we're looking for, or if the |
1182 | // first element is the same size as the whole struct, we can enter it. The |
1183 | // comparison must be made on the store size and not the alloca size. Using |
1184 | // the alloca size may overstate the size of the load. |
1185 | uint64_t FirstEltSize = |
1186 | CGF.CGM.getDataLayout().getTypeStoreSize(Ty: FirstElt); |
1187 | if (FirstEltSize < DstSize && |
1188 | FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(Ty: SrcSTy)) |
1189 | return SrcPtr; |
1190 | |
1191 | // GEP into the first element. |
1192 | SrcPtr = CGF.Builder.CreateStructGEP(Addr: SrcPtr, Index: 0, Name: "coerce.dive" ); |
1193 | |
1194 | // If the first element is a struct, recurse. |
1195 | llvm::Type *SrcTy = SrcPtr.getElementType(); |
1196 | if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(Val: SrcTy)) |
1197 | return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF); |
1198 | |
1199 | return SrcPtr; |
1200 | } |
1201 | |
1202 | /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both |
1203 | /// are either integers or pointers. This does a truncation of the value if it |
1204 | /// is too large or a zero extension if it is too small. |
1205 | /// |
1206 | /// This behaves as if the value were coerced through memory, so on big-endian |
1207 | /// targets the high bits are preserved in a truncation, while little-endian |
1208 | /// targets preserve the low bits. |
1209 | static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, |
1210 | llvm::Type *Ty, |
1211 | CodeGenFunction &CGF) { |
1212 | if (Val->getType() == Ty) |
1213 | return Val; |
1214 | |
1215 | if (isa<llvm::PointerType>(Val: Val->getType())) { |
1216 | // If this is Pointer->Pointer avoid conversion to and from int. |
1217 | if (isa<llvm::PointerType>(Val: Ty)) |
1218 | return CGF.Builder.CreateBitCast(V: Val, DestTy: Ty, Name: "coerce.val" ); |
1219 | |
1220 | // Convert the pointer to an integer so we can play with its width. |
1221 | Val = CGF.Builder.CreatePtrToInt(V: Val, DestTy: CGF.IntPtrTy, Name: "coerce.val.pi" ); |
1222 | } |
1223 | |
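| // If the destination is a pointer, do the width adjustment on a |
| // pointer-sized integer and convert back to a pointer afterwards. |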
1224 | llvm::Type *DestIntTy = Ty; |
1225 | if (isa<llvm::PointerType>(Val: DestIntTy)) |
1226 | DestIntTy = CGF.IntPtrTy; |
1227 | |
1228 | if (Val->getType() != DestIntTy) { |
1229 | const llvm::DataLayout &DL = CGF.CGM.getDataLayout(); |
1230 | if (DL.isBigEndian()) { |
1231 | // Preserve the high bits on big-endian targets. |
1232 | // That is what memory coercion does. |
1233 | uint64_t SrcSize = DL.getTypeSizeInBits(Ty: Val->getType()); |
1234 | uint64_t DstSize = DL.getTypeSizeInBits(Ty: DestIntTy); |
1235 | |
1236 | if (SrcSize > DstSize) { |
1237 | Val = CGF.Builder.CreateLShr(LHS: Val, RHS: SrcSize - DstSize, Name: "coerce.highbits" ); |
1238 | Val = CGF.Builder.CreateTrunc(V: Val, DestTy: DestIntTy, Name: "coerce.val.ii" ); |
1239 | } else { |
1240 | Val = CGF.Builder.CreateZExt(V: Val, DestTy: DestIntTy, Name: "coerce.val.ii" ); |
1241 | Val = CGF.Builder.CreateShl(LHS: Val, RHS: DstSize - SrcSize, Name: "coerce.highbits" ); |
1242 | } |
1243 | } else { |
1244 | // Little-endian targets preserve the low bits. No shifts required. |
1245 | Val = CGF.Builder.CreateIntCast(V: Val, DestTy: DestIntTy, isSigned: false, Name: "coerce.val.ii" ); |
1246 | } |
1247 | } |
1248 | |
1249 | if (isa<llvm::PointerType>(Val: Ty)) |
1250 | Val = CGF.Builder.CreateIntToPtr(V: Val, DestTy: Ty, Name: "coerce.val.ip" ); |
1251 | return Val; |
1252 | } |
1253 | |
1254 | |
1255 | |
1256 | /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as |
1257 | /// a pointer to an object of type \arg Ty, known to be aligned to |
1258 | /// \arg SrcAlign bytes. |
1259 | /// |
1260 | /// This safely handles the case when the src type is smaller than the |
1261 | /// destination type; in this situation the values of bits that are not |
1262 | /// present in the src are undefined. |
1263 | static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty, |
1264 | CodeGenFunction &CGF) { |
1265 | llvm::Type *SrcTy = Src.getElementType(); |
1266 | |
1267 | // If SrcTy and Ty are the same, just do a load. |
1268 | if (SrcTy == Ty) |
1269 | return CGF.Builder.CreateLoad(Addr: Src); |
1270 | |
1271 | llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty); |
1272 | |
1273 | if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(Val: SrcTy)) { |
1274 | Src = EnterStructPointerForCoercedAccess(SrcPtr: Src, SrcSTy, |
1275 | DstSize: DstSize.getFixedValue(), CGF); |
1276 | SrcTy = Src.getElementType(); |
1277 | } |
1278 | |
1279 | llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty: SrcTy); |
1280 | |
1281 | // If the source and destination are integer or pointer types, just do an |
1282 | // extension or truncation to the desired type. |
1283 | if ((isa<llvm::IntegerType>(Val: Ty) || isa<llvm::PointerType>(Val: Ty)) && |
1284 | (isa<llvm::IntegerType>(Val: SrcTy) || isa<llvm::PointerType>(Val: SrcTy))) { |
1285 | llvm::Value *Load = CGF.Builder.CreateLoad(Addr: Src); |
1286 | return CoerceIntOrPtrToIntOrPtr(Val: Load, Ty, CGF); |
1287 | } |
1288 | |
1289 | // If load is legal, just bitcast the src pointer. |
1290 | if (!SrcSize.isScalable() && !DstSize.isScalable() && |
1291 | SrcSize.getFixedValue() >= DstSize.getFixedValue()) { |
1292 | // Generally SrcSize is never greater than DstSize, since this means we are |
1293 | // losing bits. However, this can happen in cases where the structure has |
1294 | // additional padding, for example due to a user specified alignment. |
1295 | // |
// FIXME: Assert that we aren't truncating non-padding bits when we have
// access to that information.
1298 | Src = Src.withElementType(ElemTy: Ty); |
1299 | return CGF.Builder.CreateLoad(Addr: Src); |
1300 | } |
1301 | |
1302 | // If coercing a fixed vector to a scalable vector for ABI compatibility, and |
1303 | // the types match, use the llvm.vector.insert intrinsic to perform the |
1304 | // conversion. |
1305 | if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Val: Ty)) { |
1306 | if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(Val: SrcTy)) { |
1307 | // If we are casting a fixed i8 vector to a scalable i1 predicate |
1308 | // vector, use a vector insert and bitcast the result. |
1309 | if (ScalableDstTy->getElementType()->isIntegerTy(Bitwidth: 1) && |
1310 | ScalableDstTy->getElementCount().isKnownMultipleOf(RHS: 8) && |
1311 | FixedSrcTy->getElementType()->isIntegerTy(Bitwidth: 8)) { |
1312 | ScalableDstTy = llvm::ScalableVectorType::get( |
1313 | ElementType: FixedSrcTy->getElementType(), |
1314 | MinNumElts: ScalableDstTy->getElementCount().getKnownMinValue() / 8); |
1315 | } |
1316 | if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) { |
1317 | auto *Load = CGF.Builder.CreateLoad(Addr: Src); |
1318 | auto *UndefVec = llvm::UndefValue::get(T: ScalableDstTy); |
1319 | auto *Zero = llvm::Constant::getNullValue(Ty: CGF.CGM.Int64Ty); |
1320 | llvm::Value *Result = CGF.Builder.CreateInsertVector( |
1321 | DstType: ScalableDstTy, SrcVec: UndefVec, SubVec: Load, Idx: Zero, Name: "cast.scalable" ); |
1322 | if (ScalableDstTy != Ty) |
1323 | Result = CGF.Builder.CreateBitCast(V: Result, DestTy: Ty); |
1324 | return Result; |
1325 | } |
1326 | } |
1327 | } |
1328 | |
1329 | // Otherwise do coercion through memory. This is stupid, but simple. |
1330 | RawAddress Tmp = |
1331 | CreateTempAllocaForCoercion(CGF, Ty, MinAlign: Src.getAlignment(), Name: Src.getName()); |
1332 | CGF.Builder.CreateMemCpy( |
1333 | Dst: Tmp.getPointer(), DstAlign: Tmp.getAlignment().getAsAlign(), |
1334 | Src: Src.emitRawPointer(CGF), SrcAlign: Src.getAlignment().getAsAlign(), |
1335 | Size: llvm::ConstantInt::get(Ty: CGF.IntPtrTy, V: SrcSize.getKnownMinValue())); |
1336 | return CGF.Builder.CreateLoad(Addr: Tmp); |
1337 | } |
1338 | |
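/// CreateCoercedStore - Store the IR value \p Src into \p Dst, whose element
/// type may differ from the type of \p Src, broadly mirroring the coercion
/// rules of CreateCoercedLoad. For example, an IR value of struct type
/// {i64, i64} is stored element-by-element with scalar stores rather than as
/// a single first-class aggregate store.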
1339 | void CodeGenFunction::CreateCoercedStore(llvm::Value *Src, Address Dst, |
1340 | llvm::TypeSize DstSize, |
1341 | bool DstIsVolatile) { |
1342 | if (!DstSize) |
1343 | return; |
1344 | |
1345 | llvm::Type *SrcTy = Src->getType(); |
1346 | llvm::TypeSize SrcSize = CGM.getDataLayout().getTypeAllocSize(Ty: SrcTy); |
1347 | |
1348 | // GEP into structs to try to make types match. |
1349 | // FIXME: This isn't really that useful with opaque types, but it impacts a |
1350 | // lot of regression tests. |
1351 | if (SrcTy != Dst.getElementType()) { |
1352 | if (llvm::StructType *DstSTy = |
1353 | dyn_cast<llvm::StructType>(Val: Dst.getElementType())) { |
1354 | assert(!SrcSize.isScalable()); |
1355 | Dst = EnterStructPointerForCoercedAccess(SrcPtr: Dst, SrcSTy: DstSTy, |
1356 | DstSize: SrcSize.getFixedValue(), CGF&: *this); |
1357 | } |
1358 | } |
1359 | |
1360 | if (SrcSize.isScalable() || SrcSize <= DstSize) { |
1361 | if (SrcTy->isIntegerTy() && Dst.getElementType()->isPointerTy() && |
1362 | SrcSize == CGM.getDataLayout().getTypeAllocSize(Ty: Dst.getElementType())) { |
1363 | // If the value is supposed to be a pointer, convert it before storing it. |
1364 | Src = CoerceIntOrPtrToIntOrPtr(Val: Src, Ty: Dst.getElementType(), CGF&: *this); |
1365 | Builder.CreateStore(Val: Src, Addr: Dst, IsVolatile: DstIsVolatile); |
1366 | } else if (llvm::StructType *STy = |
1367 | dyn_cast<llvm::StructType>(Val: Src->getType())) { |
1368 | // Prefer scalar stores to first-class aggregate stores. |
1369 | Dst = Dst.withElementType(ElemTy: SrcTy); |
1370 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
1371 | Address EltPtr = Builder.CreateStructGEP(Addr: Dst, Index: i); |
1372 | llvm::Value *Elt = Builder.CreateExtractValue(Agg: Src, Idxs: i); |
1373 | Builder.CreateStore(Val: Elt, Addr: EltPtr, IsVolatile: DstIsVolatile); |
1374 | } |
1375 | } else { |
1376 | Builder.CreateStore(Val: Src, Addr: Dst.withElementType(ElemTy: SrcTy), IsVolatile: DstIsVolatile); |
1377 | } |
1378 | } else if (SrcTy->isIntegerTy()) { |
1379 | // If the source is a simple integer, coerce it directly. |
1380 | llvm::Type *DstIntTy = Builder.getIntNTy(N: DstSize.getFixedValue() * 8); |
1381 | Src = CoerceIntOrPtrToIntOrPtr(Val: Src, Ty: DstIntTy, CGF&: *this); |
1382 | Builder.CreateStore(Val: Src, Addr: Dst.withElementType(ElemTy: DstIntTy), IsVolatile: DstIsVolatile); |
1383 | } else { |
1384 | // Otherwise do coercion through memory. This is stupid, but |
1385 | // simple. |
1386 | |
1387 | // Generally SrcSize is never greater than DstSize, since this means we are |
1388 | // losing bits. However, this can happen in cases where the structure has |
1389 | // additional padding, for example due to a user specified alignment. |
1390 | // |
// FIXME: Assert that we aren't truncating non-padding bits when we have
// access to that information.
1393 | RawAddress Tmp = |
1394 | CreateTempAllocaForCoercion(CGF&: *this, Ty: SrcTy, MinAlign: Dst.getAlignment()); |
1395 | Builder.CreateStore(Val: Src, Addr: Tmp); |
1396 | Builder.CreateMemCpy(Dst: Dst.emitRawPointer(CGF&: *this), |
1397 | DstAlign: Dst.getAlignment().getAsAlign(), Src: Tmp.getPointer(), |
1398 | SrcAlign: Tmp.getAlignment().getAsAlign(), |
1399 | Size: Builder.CreateTypeSize(DstType: IntPtrTy, Size: DstSize)); |
1400 | } |
1401 | } |
1402 | |
1403 | static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, |
1404 | const ABIArgInfo &info) { |
1405 | if (unsigned offset = info.getDirectOffset()) { |
1406 | addr = addr.withElementType(ElemTy: CGF.Int8Ty); |
1407 | addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr: addr, |
1408 | Offset: CharUnits::fromQuantity(Quantity: offset)); |
1409 | addr = addr.withElementType(ElemTy: info.getCoerceToType()); |
1410 | } |
1411 | return addr; |
1412 | } |
1413 | |
1414 | namespace { |
1415 | |
/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
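///
/// For example, a method that returns a large struct indirectly typically
/// maps to an sret pointer argument (before or after 'this', depending on
/// the ABI), followed by the IR arguments for each Clang parameter; a single
/// Clang argument may expand to several IR arguments, or to none at all
/// (ignore/inalloca).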
1418 | class ClangToLLVMArgMapping { |
1419 | static const unsigned InvalidIndex = ~0U; |
1420 | unsigned InallocaArgNo; |
1421 | unsigned SRetArgNo; |
1422 | unsigned TotalIRArgs; |
1423 | |
/// Arguments of the LLVM IR function corresponding to a single Clang argument.
1425 | struct IRArgs { |
1426 | unsigned PaddingArgIndex; |
1427 | // Argument is expanded to IR arguments at positions |
1428 | // [FirstArgIndex, FirstArgIndex + NumberOfArgs). |
1429 | unsigned FirstArgIndex; |
1430 | unsigned NumberOfArgs; |
1431 | |
1432 | IRArgs() |
1433 | : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex), |
1434 | NumberOfArgs(0) {} |
1435 | }; |
1436 | |
1437 | SmallVector<IRArgs, 8> ArgInfo; |
1438 | |
1439 | public: |
1440 | ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI, |
1441 | bool OnlyRequiredArgs = false) |
1442 | : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0), |
1443 | ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) { |
1444 | construct(Context, FI, OnlyRequiredArgs); |
1445 | } |
1446 | |
1447 | bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; } |
1448 | unsigned getInallocaArgNo() const { |
1449 | assert(hasInallocaArg()); |
1450 | return InallocaArgNo; |
1451 | } |
1452 | |
1453 | bool hasSRetArg() const { return SRetArgNo != InvalidIndex; } |
1454 | unsigned getSRetArgNo() const { |
1455 | assert(hasSRetArg()); |
1456 | return SRetArgNo; |
1457 | } |
1458 | |
1459 | unsigned totalIRArgs() const { return TotalIRArgs; } |
1460 | |
1461 | bool hasPaddingArg(unsigned ArgNo) const { |
1462 | assert(ArgNo < ArgInfo.size()); |
1463 | return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; |
1464 | } |
1465 | unsigned getPaddingArgNo(unsigned ArgNo) const { |
1466 | assert(hasPaddingArg(ArgNo)); |
1467 | return ArgInfo[ArgNo].PaddingArgIndex; |
1468 | } |
1469 | |
/// Returns the index of the first IR argument corresponding to ArgNo, and
/// the number of such IR arguments.
1472 | std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const { |
1473 | assert(ArgNo < ArgInfo.size()); |
1474 | return std::make_pair(x: ArgInfo[ArgNo].FirstArgIndex, |
1475 | y: ArgInfo[ArgNo].NumberOfArgs); |
1476 | } |
1477 | |
1478 | private: |
1479 | void construct(const ASTContext &Context, const CGFunctionInfo &FI, |
1480 | bool OnlyRequiredArgs); |
1481 | }; |
1482 | |
1483 | void ClangToLLVMArgMapping::construct(const ASTContext &Context, |
1484 | const CGFunctionInfo &FI, |
1485 | bool OnlyRequiredArgs) { |
1486 | unsigned IRArgNo = 0; |
1487 | bool SwapThisWithSRet = false; |
1488 | const ABIArgInfo &RetAI = FI.getReturnInfo(); |
1489 | |
1490 | if (RetAI.getKind() == ABIArgInfo::Indirect) { |
1491 | SwapThisWithSRet = RetAI.isSRetAfterThis(); |
1492 | SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++; |
1493 | } |
1494 | |
1495 | unsigned ArgNo = 0; |
1496 | unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); |
1497 | for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; |
1498 | ++I, ++ArgNo) { |
1499 | assert(I != FI.arg_end()); |
1500 | QualType ArgType = I->type; |
1501 | const ABIArgInfo &AI = I->info; |
1502 | // Collect data about IR arguments corresponding to Clang argument ArgNo. |
1503 | auto &IRArgs = ArgInfo[ArgNo]; |
1504 | |
1505 | if (AI.getPaddingType()) |
1506 | IRArgs.PaddingArgIndex = IRArgNo++; |
1507 | |
1508 | switch (AI.getKind()) { |
1509 | case ABIArgInfo::Extend: |
1510 | case ABIArgInfo::Direct: { |
1511 | // FIXME: handle sseregparm someday... |
1512 | llvm::StructType *STy = dyn_cast<llvm::StructType>(Val: AI.getCoerceToType()); |
1513 | if (AI.isDirect() && AI.getCanBeFlattened() && STy) { |
1514 | IRArgs.NumberOfArgs = STy->getNumElements(); |
1515 | } else { |
1516 | IRArgs.NumberOfArgs = 1; |
1517 | } |
1518 | break; |
1519 | } |
1520 | case ABIArgInfo::Indirect: |
1521 | case ABIArgInfo::IndirectAliased: |
1522 | IRArgs.NumberOfArgs = 1; |
1523 | break; |
1524 | case ABIArgInfo::Ignore: |
1525 | case ABIArgInfo::InAlloca: |
// Ignore and InAlloca don't have matching LLVM parameters.
1527 | IRArgs.NumberOfArgs = 0; |
1528 | break; |
1529 | case ABIArgInfo::CoerceAndExpand: |
1530 | IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size(); |
1531 | break; |
1532 | case ABIArgInfo::Expand: |
1533 | IRArgs.NumberOfArgs = getExpansionSize(Ty: ArgType, Context); |
1534 | break; |
1535 | } |
1536 | |
1537 | if (IRArgs.NumberOfArgs > 0) { |
1538 | IRArgs.FirstArgIndex = IRArgNo; |
1539 | IRArgNo += IRArgs.NumberOfArgs; |
1540 | } |
1541 | |
1542 | // Skip over the sret parameter when it comes second. We already handled it |
1543 | // above. |
1544 | if (IRArgNo == 1 && SwapThisWithSRet) |
1545 | IRArgNo++; |
1546 | } |
1547 | assert(ArgNo == ArgInfo.size()); |
1548 | |
1549 | if (FI.usesInAlloca()) |
1550 | InallocaArgNo = IRArgNo++; |
1551 | |
1552 | TotalIRArgs = IRArgNo; |
1553 | } |
1554 | } // namespace |
1555 | |
1556 | /***/ |
1557 | |
1558 | bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { |
1559 | const auto &RI = FI.getReturnInfo(); |
1560 | return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet()); |
1561 | } |
1562 | |
1563 | bool CodeGenModule::ReturnTypeHasInReg(const CGFunctionInfo &FI) { |
1564 | const auto &RI = FI.getReturnInfo(); |
1565 | return RI.getInReg(); |
1566 | } |
1567 | |
1568 | bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) { |
1569 | return ReturnTypeUsesSRet(FI) && |
1570 | getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs(); |
1571 | } |
1572 | |
1573 | bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { |
1574 | if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { |
1575 | switch (BT->getKind()) { |
1576 | default: |
1577 | return false; |
1578 | case BuiltinType::Float: |
1579 | return getTarget().useObjCFPRetForRealType(T: FloatModeKind::Float); |
1580 | case BuiltinType::Double: |
1581 | return getTarget().useObjCFPRetForRealType(T: FloatModeKind::Double); |
1582 | case BuiltinType::LongDouble: |
1583 | return getTarget().useObjCFPRetForRealType(T: FloatModeKind::LongDouble); |
1584 | } |
1585 | } |
1586 | |
1587 | return false; |
1588 | } |
1589 | |
1590 | bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { |
1591 | if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { |
1592 | if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { |
1593 | if (BT->getKind() == BuiltinType::LongDouble) |
1594 | return getTarget().useObjCFP2RetForComplexLongDouble(); |
1595 | } |
1596 | } |
1597 | |
1598 | return false; |
1599 | } |
1600 | |
1601 | llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { |
1602 | const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); |
1603 | return GetFunctionType(Info: FI); |
1604 | } |
1605 | |
1606 | llvm::FunctionType * |
1607 | CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { |
1608 | |
1609 | bool Inserted = FunctionsBeingProcessed.insert(Ptr: &FI).second; |
1610 | (void)Inserted; |
1611 | assert(Inserted && "Recursively being processed?" ); |
1612 | |
1613 | llvm::Type *resultType = nullptr; |
1614 | const ABIArgInfo &retAI = FI.getReturnInfo(); |
1615 | switch (retAI.getKind()) { |
1616 | case ABIArgInfo::Expand: |
1617 | case ABIArgInfo::IndirectAliased: |
1618 | llvm_unreachable("Invalid ABI kind for return argument" ); |
1619 | |
1620 | case ABIArgInfo::Extend: |
1621 | case ABIArgInfo::Direct: |
1622 | resultType = retAI.getCoerceToType(); |
1623 | break; |
1624 | |
1625 | case ABIArgInfo::InAlloca: |
1626 | if (retAI.getInAllocaSRet()) { |
// sret things on win32 aren't void; they return the sret pointer.
1628 | QualType ret = FI.getReturnType(); |
1629 | unsigned addressSpace = CGM.getTypes().getTargetAddressSpace(T: ret); |
1630 | resultType = llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: addressSpace); |
1631 | } else { |
1632 | resultType = llvm::Type::getVoidTy(C&: getLLVMContext()); |
1633 | } |
1634 | break; |
1635 | |
1636 | case ABIArgInfo::Indirect: |
1637 | case ABIArgInfo::Ignore: |
1638 | resultType = llvm::Type::getVoidTy(C&: getLLVMContext()); |
1639 | break; |
1640 | |
1641 | case ABIArgInfo::CoerceAndExpand: |
1642 | resultType = retAI.getUnpaddedCoerceAndExpandType(); |
1643 | break; |
1644 | } |
1645 | |
1646 | ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); |
1647 | SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); |
1648 | |
1649 | // Add type for sret argument. |
1650 | if (IRFunctionArgs.hasSRetArg()) { |
1651 | QualType Ret = FI.getReturnType(); |
1652 | unsigned AddressSpace = CGM.getTypes().getTargetAddressSpace(T: Ret); |
1653 | ArgTypes[IRFunctionArgs.getSRetArgNo()] = |
1654 | llvm::PointerType::get(C&: getLLVMContext(), AddressSpace); |
1655 | } |
1656 | |
1657 | // Add type for inalloca argument. |
1658 | if (IRFunctionArgs.hasInallocaArg()) |
1659 | ArgTypes[IRFunctionArgs.getInallocaArgNo()] = |
1660 | llvm::PointerType::getUnqual(C&: getLLVMContext()); |
1661 | |
1662 | // Add in all of the required arguments. |
1663 | unsigned ArgNo = 0; |
1664 | CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), |
1665 | ie = it + FI.getNumRequiredArgs(); |
1666 | for (; it != ie; ++it, ++ArgNo) { |
1667 | const ABIArgInfo &ArgInfo = it->info; |
1668 | |
1669 | // Insert a padding type to ensure proper alignment. |
1670 | if (IRFunctionArgs.hasPaddingArg(ArgNo)) |
1671 | ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = |
1672 | ArgInfo.getPaddingType(); |
1673 | |
1674 | unsigned FirstIRArg, NumIRArgs; |
1675 | std::tie(args&: FirstIRArg, args&: NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); |
1676 | |
1677 | switch (ArgInfo.getKind()) { |
1678 | case ABIArgInfo::Ignore: |
1679 | case ABIArgInfo::InAlloca: |
1680 | assert(NumIRArgs == 0); |
1681 | break; |
1682 | |
1683 | case ABIArgInfo::Indirect: |
1684 | assert(NumIRArgs == 1); |
// Indirect arguments are always on the stack, which is the alloca address space.
1686 | ArgTypes[FirstIRArg] = llvm::PointerType::get( |
1687 | C&: getLLVMContext(), AddressSpace: CGM.getDataLayout().getAllocaAddrSpace()); |
1688 | break; |
1689 | case ABIArgInfo::IndirectAliased: |
1690 | assert(NumIRArgs == 1); |
1691 | ArgTypes[FirstIRArg] = llvm::PointerType::get( |
1692 | C&: getLLVMContext(), AddressSpace: ArgInfo.getIndirectAddrSpace()); |
1693 | break; |
1694 | case ABIArgInfo::Extend: |
1695 | case ABIArgInfo::Direct: { |
1696 | // Fast-isel and the optimizer generally like scalar values better than |
1697 | // FCAs, so we flatten them if this is safe to do for this argument. |
1698 | llvm::Type *argType = ArgInfo.getCoerceToType(); |
1699 | llvm::StructType *st = dyn_cast<llvm::StructType>(Val: argType); |
1700 | if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { |
1701 | assert(NumIRArgs == st->getNumElements()); |
1702 | for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) |
1703 | ArgTypes[FirstIRArg + i] = st->getElementType(N: i); |
1704 | } else { |
1705 | assert(NumIRArgs == 1); |
1706 | ArgTypes[FirstIRArg] = argType; |
1707 | } |
1708 | break; |
1709 | } |
1710 | |
1711 | case ABIArgInfo::CoerceAndExpand: { |
1712 | auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; |
1713 | for (auto *EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { |
1714 | *ArgTypesIter++ = EltTy; |
1715 | } |
1716 | assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); |
1717 | break; |
1718 | } |
1719 | |
1720 | case ABIArgInfo::Expand: |
1721 | auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; |
1722 | getExpandedTypes(Ty: it->type, TI&: ArgTypesIter); |
1723 | assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); |
1724 | break; |
1725 | } |
1726 | } |
1727 | |
1728 | bool Erased = FunctionsBeingProcessed.erase(Ptr: &FI); (void)Erased; |
1729 | assert(Erased && "Not in set?" ); |
1730 | |
1731 | return llvm::FunctionType::get(Result: resultType, Params: ArgTypes, isVarArg: FI.isVariadic()); |
1732 | } |
1733 | |
1734 | llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { |
1735 | const CXXMethodDecl *MD = cast<CXXMethodDecl>(Val: GD.getDecl()); |
1736 | const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>(); |
1737 | |
1738 | if (!isFuncTypeConvertible(FT: FPT)) |
1739 | return llvm::StructType::get(Context&: getLLVMContext()); |
1740 | |
1741 | return GetFunctionType(GD); |
1742 | } |
1743 | |
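/// Add attributes implied by the function prototype itself: nounwind for
/// non-throwing prototypes and the AArch64 SME streaming/ZA/ZT0 state
/// attributes. For example, a prototype carrying the ACLE __arm_streaming
/// keyword should end up with the "aarch64_pstate_sm_enabled" IR attribute.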
1744 | static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, |
1745 | llvm::AttrBuilder &FuncAttrs, |
1746 | const FunctionProtoType *FPT) { |
1747 | if (!FPT) |
1748 | return; |
1749 | |
1750 | if (!isUnresolvedExceptionSpec(ESpecType: FPT->getExceptionSpecType()) && |
1751 | FPT->isNothrow()) |
1752 | FuncAttrs.addAttribute(Val: llvm::Attribute::NoUnwind); |
1753 | |
1754 | unsigned SMEBits = FPT->getAArch64SMEAttributes(); |
1755 | if (SMEBits & FunctionType::SME_PStateSMEnabledMask) |
1756 | FuncAttrs.addAttribute(A: "aarch64_pstate_sm_enabled" ); |
1757 | if (SMEBits & FunctionType::SME_PStateSMCompatibleMask) |
1758 | FuncAttrs.addAttribute(A: "aarch64_pstate_sm_compatible" ); |
1759 | |
1760 | // ZA |
1761 | if (FunctionType::getArmZAState(AttrBits: SMEBits) == FunctionType::ARM_Preserves) |
1762 | FuncAttrs.addAttribute(A: "aarch64_preserves_za" ); |
1763 | if (FunctionType::getArmZAState(AttrBits: SMEBits) == FunctionType::ARM_In) |
1764 | FuncAttrs.addAttribute(A: "aarch64_in_za" ); |
1765 | if (FunctionType::getArmZAState(AttrBits: SMEBits) == FunctionType::ARM_Out) |
1766 | FuncAttrs.addAttribute(A: "aarch64_out_za" ); |
1767 | if (FunctionType::getArmZAState(AttrBits: SMEBits) == FunctionType::ARM_InOut) |
1768 | FuncAttrs.addAttribute(A: "aarch64_inout_za" ); |
1769 | |
1770 | // ZT0 |
1771 | if (FunctionType::getArmZT0State(AttrBits: SMEBits) == FunctionType::ARM_Preserves) |
1772 | FuncAttrs.addAttribute(A: "aarch64_preserves_zt0" ); |
1773 | if (FunctionType::getArmZT0State(AttrBits: SMEBits) == FunctionType::ARM_In) |
1774 | FuncAttrs.addAttribute(A: "aarch64_in_zt0" ); |
1775 | if (FunctionType::getArmZT0State(AttrBits: SMEBits) == FunctionType::ARM_Out) |
1776 | FuncAttrs.addAttribute(A: "aarch64_out_zt0" ); |
1777 | if (FunctionType::getArmZT0State(AttrBits: SMEBits) == FunctionType::ARM_InOut) |
1778 | FuncAttrs.addAttribute(A: "aarch64_inout_zt0" ); |
1779 | } |
1780 | |
1781 | static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs, |
1782 | const Decl *Callee) { |
1783 | if (!Callee) |
1784 | return; |
1785 | |
1786 | SmallVector<StringRef, 4> Attrs; |
1787 | |
1788 | for (const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>()) |
1789 | AA->getAssumption().split(A&: Attrs, Separator: "," ); |
1790 | |
1791 | if (!Attrs.empty()) |
1792 | FuncAttrs.addAttribute(A: llvm::AssumptionAttrKey, |
1793 | V: llvm::join(Begin: Attrs.begin(), End: Attrs.end(), Separator: "," )); |
1794 | } |
1795 | |
1796 | bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context, |
1797 | QualType ReturnType) const { |
1798 | // We can't just discard the return value for a record type with a |
1799 | // complex destructor or a non-trivially copyable type. |
1800 | if (const RecordType *RT = |
1801 | ReturnType.getCanonicalType()->getAs<RecordType>()) { |
1802 | if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(Val: RT->getDecl())) |
1803 | return ClassDecl->hasTrivialDestructor(); |
1804 | } |
1805 | return ReturnType.isTriviallyCopyableType(Context); |
1806 | } |
1807 | |
1808 | static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, |
1809 | const Decl *TargetDecl) { |
// As-is, MSan cannot tolerate a noundef mismatch between caller and
// implementation. A mismatch is possible for e.g. indirect calls from a C
// caller into C++. Such mismatches lead to confusing false reports. To avoid
// an expensive workaround in MSan, we enforce initialization even in uncommon
// cases where it's allowed.
1815 | if (Module.getLangOpts().Sanitize.has(K: SanitizerKind::Memory)) |
1816 | return true; |
1817 | // C++ explicitly makes returning undefined values UB. C's rule only applies |
1818 | // to used values, so we never mark them noundef for now. |
1819 | if (!Module.getLangOpts().CPlusPlus) |
1820 | return false; |
1821 | if (TargetDecl) { |
1822 | if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(Val: TargetDecl)) { |
1823 | if (FDecl->isExternC()) |
1824 | return false; |
1825 | } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(Val: TargetDecl)) { |
1826 | // Function pointer. |
1827 | if (VDecl->isExternC()) |
1828 | return false; |
1829 | } |
1830 | } |
1831 | |
1832 | // We don't want to be too aggressive with the return checking, unless |
1833 | // it's explicit in the code opts or we're using an appropriate sanitizer. |
1834 | // Try to respect what the programmer intended. |
1835 | return Module.getCodeGenOpts().StrictReturn || |
1836 | !Module.MayDropFunctionReturn(Context: Module.getContext(), ReturnType: RetTy) || |
1837 | Module.getLangOpts().Sanitize.has(K: SanitizerKind::Return); |
1838 | } |
1839 | |
1840 | /// Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the |
1841 | /// requested denormal behavior, accounting for the overriding behavior of the |
1842 | /// -f32 case. |
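///
/// For example, given an FP denormal mode of 'preserve-sign' and a differing
/// f32 mode of 'ieee', this adds roughly
/// "denormal-fp-math"="preserve-sign,preserve-sign" and
/// "denormal-fp-math-f32"="ieee,ieee".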
1843 | static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode, |
1844 | llvm::DenormalMode FP32DenormalMode, |
1845 | llvm::AttrBuilder &FuncAttrs) { |
1846 | if (FPDenormalMode != llvm::DenormalMode::getDefault()) |
1847 | FuncAttrs.addAttribute(A: "denormal-fp-math" , V: FPDenormalMode.str()); |
1848 | |
1849 | if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid()) |
1850 | FuncAttrs.addAttribute(A: "denormal-fp-math-f32" , V: FP32DenormalMode.str()); |
1851 | } |
1852 | |
1853 | /// Add default attributes to a function, which have merge semantics under |
1854 | /// -mlink-builtin-bitcode and should not simply overwrite any existing |
1855 | /// attributes in the linked library. |
1856 | static void |
1857 | addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, |
1858 | llvm::AttrBuilder &FuncAttrs) { |
1859 | addDenormalModeAttrs(FPDenormalMode: CodeGenOpts.FPDenormalMode, FP32DenormalMode: CodeGenOpts.FP32DenormalMode, |
1860 | FuncAttrs); |
1861 | } |
1862 | |
1863 | static void getTrivialDefaultFunctionAttributes( |
1864 | StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, |
1865 | const LangOptions &LangOpts, bool AttrOnCallSite, |
1866 | llvm::AttrBuilder &FuncAttrs) { |
1867 | // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. |
1868 | if (!HasOptnone) { |
1869 | if (CodeGenOpts.OptimizeSize) |
1870 | FuncAttrs.addAttribute(Val: llvm::Attribute::OptimizeForSize); |
1871 | if (CodeGenOpts.OptimizeSize == 2) |
1872 | FuncAttrs.addAttribute(Val: llvm::Attribute::MinSize); |
1873 | } |
1874 | |
1875 | if (CodeGenOpts.DisableRedZone) |
1876 | FuncAttrs.addAttribute(Val: llvm::Attribute::NoRedZone); |
1877 | if (CodeGenOpts.IndirectTlsSegRefs) |
1878 | FuncAttrs.addAttribute(A: "indirect-tls-seg-refs" ); |
1879 | if (CodeGenOpts.NoImplicitFloat) |
1880 | FuncAttrs.addAttribute(Val: llvm::Attribute::NoImplicitFloat); |
1881 | |
1882 | if (AttrOnCallSite) { |
1883 | // Attributes that should go on the call site only. |
1884 | // FIXME: Look for 'BuiltinAttr' on the function rather than re-checking |
1885 | // the -fno-builtin-foo list. |
1886 | if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name)) |
1887 | FuncAttrs.addAttribute(Val: llvm::Attribute::NoBuiltin); |
1888 | if (!CodeGenOpts.TrapFuncName.empty()) |
1889 | FuncAttrs.addAttribute(A: "trap-func-name" , V: CodeGenOpts.TrapFuncName); |
1890 | } else { |
1891 | switch (CodeGenOpts.getFramePointer()) { |
1892 | case CodeGenOptions::FramePointerKind::None: |
1893 | // This is the default behavior. |
1894 | break; |
1895 | case CodeGenOptions::FramePointerKind::Reserved: |
1896 | case CodeGenOptions::FramePointerKind::NonLeaf: |
1897 | case CodeGenOptions::FramePointerKind::All: |
1898 | FuncAttrs.addAttribute(A: "frame-pointer" , |
1899 | V: CodeGenOptions::getFramePointerKindName( |
1900 | Kind: CodeGenOpts.getFramePointer())); |
1901 | } |
1902 | |
1903 | if (CodeGenOpts.LessPreciseFPMAD) |
1904 | FuncAttrs.addAttribute(A: "less-precise-fpmad" , V: "true" ); |
1905 | |
1906 | if (CodeGenOpts.NullPointerIsValid) |
1907 | FuncAttrs.addAttribute(Val: llvm::Attribute::NullPointerIsValid); |
1908 | |
1909 | if (LangOpts.getDefaultExceptionMode() == LangOptions::FPE_Ignore) |
1910 | FuncAttrs.addAttribute(A: "no-trapping-math" , V: "true" ); |
1911 | |
1912 | // TODO: Are these all needed? |
1913 | // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags. |
1914 | if (LangOpts.NoHonorInfs) |
1915 | FuncAttrs.addAttribute(A: "no-infs-fp-math" , V: "true" ); |
1916 | if (LangOpts.NoHonorNaNs) |
1917 | FuncAttrs.addAttribute(A: "no-nans-fp-math" , V: "true" ); |
1918 | if (LangOpts.ApproxFunc) |
1919 | FuncAttrs.addAttribute(A: "approx-func-fp-math" , V: "true" ); |
1920 | if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip && |
1921 | LangOpts.NoSignedZero && LangOpts.ApproxFunc && |
1922 | (LangOpts.getDefaultFPContractMode() == |
1923 | LangOptions::FPModeKind::FPM_Fast || |
1924 | LangOpts.getDefaultFPContractMode() == |
1925 | LangOptions::FPModeKind::FPM_FastHonorPragmas)) |
1926 | FuncAttrs.addAttribute(A: "unsafe-fp-math" , V: "true" ); |
1927 | if (CodeGenOpts.SoftFloat) |
1928 | FuncAttrs.addAttribute(A: "use-soft-float" , V: "true" ); |
1929 | FuncAttrs.addAttribute(A: "stack-protector-buffer-size" , |
1930 | V: llvm::utostr(X: CodeGenOpts.SSPBufferSize)); |
1931 | if (LangOpts.NoSignedZero) |
1932 | FuncAttrs.addAttribute(A: "no-signed-zeros-fp-math" , V: "true" ); |
1933 | |
1934 | // TODO: Reciprocal estimate codegen options should apply to instructions? |
1935 | const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals; |
1936 | if (!Recips.empty()) |
1937 | FuncAttrs.addAttribute(A: "reciprocal-estimates" , |
1938 | V: llvm::join(R: Recips, Separator: "," )); |
1939 | |
1940 | if (!CodeGenOpts.PreferVectorWidth.empty() && |
1941 | CodeGenOpts.PreferVectorWidth != "none" ) |
1942 | FuncAttrs.addAttribute(A: "prefer-vector-width" , |
1943 | V: CodeGenOpts.PreferVectorWidth); |
1944 | |
1945 | if (CodeGenOpts.StackRealignment) |
1946 | FuncAttrs.addAttribute(A: "stackrealign" ); |
1947 | if (CodeGenOpts.Backchain) |
1948 | FuncAttrs.addAttribute(A: "backchain" ); |
1949 | if (CodeGenOpts.EnableSegmentedStacks) |
1950 | FuncAttrs.addAttribute(A: "split-stack" ); |
1951 | |
1952 | if (CodeGenOpts.SpeculativeLoadHardening) |
1953 | FuncAttrs.addAttribute(Val: llvm::Attribute::SpeculativeLoadHardening); |
1954 | |
1955 | // Add zero-call-used-regs attribute. |
1956 | switch (CodeGenOpts.getZeroCallUsedRegs()) { |
1957 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip: |
1958 | FuncAttrs.removeAttribute(A: "zero-call-used-regs" ); |
1959 | break; |
1960 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg: |
1961 | FuncAttrs.addAttribute(A: "zero-call-used-regs" , V: "used-gpr-arg" ); |
1962 | break; |
1963 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR: |
1964 | FuncAttrs.addAttribute(A: "zero-call-used-regs" , V: "used-gpr" ); |
1965 | break; |
1966 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg: |
1967 | FuncAttrs.addAttribute(A: "zero-call-used-regs" , V: "used-arg" ); |
1968 | break; |
1969 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used: |
1970 | FuncAttrs.addAttribute(A: "zero-call-used-regs" , V: "used" ); |
1971 | break; |
1972 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg: |
1973 | FuncAttrs.addAttribute(A: "zero-call-used-regs" , V: "all-gpr-arg" ); |
1974 | break; |
1975 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR: |
1976 | FuncAttrs.addAttribute(A: "zero-call-used-regs" , V: "all-gpr" ); |
1977 | break; |
1978 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg: |
1979 | FuncAttrs.addAttribute(A: "zero-call-used-regs" , V: "all-arg" ); |
1980 | break; |
1981 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All: |
1982 | FuncAttrs.addAttribute(A: "zero-call-used-regs" , V: "all" ); |
1983 | break; |
1984 | } |
1985 | } |
1986 | |
1987 | if (LangOpts.assumeFunctionsAreConvergent()) { |
1988 | // Conservatively, mark all functions and calls in CUDA and OpenCL as |
1989 | // convergent (meaning, they may call an intrinsically convergent op, such |
1990 | // as __syncthreads() / barrier(), and so can't have certain optimizations |
1991 | // applied around them). LLVM will remove this attribute where it safely |
1992 | // can. |
1993 | FuncAttrs.addAttribute(Val: llvm::Attribute::Convergent); |
1994 | } |
1995 | |
// TODO: The NoUnwind attribute should also be added for other GPU modes (HIP,
// OpenMP offload). AFAIK, neither of them supports exceptions in device code.
1998 | if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL || |
1999 | LangOpts.SYCLIsDevice) { |
2000 | FuncAttrs.addAttribute(Val: llvm::Attribute::NoUnwind); |
2001 | } |
2002 | |
2003 | for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) { |
2004 | StringRef Var, Value; |
2005 | std::tie(args&: Var, args&: Value) = Attr.split(Separator: '='); |
2006 | FuncAttrs.addAttribute(A: Var, V: Value); |
2007 | } |
2008 | |
2009 | TargetInfo::BranchProtectionInfo BPI(LangOpts); |
2010 | TargetCodeGenInfo::initBranchProtectionFnAttributes(BPI, FuncAttrs); |
2011 | } |
2012 | |
/// Merges `target-features` from \p TargetOpts and \p F, and sets the result
/// in \p FuncAttr.
/// * features from \p F are always kept
/// * a feature from \p TargetOpts is kept if itself and its opposite are
///   absent from \p F
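///
/// For example, if \p F carries "target-features"="+sse4.2,-avx" and
/// \p TargetOpts requests {"+avx", "+fma"}, the merge keeps the function's
/// "+sse4.2" and "-avx" and only picks up "+fma", giving "+fma,+sse4.2,-avx"
/// after sorting.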
2018 | static void |
2019 | overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, |
2020 | const llvm::Function &F, |
2021 | const TargetOptions &TargetOpts) { |
2022 | auto FFeatures = F.getFnAttribute(Kind: "target-features" ); |
2023 | |
2024 | llvm::StringSet<> MergedNames; |
2025 | SmallVector<StringRef> MergedFeatures; |
2026 | MergedFeatures.reserve(N: TargetOpts.Features.size()); |
2027 | |
2028 | auto AddUnmergedFeatures = [&](auto &&FeatureRange) { |
2029 | for (StringRef Feature : FeatureRange) { |
2030 | if (Feature.empty()) |
2031 | continue; |
2032 | assert(Feature[0] == '+' || Feature[0] == '-'); |
2033 | StringRef Name = Feature.drop_front(N: 1); |
2034 | bool Merged = !MergedNames.insert(key: Name).second; |
2035 | if (!Merged) |
2036 | MergedFeatures.push_back(Elt: Feature); |
2037 | } |
2038 | }; |
2039 | |
2040 | if (FFeatures.isValid()) |
2041 | AddUnmergedFeatures(llvm::split(Str: FFeatures.getValueAsString(), Separator: ',')); |
2042 | AddUnmergedFeatures(TargetOpts.Features); |
2043 | |
2044 | if (!MergedFeatures.empty()) { |
2045 | llvm::sort(C&: MergedFeatures); |
2046 | FuncAttr.addAttribute(A: "target-features" , V: llvm::join(R&: MergedFeatures, Separator: "," )); |
2047 | } |
2048 | } |
2049 | |
2050 | void CodeGen::mergeDefaultFunctionDefinitionAttributes( |
2051 | llvm::Function &F, const CodeGenOptions &CodeGenOpts, |
2052 | const LangOptions &LangOpts, const TargetOptions &TargetOpts, |
2053 | bool WillInternalize) { |
2054 | |
2055 | llvm::AttrBuilder FuncAttrs(F.getContext()); |
2056 | // Here we only extract the options that are relevant compared to the version |
2057 | // from GetCPUAndFeaturesAttributes. |
2058 | if (!TargetOpts.CPU.empty()) |
2059 | FuncAttrs.addAttribute(A: "target-cpu" , V: TargetOpts.CPU); |
2060 | if (!TargetOpts.TuneCPU.empty()) |
2061 | FuncAttrs.addAttribute(A: "tune-cpu" , V: TargetOpts.TuneCPU); |
2062 | |
2063 | ::getTrivialDefaultFunctionAttributes(Name: F.getName(), HasOptnone: F.hasOptNone(), |
2064 | CodeGenOpts, LangOpts, |
2065 | /*AttrOnCallSite=*/false, FuncAttrs); |
2066 | |
2067 | if (!WillInternalize && F.isInterposable()) { |
2068 | // Do not promote "dynamic" denormal-fp-math to this translation unit's |
2069 | // setting for weak functions that won't be internalized. The user has no |
2070 | // real control for how builtin bitcode is linked, so we shouldn't assume |
2071 | // later copies will use a consistent mode. |
2072 | F.addFnAttrs(Attrs: FuncAttrs); |
2073 | return; |
2074 | } |
2075 | |
2076 | llvm::AttributeMask AttrsToRemove; |
2077 | |
2078 | llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw(); |
2079 | llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw(); |
2080 | llvm::DenormalMode Merged = |
2081 | CodeGenOpts.FPDenormalMode.mergeCalleeMode(Callee: DenormModeToMerge); |
2082 | llvm::DenormalMode MergedF32 = CodeGenOpts.FP32DenormalMode; |
2083 | |
2084 | if (DenormModeToMergeF32.isValid()) { |
2085 | MergedF32 = |
2086 | CodeGenOpts.FP32DenormalMode.mergeCalleeMode(Callee: DenormModeToMergeF32); |
2087 | } |
2088 | |
2089 | if (Merged == llvm::DenormalMode::getDefault()) { |
2090 | AttrsToRemove.addAttribute(A: "denormal-fp-math" ); |
2091 | } else if (Merged != DenormModeToMerge) { |
2092 | // Overwrite existing attribute |
2093 | FuncAttrs.addAttribute(A: "denormal-fp-math" , |
2094 | V: CodeGenOpts.FPDenormalMode.str()); |
2095 | } |
2096 | |
2097 | if (MergedF32 == llvm::DenormalMode::getDefault()) { |
2098 | AttrsToRemove.addAttribute(A: "denormal-fp-math-f32" ); |
2099 | } else if (MergedF32 != DenormModeToMergeF32) { |
2100 | // Overwrite existing attribute |
2101 | FuncAttrs.addAttribute(A: "denormal-fp-math-f32" , |
2102 | V: CodeGenOpts.FP32DenormalMode.str()); |
2103 | } |
2104 | |
2105 | F.removeFnAttrs(Attrs: AttrsToRemove); |
2106 | addDenormalModeAttrs(FPDenormalMode: Merged, FP32DenormalMode: MergedF32, FuncAttrs); |
2107 | |
2108 | overrideFunctionFeaturesWithTargetFeatures(FuncAttr&: FuncAttrs, F, TargetOpts); |
2109 | |
2110 | F.addFnAttrs(Attrs: FuncAttrs); |
2111 | } |
2112 | |
2113 | void CodeGenModule::getTrivialDefaultFunctionAttributes( |
2114 | StringRef Name, bool HasOptnone, bool AttrOnCallSite, |
2115 | llvm::AttrBuilder &FuncAttrs) { |
2116 | ::getTrivialDefaultFunctionAttributes(Name, HasOptnone, CodeGenOpts: getCodeGenOpts(), |
2117 | LangOpts: getLangOpts(), AttrOnCallSite, |
2118 | FuncAttrs); |
2119 | } |
2120 | |
2121 | void CodeGenModule::getDefaultFunctionAttributes(StringRef Name, |
2122 | bool HasOptnone, |
2123 | bool AttrOnCallSite, |
2124 | llvm::AttrBuilder &FuncAttrs) { |
2125 | getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, |
2126 | FuncAttrs); |
2127 | // If we're just getting the default, get the default values for mergeable |
2128 | // attributes. |
2129 | if (!AttrOnCallSite) |
2130 | addMergableDefaultFunctionAttributes(CodeGenOpts, FuncAttrs); |
2131 | } |
2132 | |
2133 | void CodeGenModule::addDefaultFunctionDefinitionAttributes( |
2134 | llvm::AttrBuilder &attrs) { |
2135 | getDefaultFunctionAttributes(/*function name*/ Name: "" , /*optnone*/ HasOptnone: false, |
2136 | /*for call*/ AttrOnCallSite: false, FuncAttrs&: attrs); |
2137 | GetCPUAndFeaturesAttributes(GD: GlobalDecl(), AttrBuilder&: attrs); |
2138 | } |
2139 | |
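/// Map -fno-builtin options and the no_builtin attribute onto IR attributes.
/// For example, -fno-builtin-memcpy becomes "no-builtin-memcpy", while plain
/// -fno-builtin (or a wildcard no_builtin("*") attribute) collapses to a
/// single "no-builtins" attribute.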
2140 | static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, |
2141 | const LangOptions &LangOpts, |
2142 | const NoBuiltinAttr *NBA = nullptr) { |
2143 | auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) { |
2144 | SmallString<32> AttributeName; |
2145 | AttributeName += "no-builtin-" ; |
2146 | AttributeName += BuiltinName; |
2147 | FuncAttrs.addAttribute(A: AttributeName); |
2148 | }; |
2149 | |
2150 | // First, handle the language options passed through -fno-builtin. |
2151 | if (LangOpts.NoBuiltin) { |
2152 | // -fno-builtin disables them all. |
2153 | FuncAttrs.addAttribute(A: "no-builtins" ); |
2154 | return; |
2155 | } |
2156 | |
2157 | // Then, add attributes for builtins specified through -fno-builtin-<name>. |
2158 | llvm::for_each(Range: LangOpts.NoBuiltinFuncs, F: AddNoBuiltinAttr); |
2159 | |
// Now, let's check the __attribute__((no_builtin("..."))) attribute added to
// the source.
2162 | if (!NBA) |
2163 | return; |
2164 | |
2165 | // If there is a wildcard in the builtin names specified through the |
2166 | // attribute, disable them all. |
2167 | if (llvm::is_contained(Range: NBA->builtinNames(), Element: "*" )) { |
2168 | FuncAttrs.addAttribute(A: "no-builtins" ); |
2169 | return; |
2170 | } |
2171 | |
2172 | // And last, add the rest of the builtin names. |
2173 | llvm::for_each(Range: NBA->builtinNames(), F: AddNoBuiltinAttr); |
2174 | } |
2175 | |
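/// Decide whether a value of type \p QTy lowered with \p AI can be marked
/// noundef. For example, plain scalars, references and _BitInt values
/// qualify, while member pointers, nullptr_t and most aggregates do not.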
2176 | static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, |
2177 | const llvm::DataLayout &DL, const ABIArgInfo &AI, |
2178 | bool CheckCoerce = true) { |
2179 | llvm::Type *Ty = Types.ConvertTypeForMem(T: QTy); |
2180 | if (AI.getKind() == ABIArgInfo::Indirect || |
2181 | AI.getKind() == ABIArgInfo::IndirectAliased) |
2182 | return true; |
2183 | if (AI.getKind() == ABIArgInfo::Extend) |
2184 | return true; |
2185 | if (!DL.typeSizeEqualsStoreSize(Ty)) |
// TODO: This will result in a modest number of values not marked noundef
// when they could be. We care about values that *invisibly* contain undef
// bits from the perspective of LLVM IR.
2189 | return false; |
2190 | if (CheckCoerce && AI.canHaveCoerceToType()) { |
2191 | llvm::Type *CoerceTy = AI.getCoerceToType(); |
2192 | if (llvm::TypeSize::isKnownGT(LHS: DL.getTypeSizeInBits(Ty: CoerceTy), |
2193 | RHS: DL.getTypeSizeInBits(Ty))) |
2194 | // If we're coercing to a type with a greater size than the canonical one, |
2195 | // we're introducing new undef bits. |
2196 | // Coercing to a type of smaller or equal size is ok, as we know that |
2197 | // there's no internal padding (typeSizeEqualsStoreSize). |
2198 | return false; |
2199 | } |
2200 | if (QTy->isBitIntType()) |
2201 | return true; |
2202 | if (QTy->isReferenceType()) |
2203 | return true; |
2204 | if (QTy->isNullPtrType()) |
2205 | return false; |
2206 | if (QTy->isMemberPointerType()) |
2207 | // TODO: Some member pointers are `noundef`, but it depends on the ABI. For |
2208 | // now, never mark them. |
2209 | return false; |
2210 | if (QTy->isScalarType()) { |
2211 | if (const ComplexType *Complex = dyn_cast<ComplexType>(Val&: QTy)) |
2212 | return DetermineNoUndef(QTy: Complex->getElementType(), Types, DL, AI, CheckCoerce: false); |
2213 | return true; |
2214 | } |
2215 | if (const VectorType *Vector = dyn_cast<VectorType>(Val&: QTy)) |
2216 | return DetermineNoUndef(QTy: Vector->getElementType(), Types, DL, AI, CheckCoerce: false); |
2217 | if (const MatrixType *Matrix = dyn_cast<MatrixType>(Val&: QTy)) |
2218 | return DetermineNoUndef(QTy: Matrix->getElementType(), Types, DL, AI, CheckCoerce: false); |
2219 | if (const ArrayType *Array = dyn_cast<ArrayType>(Val&: QTy)) |
2220 | return DetermineNoUndef(QTy: Array->getElementType(), Types, DL, AI, CheckCoerce: false); |
2221 | |
2222 | // TODO: Some structs may be `noundef`, in specific situations. |
2223 | return false; |
2224 | } |
2225 | |
/// Check if the argument of a function has the maybe_undef attribute.
2227 | static bool IsArgumentMaybeUndef(const Decl *TargetDecl, |
2228 | unsigned NumRequiredArgs, unsigned ArgNo) { |
2229 | const auto *FD = dyn_cast_or_null<FunctionDecl>(Val: TargetDecl); |
2230 | if (!FD) |
2231 | return false; |
2232 | |
2233 | // Assume variadic arguments do not have maybe_undef attribute. |
2234 | if (ArgNo >= NumRequiredArgs) |
2235 | return false; |
2236 | |
2237 | // Check if argument has maybe_undef attribute. |
2238 | if (ArgNo < FD->getNumParams()) { |
2239 | const ParmVarDecl *Param = FD->getParamDecl(i: ArgNo); |
2240 | if (Param && Param->hasAttr<MaybeUndefAttr>()) |
2241 | return true; |
2242 | } |
2243 | |
2244 | return false; |
2245 | } |
2246 | |
/// Test if it's legal to apply nofpclass for the given parameter type and its
/// lowered IR type.
2249 | static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, |
2250 | bool IsReturn) { |
2251 | // Should only apply to FP types in the source, not ABI promoted. |
2252 | if (!ParamType->hasFloatingRepresentation()) |
2253 | return false; |
2254 | |
2255 | // The promoted-to IR type also needs to support nofpclass. |
2256 | llvm::Type *IRTy = AI.getCoerceToType(); |
2257 | if (llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty: IRTy)) |
2258 | return true; |
2259 | |
2260 | if (llvm::StructType *ST = dyn_cast<llvm::StructType>(Val: IRTy)) { |
2261 | return !IsReturn && AI.getCanBeFlattened() && |
2262 | llvm::all_of(Range: ST->elements(), P: [](llvm::Type *Ty) { |
2263 | return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty); |
2264 | }); |
2265 | } |
2266 | |
2267 | return false; |
2268 | } |
2269 | |
2270 | /// Return the nofpclass mask that can be applied to floating-point parameters. |
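///
/// For example, compiling with -ffinite-math-only (neither infinities nor
/// NaNs honored) yields fcInf | fcNan, which callers attach as
/// nofpclass(nan inf) on floating-point parameters and returns.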
2271 | static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts) { |
2272 | llvm::FPClassTest Mask = llvm::fcNone; |
2273 | if (LangOpts.NoHonorInfs) |
2274 | Mask |= llvm::fcInf; |
2275 | if (LangOpts.NoHonorNaNs) |
2276 | Mask |= llvm::fcNan; |
2277 | return Mask; |
2278 | } |
2279 | |
2280 | void CodeGenModule::AdjustMemoryAttribute(StringRef Name, |
2281 | CGCalleeInfo CalleeInfo, |
2282 | llvm::AttributeList &Attrs) { |
2283 | if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) { |
2284 | Attrs = Attrs.removeFnAttribute(C&: getLLVMContext(), Kind: llvm::Attribute::Memory); |
2285 | llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects( |
2286 | Context&: getLLVMContext(), ME: llvm::MemoryEffects::writeOnly()); |
2287 | Attrs = Attrs.addFnAttribute(C&: getLLVMContext(), Attr: MemoryAttr); |
2288 | } |
2289 | } |
2290 | |
2291 | /// Construct the IR attribute list of a function or call. |
2292 | /// |
2293 | /// When adding an attribute, please consider where it should be handled: |
2294 | /// |
2295 | /// - getDefaultFunctionAttributes is for attributes that are essentially |
2296 | /// part of the global target configuration (but perhaps can be |
2297 | /// overridden on a per-function basis). Adding attributes there |
2298 | /// will cause them to also be set in frontends that build on Clang's |
2299 | /// target-configuration logic, as well as for code defined in library |
2300 | /// modules such as CUDA's libdevice. |
2301 | /// |
2302 | /// - ConstructAttributeList builds on top of getDefaultFunctionAttributes |
2303 | /// and adds declaration-specific, convention-specific, and |
2304 | /// frontend-specific logic. The last is of particular importance: |
2305 | /// attributes that restrict how the frontend generates code must be |
2306 | /// added here rather than getDefaultFunctionAttributes. |
2307 | /// |
2308 | void CodeGenModule::ConstructAttributeList(StringRef Name, |
2309 | const CGFunctionInfo &FI, |
2310 | CGCalleeInfo CalleeInfo, |
2311 | llvm::AttributeList &AttrList, |
2312 | unsigned &CallingConv, |
2313 | bool AttrOnCallSite, bool IsThunk) { |
2314 | llvm::AttrBuilder FuncAttrs(getLLVMContext()); |
2315 | llvm::AttrBuilder RetAttrs(getLLVMContext()); |
2316 | |
2317 | // Collect function IR attributes from the CC lowering. |
// We'll collect the parameter and result attributes later.
2319 | CallingConv = FI.getEffectiveCallingConvention(); |
2320 | if (FI.isNoReturn()) |
2321 | FuncAttrs.addAttribute(Val: llvm::Attribute::NoReturn); |
2322 | if (FI.isCmseNSCall()) |
2323 | FuncAttrs.addAttribute(A: "cmse_nonsecure_call" ); |
2324 | |
2325 | // Collect function IR attributes from the callee prototype if we have one. |
2326 | AddAttributesFromFunctionProtoType(Ctx&: getContext(), FuncAttrs, |
2327 | FPT: CalleeInfo.getCalleeFunctionProtoType()); |
2328 | |
2329 | const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl(); |
2330 | |
2331 | // Attach assumption attributes to the declaration. If this is a call |
2332 | // site, attach assumptions from the caller to the call as well. |
2333 | AddAttributesFromOMPAssumes(FuncAttrs, Callee: TargetDecl); |
2334 | |
2335 | bool HasOptnone = false; |
2336 | // The NoBuiltinAttr attached to the target FunctionDecl. |
2337 | const NoBuiltinAttr *NBA = nullptr; |
2338 | |
2339 | // Some ABIs may result in additional accesses to arguments that may |
2340 | // otherwise not be present. |
2341 | auto AddPotentialArgAccess = [&]() { |
2342 | llvm::Attribute A = FuncAttrs.getAttribute(Kind: llvm::Attribute::Memory); |
2343 | if (A.isValid()) |
2344 | FuncAttrs.addMemoryAttr(ME: A.getMemoryEffects() | |
2345 | llvm::MemoryEffects::argMemOnly()); |
2346 | }; |
2347 | |
2348 | // Collect function IR attributes based on declaration-specific |
2349 | // information. |
2350 | // FIXME: handle sseregparm someday... |
2351 | if (TargetDecl) { |
2352 | if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) |
2353 | FuncAttrs.addAttribute(Val: llvm::Attribute::ReturnsTwice); |
2354 | if (TargetDecl->hasAttr<NoThrowAttr>()) |
2355 | FuncAttrs.addAttribute(Val: llvm::Attribute::NoUnwind); |
2356 | if (TargetDecl->hasAttr<NoReturnAttr>()) |
2357 | FuncAttrs.addAttribute(Val: llvm::Attribute::NoReturn); |
2358 | if (TargetDecl->hasAttr<ColdAttr>()) |
2359 | FuncAttrs.addAttribute(Val: llvm::Attribute::Cold); |
2360 | if (TargetDecl->hasAttr<HotAttr>()) |
2361 | FuncAttrs.addAttribute(Val: llvm::Attribute::Hot); |
2362 | if (TargetDecl->hasAttr<NoDuplicateAttr>()) |
2363 | FuncAttrs.addAttribute(Val: llvm::Attribute::NoDuplicate); |
2364 | if (TargetDecl->hasAttr<ConvergentAttr>()) |
2365 | FuncAttrs.addAttribute(Val: llvm::Attribute::Convergent); |
2366 | |
2367 | if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(Val: TargetDecl)) { |
2368 | AddAttributesFromFunctionProtoType( |
2369 | Ctx&: getContext(), FuncAttrs, FPT: Fn->getType()->getAs<FunctionProtoType>()); |
2370 | if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) { |
2371 | // A sane operator new returns a non-aliasing pointer. |
2372 | auto Kind = Fn->getDeclName().getCXXOverloadedOperator(); |
2373 | if (getCodeGenOpts().AssumeSaneOperatorNew && |
2374 | (Kind == OO_New || Kind == OO_Array_New)) |
2375 | RetAttrs.addAttribute(Val: llvm::Attribute::NoAlias); |
2376 | } |
2377 | const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: Fn); |
2378 | const bool IsVirtualCall = MD && MD->isVirtual(); |
// Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
// virtual function. These attributes are not inherited by overriders.
2381 | if (!(AttrOnCallSite && IsVirtualCall)) { |
2382 | if (Fn->isNoReturn()) |
2383 | FuncAttrs.addAttribute(Val: llvm::Attribute::NoReturn); |
2384 | NBA = Fn->getAttr<NoBuiltinAttr>(); |
2385 | } |
2386 | } |
2387 | |
2388 | if (isa<FunctionDecl>(Val: TargetDecl) || isa<VarDecl>(Val: TargetDecl)) { |
2389 | // Only place nomerge attribute on call sites, never functions. This |
2390 | // allows it to work on indirect virtual function calls. |
2391 | if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>()) |
2392 | FuncAttrs.addAttribute(Val: llvm::Attribute::NoMerge); |
2393 | } |
2394 | |
2395 | // 'const', 'pure' and 'noalias' attributed functions are also nounwind. |
2396 | if (TargetDecl->hasAttr<ConstAttr>()) { |
2397 | FuncAttrs.addMemoryAttr(ME: llvm::MemoryEffects::none()); |
2398 | FuncAttrs.addAttribute(Val: llvm::Attribute::NoUnwind); |
2399 | // gcc specifies that 'const' functions have greater restrictions than |
2400 | // 'pure' functions, so they also cannot have infinite loops. |
2401 | FuncAttrs.addAttribute(Val: llvm::Attribute::WillReturn); |
2402 | } else if (TargetDecl->hasAttr<PureAttr>()) { |
2403 | FuncAttrs.addMemoryAttr(ME: llvm::MemoryEffects::readOnly()); |
2404 | FuncAttrs.addAttribute(Val: llvm::Attribute::NoUnwind); |
2405 | // gcc specifies that 'pure' functions cannot have infinite loops. |
2406 | FuncAttrs.addAttribute(Val: llvm::Attribute::WillReturn); |
2407 | } else if (TargetDecl->hasAttr<NoAliasAttr>()) { |
2408 | FuncAttrs.addMemoryAttr(ME: llvm::MemoryEffects::inaccessibleOrArgMemOnly()); |
2409 | FuncAttrs.addAttribute(Val: llvm::Attribute::NoUnwind); |
2410 | } |
2411 | if (TargetDecl->hasAttr<RestrictAttr>()) |
2412 | RetAttrs.addAttribute(Val: llvm::Attribute::NoAlias); |
2413 | if (TargetDecl->hasAttr<ReturnsNonNullAttr>() && |
2414 | !CodeGenOpts.NullPointerIsValid) |
2415 | RetAttrs.addAttribute(Val: llvm::Attribute::NonNull); |
2416 | if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>()) |
2417 | FuncAttrs.addAttribute(A: "no_caller_saved_registers" ); |
2418 | if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>()) |
2419 | FuncAttrs.addAttribute(Val: llvm::Attribute::NoCfCheck); |
2420 | if (TargetDecl->hasAttr<LeafAttr>()) |
2421 | FuncAttrs.addAttribute(Val: llvm::Attribute::NoCallback); |
2422 | |
2423 | HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>(); |
2424 | if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) { |
2425 | std::optional<unsigned> NumElemsParam; |
2426 | if (AllocSize->getNumElemsParam().isValid()) |
2427 | NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex(); |
2428 | FuncAttrs.addAllocSizeAttr(ElemSizeArg: AllocSize->getElemSizeParam().getLLVMIndex(), |
2429 | NumElemsArg: NumElemsParam); |
2430 | } |
2431 | |
2432 | if (TargetDecl->hasAttr<OpenCLKernelAttr>()) { |
2433 | if (getLangOpts().OpenCLVersion <= 120) { |
// In OpenCL v1.2, work groups are always uniform.
2435 | FuncAttrs.addAttribute(A: "uniform-work-group-size" , V: "true" ); |
2436 | } else { |
// In OpenCL v2.0, work groups may or may not be uniform. The
// '-cl-uniform-work-group-size' compile option hints to the compiler
// that the global work-size is a multiple of the work-group size
// specified to clEnqueueNDRangeKernel (i.e. work groups are uniform).
2442 | FuncAttrs.addAttribute( |
2443 | A: "uniform-work-group-size" , |
2444 | V: llvm::toStringRef(B: getLangOpts().OffloadUniformBlock)); |
2445 | } |
2446 | } |
2447 | |
2448 | if (TargetDecl->hasAttr<CUDAGlobalAttr>() && |
2449 | getLangOpts().OffloadUniformBlock) |
2450 | FuncAttrs.addAttribute(A: "uniform-work-group-size" , V: "true" ); |
2451 | |
2452 | if (TargetDecl->hasAttr<ArmLocallyStreamingAttr>()) |
2453 | FuncAttrs.addAttribute(A: "aarch64_pstate_sm_body" ); |
2454 | } |
2455 | |
2456 | // Attach "no-builtins" attributes to: |
2457 | // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>". |
2458 | // * definitions: "no-builtins" or "no-builtin-<name>" only. |
2459 | // The attributes can come from: |
2460 | // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name> |
2461 | // * FunctionDecl attributes: __attribute__((no_builtin(...))) |
2462 | addNoBuiltinAttributes(FuncAttrs, LangOpts: getLangOpts(), NBA); |
2463 | |
// Collect function IR attributes based on global settings.
2465 | getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs); |
2466 | |
2467 | // Override some default IR attributes based on declaration-specific |
2468 | // information. |
2469 | if (TargetDecl) { |
2470 | if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>()) |
2471 | FuncAttrs.removeAttribute(Val: llvm::Attribute::SpeculativeLoadHardening); |
2472 | if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>()) |
2473 | FuncAttrs.addAttribute(Val: llvm::Attribute::SpeculativeLoadHardening); |
2474 | if (TargetDecl->hasAttr<NoSplitStackAttr>()) |
2475 | FuncAttrs.removeAttribute(A: "split-stack" ); |
2476 | if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) { |
2477 | // A function "__attribute__((...))" overrides the command-line flag. |
2478 | auto Kind = |
2479 | TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs(); |
2480 | FuncAttrs.removeAttribute(A: "zero-call-used-regs" ); |
2481 | FuncAttrs.addAttribute( |
2482 | A: "zero-call-used-regs" , |
2483 | V: ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Val: Kind)); |
2484 | } |
2485 | |
2486 | // Add NonLazyBind attribute to function declarations when -fno-plt |
2487 | // is used. |
2488 | // FIXME: what if we just haven't processed the function definition |
2489 | // yet, or if it's an external definition like C99 inline? |
2490 | if (CodeGenOpts.NoPLT) { |
2491 | if (auto *Fn = dyn_cast<FunctionDecl>(Val: TargetDecl)) { |
2492 | if (!Fn->isDefined() && !AttrOnCallSite) { |
2493 | FuncAttrs.addAttribute(Val: llvm::Attribute::NonLazyBind); |
2494 | } |
2495 | } |
2496 | } |
2497 | } |
2498 | |
2499 | // Add "sample-profile-suffix-elision-policy" attribute for internal linkage |
2500 | // functions with -funique-internal-linkage-names. |
2501 | if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) { |
2502 | if (const auto *FD = dyn_cast_or_null<FunctionDecl>(Val: TargetDecl)) { |
2503 | if (!FD->isExternallyVisible()) |
2504 | FuncAttrs.addAttribute(A: "sample-profile-suffix-elision-policy" , |
2505 | V: "selected" ); |
2506 | } |
2507 | } |
2508 | |
2509 | // Collect non-call-site function IR attributes from declaration-specific |
2510 | // information. |
2511 | if (!AttrOnCallSite) { |
2512 | if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>()) |
2513 | FuncAttrs.addAttribute(A: "cmse_nonsecure_entry" ); |
2514 | |
// Decide whether tail calls should be disabled for this function.
2516 | auto shouldDisableTailCalls = [&] { |
2517 | // Should this be honored in getDefaultFunctionAttributes? |
2518 | if (CodeGenOpts.DisableTailCalls) |
2519 | return true; |
2520 | |
2521 | if (!TargetDecl) |
2522 | return false; |
2523 | |
2524 | if (TargetDecl->hasAttr<DisableTailCallsAttr>() || |
2525 | TargetDecl->hasAttr<AnyX86InterruptAttr>()) |
2526 | return true; |
2527 | |
2528 | if (CodeGenOpts.NoEscapingBlockTailCalls) { |
2529 | if (const auto *BD = dyn_cast<BlockDecl>(Val: TargetDecl)) |
2530 | if (!BD->doesNotEscape()) |
2531 | return true; |
2532 | } |
2533 | |
2534 | return false; |
2535 | }; |
2536 | if (shouldDisableTailCalls()) |
2537 | FuncAttrs.addAttribute(A: "disable-tail-calls" , V: "true" ); |
2538 | |
2539 | // CPU/feature overrides. addDefaultFunctionDefinitionAttributes |
2540 | // handles these separately to set them based on the global defaults. |
2541 | GetCPUAndFeaturesAttributes(GD: CalleeInfo.getCalleeDecl(), AttrBuilder&: FuncAttrs); |
2542 | } |
2543 | |
2544 | // Collect attributes from arguments and return values. |
2545 | ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); |
2546 | |
2547 | QualType RetTy = FI.getReturnType(); |
2548 | const ABIArgInfo &RetAI = FI.getReturnInfo(); |
2549 | const llvm::DataLayout &DL = getDataLayout(); |
2550 | |
2551 | // Determine if the return type could be partially undef |
2552 | if (CodeGenOpts.EnableNoundefAttrs && |
2553 | HasStrictReturn(Module: *this, RetTy, TargetDecl)) { |
2554 | if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect && |
2555 | DetermineNoUndef(QTy: RetTy, Types&: getTypes(), DL, AI: RetAI)) |
2556 | RetAttrs.addAttribute(Val: llvm::Attribute::NoUndef); |
2557 | } |
2558 | |
2559 | switch (RetAI.getKind()) { |
2560 | case ABIArgInfo::Extend: |
2561 | if (RetAI.isSignExt()) |
2562 | RetAttrs.addAttribute(Val: llvm::Attribute::SExt); |
2563 | else |
2564 | RetAttrs.addAttribute(Val: llvm::Attribute::ZExt); |
2565 | [[fallthrough]]; |
2566 | case ABIArgInfo::Direct: |
2567 | if (RetAI.getInReg()) |
2568 | RetAttrs.addAttribute(Val: llvm::Attribute::InReg); |
2569 | |
2570 | if (canApplyNoFPClass(AI: RetAI, ParamType: RetTy, IsReturn: true)) |
2571 | RetAttrs.addNoFPClassAttr(NoFPClassMask: getNoFPClassTestMask(LangOpts: getLangOpts())); |
2572 | |
2573 | break; |
2574 | case ABIArgInfo::Ignore: |
2575 | break; |
2576 | |
2577 | case ABIArgInfo::InAlloca: |
2578 | case ABIArgInfo::Indirect: { |
2579 | // inalloca and sret disable readnone and readonly |
2580 | AddPotentialArgAccess(); |
2581 | break; |
2582 | } |
2583 | |
2584 | case ABIArgInfo::CoerceAndExpand: |
2585 | break; |
2586 | |
2587 | case ABIArgInfo::Expand: |
2588 | case ABIArgInfo::IndirectAliased: |
2589 | llvm_unreachable("Invalid ABI kind for return argument" ); |
2590 | } |
2591 | |
2592 | if (!IsThunk) { |
2593 | // FIXME: fix this properly, https://reviews.llvm.org/D100388 |
2594 | if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { |
2595 | QualType PTy = RefTy->getPointeeType(); |
2596 | if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) |
2597 | RetAttrs.addDereferenceableAttr( |
2598 | Bytes: getMinimumObjectSize(Ty: PTy).getQuantity()); |
2599 | if (getTypes().getTargetAddressSpace(T: PTy) == 0 && |
2600 | !CodeGenOpts.NullPointerIsValid) |
2601 | RetAttrs.addAttribute(Val: llvm::Attribute::NonNull); |
2602 | if (PTy->isObjectType()) { |
2603 | llvm::Align Alignment = |
2604 | getNaturalPointeeTypeAlignment(T: RetTy).getAsAlign(); |
2605 | RetAttrs.addAlignmentAttr(Align: Alignment); |
2606 | } |
2607 | } |
2608 | } |
2609 | |
2610 | bool hasUsedSRet = false; |
2611 | SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs()); |
2612 | |
2613 | // Attach attributes to sret. |
2614 | if (IRFunctionArgs.hasSRetArg()) { |
2615 | llvm::AttrBuilder SRETAttrs(getLLVMContext()); |
2616 | SRETAttrs.addStructRetAttr(Ty: getTypes().ConvertTypeForMem(T: RetTy)); |
2617 | SRETAttrs.addAttribute(Val: llvm::Attribute::Writable); |
2618 | SRETAttrs.addAttribute(Val: llvm::Attribute::DeadOnUnwind); |
2619 | hasUsedSRet = true; |
2620 | if (RetAI.getInReg()) |
2621 | SRETAttrs.addAttribute(Val: llvm::Attribute::InReg); |
2622 | SRETAttrs.addAlignmentAttr(Align: RetAI.getIndirectAlign().getQuantity()); |
2623 | ArgAttrs[IRFunctionArgs.getSRetArgNo()] = |
2624 | llvm::AttributeSet::get(C&: getLLVMContext(), B: SRETAttrs); |
2625 | } |
2626 | |
2627 | // Attach attributes to inalloca argument. |
2628 | if (IRFunctionArgs.hasInallocaArg()) { |
2629 | llvm::AttrBuilder Attrs(getLLVMContext()); |
2630 | Attrs.addInAllocaAttr(Ty: FI.getArgStruct()); |
2631 | ArgAttrs[IRFunctionArgs.getInallocaArgNo()] = |
2632 | llvm::AttributeSet::get(C&: getLLVMContext(), B: Attrs); |
2633 | } |
2634 | |
// Apply `nonnull`, `dereferenceable(N)` and `align N` to the `this` argument,
2636 | // unless this is a thunk function. |
2637 | // FIXME: fix this properly, https://reviews.llvm.org/D100388 |
2638 | if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() && |
2639 | !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) { |
2640 | auto IRArgs = IRFunctionArgs.getIRArgs(ArgNo: 0); |
2641 | |
2642 | assert(IRArgs.second == 1 && "Expected only a single `this` pointer." ); |
2643 | |
2644 | llvm::AttrBuilder Attrs(getLLVMContext()); |
2645 | |
2646 | QualType ThisTy = |
2647 | FI.arg_begin()->type.getTypePtr()->getPointeeType(); |
2648 | |
2649 | if (!CodeGenOpts.NullPointerIsValid && |
2650 | getTypes().getTargetAddressSpace(T: FI.arg_begin()->type) == 0) { |
2651 | Attrs.addAttribute(Val: llvm::Attribute::NonNull); |
2652 | Attrs.addDereferenceableAttr(Bytes: getMinimumObjectSize(Ty: ThisTy).getQuantity()); |
2653 | } else { |
2654 | // FIXME dereferenceable should be correct here, regardless of |
2655 | // NullPointerIsValid. However, dereferenceable currently does not always |
2656 | // respect NullPointerIsValid and may imply nonnull and break the program. |
2657 | // See https://reviews.llvm.org/D66618 for discussions. |
2658 | Attrs.addDereferenceableOrNullAttr( |
2659 | Bytes: getMinimumObjectSize( |
2660 | Ty: FI.arg_begin()->type.castAs<PointerType>()->getPointeeType()) |
2661 | .getQuantity()); |
2662 | } |
2663 | |
2664 | llvm::Align Alignment = |
2665 | getNaturalTypeAlignment(T: ThisTy, /*BaseInfo=*/nullptr, |
2666 | /*TBAAInfo=*/nullptr, /*forPointeeType=*/true) |
2667 | .getAsAlign(); |
2668 | Attrs.addAlignmentAttr(Align: Alignment); |
2669 | |
2670 | ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(C&: getLLVMContext(), B: Attrs); |
2671 | } |
2672 | |
2673 | unsigned ArgNo = 0; |
2674 | for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), |
2675 | E = FI.arg_end(); |
2676 | I != E; ++I, ++ArgNo) { |
2677 | QualType ParamType = I->type; |
2678 | const ABIArgInfo &AI = I->info; |
2679 | llvm::AttrBuilder Attrs(getLLVMContext()); |
2680 | |
2681 | // Add attribute for padding argument, if necessary. |
2682 | if (IRFunctionArgs.hasPaddingArg(ArgNo)) { |
2683 | if (AI.getPaddingInReg()) { |
2684 | ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = |
2685 | llvm::AttributeSet::get( |
2686 | C&: getLLVMContext(), |
2687 | B: llvm::AttrBuilder(getLLVMContext()).addAttribute(Val: llvm::Attribute::InReg)); |
2688 | } |
2689 | } |
2690 | |
2691 | // Decide whether the argument we're handling could be partially undef |
2692 | if (CodeGenOpts.EnableNoundefAttrs && |
2693 | DetermineNoUndef(QTy: ParamType, Types&: getTypes(), DL, AI)) { |
2694 | Attrs.addAttribute(Val: llvm::Attribute::NoUndef); |
2695 | } |
2696 | |
2697 | // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we |
2698 | // have the corresponding parameter variable. It doesn't make |
2699 | // sense to do it here because parameters are so messed up. |
2700 | switch (AI.getKind()) { |
2701 | case ABIArgInfo::Extend: |
2702 | if (AI.isSignExt()) |
2703 | Attrs.addAttribute(Val: llvm::Attribute::SExt); |
2704 | else |
2705 | Attrs.addAttribute(Val: llvm::Attribute::ZExt); |
2706 | [[fallthrough]]; |
2707 | case ABIArgInfo::Direct: |
2708 | if (ArgNo == 0 && FI.isChainCall()) |
2709 | Attrs.addAttribute(Val: llvm::Attribute::Nest); |
2710 | else if (AI.getInReg()) |
2711 | Attrs.addAttribute(Val: llvm::Attribute::InReg); |
2712 | Attrs.addStackAlignmentAttr(Align: llvm::MaybeAlign(AI.getDirectAlign())); |
2713 | |
2714 | if (canApplyNoFPClass(AI, ParamType, IsReturn: false)) |
2715 | Attrs.addNoFPClassAttr(NoFPClassMask: getNoFPClassTestMask(LangOpts: getLangOpts())); |
2716 | break; |
2717 | case ABIArgInfo::Indirect: { |
2718 | if (AI.getInReg()) |
2719 | Attrs.addAttribute(Val: llvm::Attribute::InReg); |
2720 | |
2721 | if (AI.getIndirectByVal()) |
2722 | Attrs.addByValAttr(Ty: getTypes().ConvertTypeForMem(T: ParamType)); |
2723 | |
2724 | auto *Decl = ParamType->getAsRecordDecl(); |
2725 | if (CodeGenOpts.PassByValueIsNoAlias && Decl && |
2726 | Decl->getArgPassingRestrictions() == |
2727 | RecordArgPassingKind::CanPassInRegs) |
2728 | // When calling the function, the pointer passed in will be the only |
2729 | // reference to the underlying object. Mark it accordingly. |
2730 | Attrs.addAttribute(Val: llvm::Attribute::NoAlias); |
2731 | |
2732 | // TODO: We could add the byref attribute if not byval, but it would |
2733 | // require updating many testcases. |
2734 | |
2735 | CharUnits Align = AI.getIndirectAlign(); |
2736 | |
2737 | // In a byval argument, it is important that the required |
2738 | // alignment of the type is honored, as LLVM might be creating a |
2739 | // *new* stack object, and needs to know what alignment to give |
2740 | // it. (Sometimes it can deduce a sensible alignment on its own, |
2741 | // but not if clang decides it must emit a packed struct, or the |
2742 | // user specifies increased alignment requirements.) |
2743 | // |
2744 | // This is different from indirect *not* byval, where the object |
2745 | // exists already, and the align attribute is purely |
2746 | // informative. |
2747 | assert(!Align.isZero()); |
2748 | |
2749 | // For now, only add this when we have a byval argument. |
2750 | // TODO: be less lazy about updating test cases. |
2751 | if (AI.getIndirectByVal()) |
2752 | Attrs.addAlignmentAttr(Align: Align.getQuantity()); |
2753 | |
2754 | // byval disables readnone and readonly. |
2755 | AddPotentialArgAccess(); |
2756 | break; |
2757 | } |
2758 | case ABIArgInfo::IndirectAliased: { |
2759 | CharUnits Align = AI.getIndirectAlign(); |
2760 | Attrs.addByRefAttr(Ty: getTypes().ConvertTypeForMem(T: ParamType)); |
2761 | Attrs.addAlignmentAttr(Align: Align.getQuantity()); |
2762 | break; |
2763 | } |
2764 | case ABIArgInfo::Ignore: |
2765 | case ABIArgInfo::Expand: |
2766 | case ABIArgInfo::CoerceAndExpand: |
2767 | break; |
2768 | |
2769 | case ABIArgInfo::InAlloca: |
2770 | // inalloca disables readnone and readonly. |
2771 | AddPotentialArgAccess(); |
2772 | continue; |
2773 | } |
2774 | |
2775 | if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { |
2776 | QualType PTy = RefTy->getPointeeType(); |
2777 | if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) |
2778 | Attrs.addDereferenceableAttr( |
2779 | Bytes: getMinimumObjectSize(Ty: PTy).getQuantity()); |
2780 | if (getTypes().getTargetAddressSpace(T: PTy) == 0 && |
2781 | !CodeGenOpts.NullPointerIsValid) |
2782 | Attrs.addAttribute(Val: llvm::Attribute::NonNull); |
2783 | if (PTy->isObjectType()) { |
2784 | llvm::Align Alignment = |
2785 | getNaturalPointeeTypeAlignment(T: ParamType).getAsAlign(); |
2786 | Attrs.addAlignmentAttr(Align: Alignment); |
2787 | } |
2788 | } |
2789 | |
2790 | // From OpenCL spec v3.0.10 section 6.3.5 Alignment of Types: |
2791 | // > For arguments to a __kernel function declared to be a pointer to a |
2792 | // > data type, the OpenCL compiler can assume that the pointee is always |
2793 | // > appropriately aligned as required by the data type. |
2794 | if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() && |
2795 | ParamType->isPointerType()) { |
2796 | QualType PTy = ParamType->getPointeeType(); |
2797 | if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { |
2798 | llvm::Align Alignment = |
2799 | getNaturalPointeeTypeAlignment(T: ParamType).getAsAlign(); |
2800 | Attrs.addAlignmentAttr(Align: Alignment); |
2801 | } |
2802 | } |
2803 | |
2804 | switch (FI.getExtParameterInfo(argIndex: ArgNo).getABI()) { |
2805 | case ParameterABI::Ordinary: |
2806 | break; |
2807 | |
2808 | case ParameterABI::SwiftIndirectResult: { |
2809 | // Add 'sret' if we haven't already used it for something, but |
2810 | // only if the result is void. |
2811 | if (!hasUsedSRet && RetTy->isVoidType()) { |
2812 | Attrs.addStructRetAttr(Ty: getTypes().ConvertTypeForMem(T: ParamType)); |
2813 | hasUsedSRet = true; |
2814 | } |
2815 | |
2816 | // Add 'noalias' in either case. |
2817 | Attrs.addAttribute(Val: llvm::Attribute::NoAlias); |
2818 | |
2819 | // Add 'dereferenceable' and 'alignment'. |
2820 | auto PTy = ParamType->getPointeeType(); |
2821 | if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { |
2822 | auto info = getContext().getTypeInfoInChars(T: PTy); |
2823 | Attrs.addDereferenceableAttr(Bytes: info.Width.getQuantity()); |
2824 | Attrs.addAlignmentAttr(Align: info.Align.getAsAlign()); |
2825 | } |
2826 | break; |
2827 | } |
2828 | |
2829 | case ParameterABI::SwiftErrorResult: |
2830 | Attrs.addAttribute(Val: llvm::Attribute::SwiftError); |
2831 | break; |
2832 | |
2833 | case ParameterABI::SwiftContext: |
2834 | Attrs.addAttribute(Val: llvm::Attribute::SwiftSelf); |
2835 | break; |
2836 | |
2837 | case ParameterABI::SwiftAsyncContext: |
2838 | Attrs.addAttribute(Val: llvm::Attribute::SwiftAsync); |
2839 | break; |
2840 | } |
2841 | |
2842 | if (FI.getExtParameterInfo(argIndex: ArgNo).isNoEscape()) |
2843 | Attrs.addAttribute(Val: llvm::Attribute::NoCapture); |
2844 | |
2845 | if (Attrs.hasAttributes()) { |
2846 | unsigned FirstIRArg, NumIRArgs; |
2847 | std::tie(args&: FirstIRArg, args&: NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); |
2848 | for (unsigned i = 0; i < NumIRArgs; i++) |
2849 | ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes( |
2850 | C&: getLLVMContext(), AS: llvm::AttributeSet::get(C&: getLLVMContext(), B: Attrs)); |
2851 | } |
2852 | } |
2853 | assert(ArgNo == FI.arg_size()); |
2854 | |
2855 | AttrList = llvm::AttributeList::get( |
2856 | C&: getLLVMContext(), FnAttrs: llvm::AttributeSet::get(C&: getLLVMContext(), B: FuncAttrs), |
2857 | RetAttrs: llvm::AttributeSet::get(C&: getLLVMContext(), B: RetAttrs), ArgAttrs); |
2858 | } |
2859 | |
2860 | /// An argument came in as a promoted argument; demote it back to its |
2861 | /// declared type. |
2862 | static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, |
2863 | const VarDecl *var, |
2864 | llvm::Value *value) { |
2865 | llvm::Type *varType = CGF.ConvertType(T: var->getType()); |
2866 | |
2867 | // This can happen with promotions that actually don't change the |
2868 | // underlying type, like the enum promotions. |
2869 | if (value->getType() == varType) return value; |
2870 | |
2871 | assert((varType->isIntegerTy() || varType->isFloatingPointTy()) |
2872 | && "unexpected promotion type" ); |
2873 | |
2874 | if (isa<llvm::IntegerType>(Val: varType)) |
2875 | return CGF.Builder.CreateTrunc(V: value, DestTy: varType, Name: "arg.unpromote" ); |
2876 | |
2877 | return CGF.Builder.CreateFPCast(V: value, DestTy: varType, Name: "arg.unpromote" ); |
2878 | } |
2879 | |
2880 | /// Returns the attribute (either parameter attribute, or function |
2881 | /// attribute), which declares argument ArgNo to be non-null. |
2882 | static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, |
2883 | QualType ArgType, unsigned ArgNo) { |
2884 | // FIXME: __attribute__((nonnull)) can also be applied to: |
2885 | // - references to pointers, where the pointee is known to be |
2886 | // nonnull (apparently a Clang extension) |
2887 | // - transparent unions containing pointers |
2888 | // In the former case, LLVM IR cannot represent the constraint. In |
2889 | // the latter case, we have no guarantee that the transparent union |
2890 | // is in fact passed as a pointer. |
2891 | if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) |
2892 | return nullptr; |
2893 | // First, check attribute on parameter itself. |
2894 | if (PVD) { |
2895 | if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) |
2896 | return ParmNNAttr; |
2897 | } |
2898 | // Check function attributes. |
2899 | if (!FD) |
2900 | return nullptr; |
2901 | for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) { |
2902 | if (NNAttr->isNonNull(IdxAST: ArgNo)) |
2903 | return NNAttr; |
2904 | } |
2905 | return nullptr; |
2906 | } |
2907 | |
2908 | namespace { |
2909 | struct CopyBackSwiftError final : EHScopeStack::Cleanup { |
2910 | Address Temp; |
2911 | Address Arg; |
2912 | CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {} |
2913 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
2914 | llvm::Value *errorValue = CGF.Builder.CreateLoad(Addr: Temp); |
2915 | CGF.Builder.CreateStore(Val: errorValue, Addr: Arg); |
2916 | } |
2917 | }; |
2918 | } |
2919 | |
2920 | void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, |
2921 | llvm::Function *Fn, |
2922 | const FunctionArgList &Args) { |
2923 | if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) |
2924 | // Naked functions don't have prologues. |
2925 | return; |
2926 | |
2927 | // If this is an implicit-return-zero function, go ahead and |
2928 | // initialize the return value. TODO: it might be nice to have |
2929 | // a more general mechanism for this that didn't require synthesized |
2930 | // return statements. |
2931 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Val: CurCodeDecl)) { |
2932 | if (FD->hasImplicitReturnZero()) { |
2933 | QualType RetTy = FD->getReturnType().getUnqualifiedType(); |
2934 | llvm::Type* LLVMTy = CGM.getTypes().ConvertType(T: RetTy); |
2935 | llvm::Constant* Zero = llvm::Constant::getNullValue(Ty: LLVMTy); |
2936 | Builder.CreateStore(Val: Zero, Addr: ReturnValue); |
2937 | } |
2938 | } |
2939 | |
2940 | // FIXME: We no longer need the types from FunctionArgList; lift up and |
2941 | // simplify. |
2942 | |
2943 | ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI); |
2944 | assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs()); |
2945 | |
2946 | // If we're using inalloca, all the memory arguments are GEPs off of the last |
2947 | // parameter, which is a pointer to the complete memory area. |
2948 | Address ArgStruct = Address::invalid(); |
2949 | if (IRFunctionArgs.hasInallocaArg()) |
2950 | ArgStruct = Address(Fn->getArg(i: IRFunctionArgs.getInallocaArgNo()), |
2951 | FI.getArgStruct(), FI.getArgStructAlignment()); |
2952 | |
2953 | // Name the struct return parameter. |
2954 | if (IRFunctionArgs.hasSRetArg()) { |
2955 | auto AI = Fn->getArg(i: IRFunctionArgs.getSRetArgNo()); |
2956 | AI->setName("agg.result" ); |
2957 | AI->addAttr(Kind: llvm::Attribute::NoAlias); |
2958 | } |
2959 | |
2960 | // Track if we received the parameter as a pointer (indirect, byval, or |
// inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2962 | // into a local alloca for us. |
2963 | SmallVector<ParamValue, 16> ArgVals; |
2964 | ArgVals.reserve(N: Args.size()); |
2965 | |
2966 | // Create a pointer value for every parameter declaration. This usually |
2967 | // entails copying one or more LLVM IR arguments into an alloca. Don't push |
2968 | // any cleanups or do anything that might unwind. We do that separately, so |
2969 | // we can push the cleanups in the correct order for the ABI. |
2970 | assert(FI.arg_size() == Args.size() && |
2971 | "Mismatch between function signature & arguments." ); |
2972 | unsigned ArgNo = 0; |
2973 | CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); |
2974 | for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); |
2975 | i != e; ++i, ++info_it, ++ArgNo) { |
2976 | const VarDecl *Arg = *i; |
2977 | const ABIArgInfo &ArgI = info_it->info; |
2978 | |
2979 | bool isPromoted = |
2980 | isa<ParmVarDecl>(Val: Arg) && cast<ParmVarDecl>(Val: Arg)->isKNRPromoted(); |
2981 | // We are converting from ABIArgInfo type to VarDecl type directly, unless |
2982 | // the parameter is promoted. In this case we convert to |
2983 | // CGFunctionInfo::ArgInfo type with subsequent argument demotion. |
2984 | QualType Ty = isPromoted ? info_it->type : Arg->getType(); |
2985 | assert(hasScalarEvaluationKind(Ty) == |
2986 | hasScalarEvaluationKind(Arg->getType())); |
2987 | |
2988 | unsigned FirstIRArg, NumIRArgs; |
2989 | std::tie(args&: FirstIRArg, args&: NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); |
2990 | |
2991 | switch (ArgI.getKind()) { |
2992 | case ABIArgInfo::InAlloca: { |
2993 | assert(NumIRArgs == 0); |
2994 | auto FieldIndex = ArgI.getInAllocaFieldIndex(); |
2995 | Address V = |
2996 | Builder.CreateStructGEP(Addr: ArgStruct, Index: FieldIndex, Name: Arg->getName()); |
2997 | if (ArgI.getInAllocaIndirect()) |
2998 | V = Address(Builder.CreateLoad(Addr: V), ConvertTypeForMem(T: Ty), |
2999 | getContext().getTypeAlignInChars(T: Ty)); |
3000 | ArgVals.push_back(Elt: ParamValue::forIndirect(addr: V)); |
3001 | break; |
3002 | } |
3003 | |
3004 | case ABIArgInfo::Indirect: |
3005 | case ABIArgInfo::IndirectAliased: { |
3006 | assert(NumIRArgs == 1); |
3007 | Address ParamAddr = makeNaturalAddressForPointer( |
3008 | Ptr: Fn->getArg(i: FirstIRArg), T: Ty, Alignment: ArgI.getIndirectAlign(), ForPointeeType: false, BaseInfo: nullptr, |
3009 | TBAAInfo: nullptr, IsKnownNonNull: KnownNonNull); |
3010 | |
3011 | if (!hasScalarEvaluationKind(T: Ty)) { |
3012 | // Aggregates and complex variables are accessed by reference. All we |
3013 | // need to do is realign the value, if requested. Also, if the address |
3014 | // may be aliased, copy it to ensure that the parameter variable is |
// mutable and has a unique address, as C requires.
3016 | if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) { |
3017 | RawAddress AlignedTemp = CreateMemTemp(T: Ty, Name: "coerce" ); |
3018 | |
3019 | // Copy from the incoming argument pointer to the temporary with the |
3020 | // appropriate alignment. |
3021 | // |
3022 | // FIXME: We should have a common utility for generating an aggregate |
3023 | // copy. |
3024 | CharUnits Size = getContext().getTypeSizeInChars(T: Ty); |
3025 | Builder.CreateMemCpy( |
3026 | Dst: AlignedTemp.getPointer(), DstAlign: AlignedTemp.getAlignment().getAsAlign(), |
3027 | Src: ParamAddr.emitRawPointer(CGF&: *this), |
3028 | SrcAlign: ParamAddr.getAlignment().getAsAlign(), |
3029 | Size: llvm::ConstantInt::get(Ty: IntPtrTy, V: Size.getQuantity())); |
3030 | ParamAddr = AlignedTemp; |
3031 | } |
3032 | ArgVals.push_back(Elt: ParamValue::forIndirect(addr: ParamAddr)); |
3033 | } else { |
3034 | // Load scalar value from indirect argument. |
3035 | llvm::Value *V = |
3036 | EmitLoadOfScalar(Addr: ParamAddr, Volatile: false, Ty, Loc: Arg->getBeginLoc()); |
3037 | |
3038 | if (isPromoted) |
3039 | V = emitArgumentDemotion(CGF&: *this, var: Arg, value: V); |
3040 | ArgVals.push_back(Elt: ParamValue::forDirect(value: V)); |
3041 | } |
3042 | break; |
3043 | } |
3044 | |
3045 | case ABIArgInfo::Extend: |
3046 | case ABIArgInfo::Direct: { |
3047 | auto AI = Fn->getArg(i: FirstIRArg); |
3048 | llvm::Type *LTy = ConvertType(T: Arg->getType()); |
3049 | |
3050 | // Prepare parameter attributes. So far, only attributes for pointer |
3051 | // parameters are prepared. See |
3052 | // http://llvm.org/docs/LangRef.html#paramattrs. |
3053 | if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() && |
3054 | ArgI.getCoerceToType()->isPointerTy()) { |
3055 | assert(NumIRArgs == 1); |
3056 | |
3057 | if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Val: Arg)) { |
3058 | // Set `nonnull` attribute if any. |
3059 | if (getNonNullAttr(FD: CurCodeDecl, PVD, ArgType: PVD->getType(), |
3060 | ArgNo: PVD->getFunctionScopeIndex()) && |
3061 | !CGM.getCodeGenOpts().NullPointerIsValid) |
3062 | AI->addAttr(Kind: llvm::Attribute::NonNull); |
3063 | |
3064 | QualType OTy = PVD->getOriginalType(); |
3065 | if (const auto *ArrTy = |
3066 | getContext().getAsConstantArrayType(T: OTy)) { |
3067 | // A C99 array parameter declaration with the static keyword also |
3068 | // indicates dereferenceability, and if the size is constant we can |
3069 | // use the dereferenceable attribute (which requires the size in |
3070 | // bytes). |
3071 | if (ArrTy->getSizeModifier() == ArraySizeModifier::Static) { |
3072 | QualType ETy = ArrTy->getElementType(); |
3073 | llvm::Align Alignment = |
3074 | CGM.getNaturalTypeAlignment(T: ETy).getAsAlign(); |
3075 | AI->addAttrs(B&: llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Align: Alignment)); |
3076 | uint64_t ArrSize = ArrTy->getZExtSize(); |
3077 | if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && |
3078 | ArrSize) { |
3079 | llvm::AttrBuilder Attrs(getLLVMContext()); |
3080 | Attrs.addDereferenceableAttr( |
3081 | Bytes: getContext().getTypeSizeInChars(T: ETy).getQuantity() * |
3082 | ArrSize); |
3083 | AI->addAttrs(B&: Attrs); |
3084 | } else if (getContext().getTargetInfo().getNullPointerValue( |
3085 | AddrSpace: ETy.getAddressSpace()) == 0 && |
3086 | !CGM.getCodeGenOpts().NullPointerIsValid) { |
3087 | AI->addAttr(Kind: llvm::Attribute::NonNull); |
3088 | } |
3089 | } |
3090 | } else if (const auto *ArrTy = |
3091 | getContext().getAsVariableArrayType(T: OTy)) { |
3092 | // For C99 VLAs with the static keyword, we don't know the size so |
3093 | // we can't use the dereferenceable attribute, but in addrspace(0) |
3094 | // we know that it must be nonnull. |
3095 | if (ArrTy->getSizeModifier() == ArraySizeModifier::Static) { |
3096 | QualType ETy = ArrTy->getElementType(); |
3097 | llvm::Align Alignment = |
3098 | CGM.getNaturalTypeAlignment(T: ETy).getAsAlign(); |
3099 | AI->addAttrs(B&: llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Align: Alignment)); |
3100 | if (!getTypes().getTargetAddressSpace(T: ETy) && |
3101 | !CGM.getCodeGenOpts().NullPointerIsValid) |
3102 | AI->addAttr(Kind: llvm::Attribute::NonNull); |
3103 | } |
3104 | } |
3105 | |
3106 | // Set `align` attribute if any. |
3107 | const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); |
3108 | if (!AVAttr) |
3109 | if (const auto *TOTy = OTy->getAs<TypedefType>()) |
3110 | AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); |
3111 | if (AVAttr && !SanOpts.has(K: SanitizerKind::Alignment)) { |
          // If the alignment-assumption sanitizer is enabled, we do *not* add
          // the alignment attribute here, but instead emit a normal alignment
          // assumption so that the UBSan check can still fire.
3115 | llvm::ConstantInt *AlignmentCI = |
3116 | cast<llvm::ConstantInt>(Val: EmitScalarExpr(E: AVAttr->getAlignment())); |
3117 | uint64_t AlignmentInt = |
3118 | AlignmentCI->getLimitedValue(Limit: llvm::Value::MaximumAlignment); |
3119 | if (AI->getParamAlign().valueOrOne() < AlignmentInt) { |
3120 | AI->removeAttr(Kind: llvm::Attribute::AttrKind::Alignment); |
3121 | AI->addAttrs(B&: llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr( |
3122 | Align: llvm::Align(AlignmentInt))); |
3123 | } |
3124 | } |
3125 | } |
3126 | |
3127 | // Set 'noalias' if an argument type has the `restrict` qualifier. |
3128 | if (Arg->getType().isRestrictQualified()) |
3129 | AI->addAttr(Kind: llvm::Attribute::NoAlias); |
3130 | } |
3131 | |
3132 | // Prepare the argument value. If we have the trivial case, handle it |
3133 | // with no muss and fuss. |
3134 | if (!isa<llvm::StructType>(Val: ArgI.getCoerceToType()) && |
3135 | ArgI.getCoerceToType() == ConvertType(T: Ty) && |
3136 | ArgI.getDirectOffset() == 0) { |
3137 | assert(NumIRArgs == 1); |
3138 | |
3139 | // LLVM expects swifterror parameters to be used in very restricted |
3140 | // ways. Copy the value into a less-restricted temporary. |
3141 | llvm::Value *V = AI; |
3142 | if (FI.getExtParameterInfo(argIndex: ArgNo).getABI() |
3143 | == ParameterABI::SwiftErrorResult) { |
3144 | QualType pointeeTy = Ty->getPointeeType(); |
3145 | assert(pointeeTy->isPointerType()); |
3146 | RawAddress temp = |
3147 | CreateMemTemp(T: pointeeTy, Align: getPointerAlign(), Name: "swifterror.temp" ); |
3148 | Address arg = makeNaturalAddressForPointer( |
3149 | Ptr: V, T: pointeeTy, Alignment: getContext().getTypeAlignInChars(T: pointeeTy)); |
3150 | llvm::Value *incomingErrorValue = Builder.CreateLoad(Addr: arg); |
3151 | Builder.CreateStore(Val: incomingErrorValue, Addr: temp); |
3152 | V = temp.getPointer(); |
3153 | |
3154 | // Push a cleanup to copy the value back at the end of the function. |
3155 | // The convention does not guarantee that the value will be written |
3156 | // back if the function exits with an unwind exception. |
3157 | EHStack.pushCleanup<CopyBackSwiftError>(Kind: NormalCleanup, A: temp, A: arg); |
3158 | } |
3159 | |
3160 | // Ensure the argument is the correct type. |
3161 | if (V->getType() != ArgI.getCoerceToType()) |
3162 | V = Builder.CreateBitCast(V, DestTy: ArgI.getCoerceToType()); |
3163 | |
3164 | if (isPromoted) |
3165 | V = emitArgumentDemotion(CGF&: *this, var: Arg, value: V); |
3166 | |
3167 | // Because of merging of function types from multiple decls it is |
3168 | // possible for the type of an argument to not match the corresponding |
3169 | // type in the function type. Since we are codegening the callee |
3170 | // in here, add a cast to the argument type. |
3171 | llvm::Type *LTy = ConvertType(T: Arg->getType()); |
3172 | if (V->getType() != LTy) |
3173 | V = Builder.CreateBitCast(V, DestTy: LTy); |
3174 | |
3175 | ArgVals.push_back(Elt: ParamValue::forDirect(value: V)); |
3176 | break; |
3177 | } |
3178 | |
      // VLST (vector-length-specific, i.e. fixed-length) arguments are
      // coerced to VLATs (vector-length-agnostic, i.e. scalable types) at the
      // function boundary for ABI consistency. If this is a VLST that was
      // coerced to a VLAT at the function boundary and the types match up,
      // use llvm.vector.extract to convert back to the original VLST.
3183 | if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(Val: ConvertType(T: Ty))) { |
3184 | llvm::Value *Coerced = Fn->getArg(i: FirstIRArg); |
3185 | if (auto *VecTyFrom = |
3186 | dyn_cast<llvm::ScalableVectorType>(Val: Coerced->getType())) { |
3187 | // If we are casting a scalable i1 predicate vector to a fixed i8 |
3188 | // vector, bitcast the source and use a vector extract. |
3189 | if (VecTyFrom->getElementType()->isIntegerTy(Bitwidth: 1) && |
3190 | VecTyFrom->getElementCount().isKnownMultipleOf(RHS: 8) && |
3191 | VecTyTo->getElementType() == Builder.getInt8Ty()) { |
3192 | VecTyFrom = llvm::ScalableVectorType::get( |
3193 | ElementType: VecTyTo->getElementType(), |
3194 | MinNumElts: VecTyFrom->getElementCount().getKnownMinValue() / 8); |
3195 | Coerced = Builder.CreateBitCast(V: Coerced, DestTy: VecTyFrom); |
3196 | } |
3197 | if (VecTyFrom->getElementType() == VecTyTo->getElementType()) { |
3198 | llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGM.Int64Ty); |
3199 | |
3200 | assert(NumIRArgs == 1); |
3201 | Coerced->setName(Arg->getName() + ".coerce" ); |
3202 | ArgVals.push_back(Elt: ParamValue::forDirect(value: Builder.CreateExtractVector( |
3203 | DstType: VecTyTo, SrcVec: Coerced, Idx: Zero, Name: "cast.fixed" ))); |
3204 | break; |
3205 | } |
3206 | } |
3207 | } |
3208 | |
3209 | llvm::StructType *STy = |
3210 | dyn_cast<llvm::StructType>(Val: ArgI.getCoerceToType()); |
3211 | if (ArgI.isDirect() && !ArgI.getCanBeFlattened() && STy && |
3212 | STy->getNumElements() > 1) { |
3213 | [[maybe_unused]] llvm::TypeSize StructSize = |
3214 | CGM.getDataLayout().getTypeAllocSize(Ty: STy); |
3215 | [[maybe_unused]] llvm::TypeSize PtrElementSize = |
3216 | CGM.getDataLayout().getTypeAllocSize(Ty: ConvertTypeForMem(T: Ty)); |
3217 | if (STy->containsHomogeneousScalableVectorTypes()) { |
3218 | assert(StructSize == PtrElementSize && |
3219 | "Only allow non-fractional movement of structure with" |
3220 | "homogeneous scalable vector type" ); |
3221 | |
3222 | ArgVals.push_back(Elt: ParamValue::forDirect(value: AI)); |
3223 | break; |
3224 | } |
3225 | } |
3226 | |
3227 | Address Alloca = CreateMemTemp(T: Ty, Align: getContext().getDeclAlign(D: Arg), |
3228 | Name: Arg->getName()); |
3229 | |
3230 | // Pointer to store into. |
3231 | Address Ptr = emitAddressAtOffset(CGF&: *this, addr: Alloca, info: ArgI); |
3232 | |
3233 | // Fast-isel and the optimizer generally like scalar values better than |
3234 | // FCAs, so we flatten them if this is safe to do for this argument. |
3235 | if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && |
3236 | STy->getNumElements() > 1) { |
3237 | llvm::TypeSize StructSize = CGM.getDataLayout().getTypeAllocSize(Ty: STy); |
3238 | llvm::TypeSize PtrElementSize = |
3239 | CGM.getDataLayout().getTypeAllocSize(Ty: Ptr.getElementType()); |
3240 | if (StructSize.isScalable()) { |
3241 | assert(STy->containsHomogeneousScalableVectorTypes() && |
3242 | "ABI only supports structure with homogeneous scalable vector " |
3243 | "type" ); |
3244 | assert(StructSize == PtrElementSize && |
3245 | "Only allow non-fractional movement of structure with" |
3246 | "homogeneous scalable vector type" ); |
3247 | assert(STy->getNumElements() == NumIRArgs); |
3248 | |
3249 | llvm::Value *LoadedStructValue = llvm::PoisonValue::get(T: STy); |
3250 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
3251 | auto *AI = Fn->getArg(i: FirstIRArg + i); |
3252 | AI->setName(Arg->getName() + ".coerce" + Twine(i)); |
3253 | LoadedStructValue = |
3254 | Builder.CreateInsertValue(Agg: LoadedStructValue, Val: AI, Idxs: i); |
3255 | } |
3256 | |
3257 | Builder.CreateStore(Val: LoadedStructValue, Addr: Ptr); |
3258 | } else { |
3259 | uint64_t SrcSize = StructSize.getFixedValue(); |
3260 | uint64_t DstSize = PtrElementSize.getFixedValue(); |
3261 | |
3262 | Address AddrToStoreInto = Address::invalid(); |
3263 | if (SrcSize <= DstSize) { |
3264 | AddrToStoreInto = Ptr.withElementType(ElemTy: STy); |
3265 | } else { |
3266 | AddrToStoreInto = |
3267 | CreateTempAlloca(Ty: STy, align: Alloca.getAlignment(), Name: "coerce" ); |
3268 | } |
3269 | |
3270 | assert(STy->getNumElements() == NumIRArgs); |
3271 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
3272 | auto AI = Fn->getArg(i: FirstIRArg + i); |
3273 | AI->setName(Arg->getName() + ".coerce" + Twine(i)); |
3274 | Address EltPtr = Builder.CreateStructGEP(Addr: AddrToStoreInto, Index: i); |
3275 | Builder.CreateStore(Val: AI, Addr: EltPtr); |
3276 | } |
3277 | |
3278 | if (SrcSize > DstSize) { |
3279 | Builder.CreateMemCpy(Dest: Ptr, Src: AddrToStoreInto, Size: DstSize); |
3280 | } |
3281 | } |
3282 | } else { |
3283 | // Simple case, just do a coerced store of the argument into the alloca. |
3284 | assert(NumIRArgs == 1); |
3285 | auto AI = Fn->getArg(i: FirstIRArg); |
3286 | AI->setName(Arg->getName() + ".coerce" ); |
3287 | CreateCoercedStore( |
3288 | Src: AI, Dst: Ptr, |
3289 | DstSize: llvm::TypeSize::getFixed( |
3290 | ExactSize: getContext().getTypeSizeInChars(T: Ty).getQuantity() - |
3291 | ArgI.getDirectOffset()), |
3292 | /*DstIsVolatile=*/false); |
3293 | } |
3294 | |
3295 | // Match to what EmitParmDecl is expecting for this type. |
3296 | if (CodeGenFunction::hasScalarEvaluationKind(T: Ty)) { |
3297 | llvm::Value *V = |
3298 | EmitLoadOfScalar(Addr: Alloca, Volatile: false, Ty, Loc: Arg->getBeginLoc()); |
3299 | if (isPromoted) |
3300 | V = emitArgumentDemotion(CGF&: *this, var: Arg, value: V); |
3301 | ArgVals.push_back(Elt: ParamValue::forDirect(value: V)); |
3302 | } else { |
3303 | ArgVals.push_back(Elt: ParamValue::forIndirect(addr: Alloca)); |
3304 | } |
3305 | break; |
3306 | } |
3307 | |
3308 | case ABIArgInfo::CoerceAndExpand: { |
3309 | // Reconstruct into a temporary. |
3310 | Address alloca = CreateMemTemp(T: Ty, Align: getContext().getDeclAlign(D: Arg)); |
3311 | ArgVals.push_back(Elt: ParamValue::forIndirect(addr: alloca)); |
3312 | |
3313 | auto coercionType = ArgI.getCoerceAndExpandType(); |
3314 | alloca = alloca.withElementType(ElemTy: coercionType); |
3315 | |
3316 | unsigned argIndex = FirstIRArg; |
3317 | for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { |
3318 | llvm::Type *eltType = coercionType->getElementType(N: i); |
3319 | if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) |
3320 | continue; |
3321 | |
3322 | auto eltAddr = Builder.CreateStructGEP(Addr: alloca, Index: i); |
3323 | auto elt = Fn->getArg(i: argIndex++); |
3324 | Builder.CreateStore(Val: elt, Addr: eltAddr); |
3325 | } |
3326 | assert(argIndex == FirstIRArg + NumIRArgs); |
3327 | break; |
3328 | } |
3329 | |
3330 | case ABIArgInfo::Expand: { |
3331 | // If this structure was expanded into multiple arguments then |
3332 | // we need to create a temporary and reconstruct it from the |
3333 | // arguments. |
3334 | Address Alloca = CreateMemTemp(T: Ty, Align: getContext().getDeclAlign(D: Arg)); |
3335 | LValue LV = MakeAddrLValue(Addr: Alloca, T: Ty); |
3336 | ArgVals.push_back(Elt: ParamValue::forIndirect(addr: Alloca)); |
3337 | |
3338 | auto FnArgIter = Fn->arg_begin() + FirstIRArg; |
3339 | ExpandTypeFromArgs(Ty, LV, AI&: FnArgIter); |
3340 | assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs); |
3341 | for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { |
3342 | auto AI = Fn->getArg(i: FirstIRArg + i); |
3343 | AI->setName(Arg->getName() + "." + Twine(i)); |
3344 | } |
3345 | break; |
3346 | } |
3347 | |
3348 | case ABIArgInfo::Ignore: |
3349 | assert(NumIRArgs == 0); |
3350 | // Initialize the local variable appropriately. |
3351 | if (!hasScalarEvaluationKind(T: Ty)) { |
3352 | ArgVals.push_back(Elt: ParamValue::forIndirect(addr: CreateMemTemp(T: Ty))); |
3353 | } else { |
3354 | llvm::Value *U = llvm::UndefValue::get(T: ConvertType(T: Arg->getType())); |
3355 | ArgVals.push_back(Elt: ParamValue::forDirect(value: U)); |
3356 | } |
3357 | break; |
3358 | } |
3359 | } |
3360 | |
3361 | if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { |
3362 | for (int I = Args.size() - 1; I >= 0; --I) |
3363 | EmitParmDecl(D: *Args[I], Arg: ArgVals[I], ArgNo: I + 1); |
3364 | } else { |
3365 | for (unsigned I = 0, E = Args.size(); I != E; ++I) |
3366 | EmitParmDecl(D: *Args[I], Arg: ArgVals[I], ArgNo: I + 1); |
3367 | } |
3368 | } |
3369 | |
3370 | static void eraseUnusedBitCasts(llvm::Instruction *insn) { |
3371 | while (insn->use_empty()) { |
3372 | llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(Val: insn); |
3373 | if (!bitcast) return; |
3374 | |
3375 | // This is "safe" because we would have used a ConstantExpr otherwise. |
3376 | insn = cast<llvm::Instruction>(Val: bitcast->getOperand(i_nocapture: 0)); |
3377 | bitcast->eraseFromParent(); |
3378 | } |
3379 | } |
3380 | |
3381 | /// Try to emit a fused autorelease of a return result. |
3382 | static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, |
3383 | llvm::Value *result) { |
  // We must be immediately followed by the cast.
3385 | llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock(); |
3386 | if (BB->empty()) return nullptr; |
3387 | if (&BB->back() != result) return nullptr; |
3388 | |
3389 | llvm::Type *resultType = result->getType(); |
3390 | |
3391 | // result is in a BasicBlock and is therefore an Instruction. |
3392 | llvm::Instruction *generator = cast<llvm::Instruction>(Val: result); |
3393 | |
3394 | SmallVector<llvm::Instruction *, 4> InstsToKill; |
3395 | |
3396 | // Look for: |
3397 | // %generator = bitcast %type1* %generator2 to %type2* |
3398 | while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(Val: generator)) { |
3399 | // We would have emitted this as a constant if the operand weren't |
3400 | // an Instruction. |
3401 | generator = cast<llvm::Instruction>(Val: bitcast->getOperand(i_nocapture: 0)); |
3402 | |
3403 | // Require the generator to be immediately followed by the cast. |
3404 | if (generator->getNextNode() != bitcast) |
3405 | return nullptr; |
3406 | |
3407 | InstsToKill.push_back(Elt: bitcast); |
3408 | } |
3409 | |
3410 | // Look for: |
3411 | // %generator = call i8* @objc_retain(i8* %originalResult) |
3412 | // or |
3413 | // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult) |
3414 | llvm::CallInst *call = dyn_cast<llvm::CallInst>(Val: generator); |
3415 | if (!call) return nullptr; |
3416 | |
3417 | bool doRetainAutorelease; |
3418 | |
3419 | if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) { |
3420 | doRetainAutorelease = true; |
3421 | } else if (call->getCalledOperand() == |
3422 | CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) { |
3423 | doRetainAutorelease = false; |
3424 | |
3425 | // If we emitted an assembly marker for this call (and the |
3426 | // ARCEntrypoints field should have been set if so), go looking |
3427 | // for that call. If we can't find it, we can't do this |
3428 | // optimization. But it should always be the immediately previous |
3429 | // instruction, unless we needed bitcasts around the call. |
3430 | if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { |
3431 | llvm::Instruction *prev = call->getPrevNode(); |
3432 | assert(prev); |
3433 | if (isa<llvm::BitCastInst>(Val: prev)) { |
3434 | prev = prev->getPrevNode(); |
3435 | assert(prev); |
3436 | } |
3437 | assert(isa<llvm::CallInst>(prev)); |
3438 | assert(cast<llvm::CallInst>(prev)->getCalledOperand() == |
3439 | CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); |
3440 | InstsToKill.push_back(Elt: prev); |
3441 | } |
3442 | } else { |
3443 | return nullptr; |
3444 | } |
3445 | |
3446 | result = call->getArgOperand(i: 0); |
3447 | InstsToKill.push_back(Elt: call); |
3448 | |
3449 | // Keep killing bitcasts, for sanity. Note that we no longer care |
3450 | // about precise ordering as long as there's exactly one use. |
3451 | while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(Val: result)) { |
3452 | if (!bitcast->hasOneUse()) break; |
3453 | InstsToKill.push_back(Elt: bitcast); |
3454 | result = bitcast->getOperand(i_nocapture: 0); |
3455 | } |
3456 | |
3457 | // Delete all the unnecessary instructions, from latest to earliest. |
3458 | for (auto *I : InstsToKill) |
3459 | I->eraseFromParent(); |
3460 | |
3461 | // Do the fused retain/autorelease if we were asked to. |
3462 | if (doRetainAutorelease) |
3463 | result = CGF.EmitARCRetainAutoreleaseReturnValue(value: result); |
3464 | |
3465 | // Cast back to the result type. |
3466 | return CGF.Builder.CreateBitCast(V: result, DestTy: resultType); |
3467 | } |
3468 | |
3469 | /// If this is a +1 of the value of an immutable 'self', remove it. |
3470 | static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, |
3471 | llvm::Value *result) { |
3472 | // This is only applicable to a method with an immutable 'self'. |
3473 | const ObjCMethodDecl *method = |
3474 | dyn_cast_or_null<ObjCMethodDecl>(Val: CGF.CurCodeDecl); |
3475 | if (!method) return nullptr; |
3476 | const VarDecl *self = method->getSelfDecl(); |
3477 | if (!self->getType().isConstQualified()) return nullptr; |
3478 | |
3479 | // Look for a retain call. Note: stripPointerCasts looks through returned arg |
3480 | // functions, which would cause us to miss the retain. |
3481 | llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(Val: result); |
3482 | if (!retainCall || retainCall->getCalledOperand() != |
3483 | CGF.CGM.getObjCEntrypoints().objc_retain) |
3484 | return nullptr; |
3485 | |
3486 | // Look for an ordinary load of 'self'. |
3487 | llvm::Value *retainedValue = retainCall->getArgOperand(i: 0); |
3488 | llvm::LoadInst *load = |
3489 | dyn_cast<llvm::LoadInst>(Val: retainedValue->stripPointerCasts()); |
3490 | if (!load || load->isAtomic() || load->isVolatile() || |
3491 | load->getPointerOperand() != CGF.GetAddrOfLocalVar(VD: self).getBasePointer()) |
3492 | return nullptr; |
3493 | |
3494 | // Okay! Burn it all down. This relies for correctness on the |
3495 | // assumption that the retain is emitted as part of the return and |
3496 | // that thereafter everything is used "linearly". |
3497 | llvm::Type *resultType = result->getType(); |
3498 | eraseUnusedBitCasts(insn: cast<llvm::Instruction>(Val: result)); |
3499 | assert(retainCall->use_empty()); |
3500 | retainCall->eraseFromParent(); |
3501 | eraseUnusedBitCasts(insn: cast<llvm::Instruction>(Val: retainedValue)); |
3502 | |
3503 | return CGF.Builder.CreateBitCast(V: load, DestTy: resultType); |
3504 | } |
3505 | |
3506 | /// Emit an ARC autorelease of the result of a function. |
3507 | /// |
3508 | /// \return the value to actually return from the function |
3509 | static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, |
3510 | llvm::Value *result) { |
3511 | // If we're returning 'self', kill the initial retain. This is a |
3512 | // heuristic attempt to "encourage correctness" in the really unfortunate |
3513 | // case where we have a return of self during a dealloc and we desperately |
3514 | // need to avoid the possible autorelease. |
3515 | if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result)) |
3516 | return self; |
3517 | |
3518 | // At -O0, try to emit a fused retain/autorelease. |
3519 | if (CGF.shouldUseFusedARCCalls()) |
3520 | if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result)) |
3521 | return fused; |
3522 | |
3523 | return CGF.EmitARCAutoreleaseReturnValue(value: result); |
3524 | } |
3525 | |
3526 | /// Heuristically search for a dominating store to the return-value slot. |
3527 | static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { |
3528 | llvm::Value *ReturnValuePtr = CGF.ReturnValue.getBasePointer(); |
3529 | |
  // Check whether a User is a store whose pointer operand is the ReturnValue.
3531 | // We are looking for stores to the ReturnValue, not for stores of the |
3532 | // ReturnValue to some other location. |
3533 | auto GetStoreIfValid = [&CGF, |
3534 | ReturnValuePtr](llvm::User *U) -> llvm::StoreInst * { |
3535 | auto *SI = dyn_cast<llvm::StoreInst>(Val: U); |
3536 | if (!SI || SI->getPointerOperand() != ReturnValuePtr || |
3537 | SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType()) |
3538 | return nullptr; |
3539 | // These aren't actually possible for non-coerced returns, and we |
3540 | // only care about non-coerced returns on this code path. |
3541 | // All memory instructions inside __try block are volatile. |
3542 | assert(!SI->isAtomic() && |
3543 | (!SI->isVolatile() || CGF.currentFunctionUsesSEHTry())); |
3544 | return SI; |
3545 | }; |
3546 | // If there are multiple uses of the return-value slot, just check |
3547 | // for something immediately preceding the IP. Sometimes this can |
3548 | // happen with how we generate implicit-returns; it can also happen |
3549 | // with noreturn cleanups. |
3550 | if (!ReturnValuePtr->hasOneUse()) { |
3551 | llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); |
3552 | if (IP->empty()) return nullptr; |
3553 | |
3554 | // Look at directly preceding instruction, skipping bitcasts and lifetime |
3555 | // markers. |
3556 | for (llvm::Instruction &I : make_range(x: IP->rbegin(), y: IP->rend())) { |
3557 | if (isa<llvm::BitCastInst>(Val: &I)) |
3558 | continue; |
3559 | if (auto *II = dyn_cast<llvm::IntrinsicInst>(Val: &I)) |
3560 | if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end) |
3561 | continue; |
3562 | |
3563 | return GetStoreIfValid(&I); |
3564 | } |
3565 | return nullptr; |
3566 | } |
3567 | |
3568 | llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back()); |
3569 | if (!store) return nullptr; |
3570 | |
3571 | // Now do a first-and-dirty dominance check: just walk up the |
3572 | // single-predecessors chain from the current insertion point. |
3573 | llvm::BasicBlock *StoreBB = store->getParent(); |
3574 | llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); |
3575 | llvm::SmallPtrSet<llvm::BasicBlock *, 4> SeenBBs; |
3576 | while (IP != StoreBB) { |
3577 | if (!SeenBBs.insert(Ptr: IP).second || !(IP = IP->getSinglePredecessor())) |
3578 | return nullptr; |
3579 | } |
3580 | |
3581 | // Okay, the store's basic block dominates the insertion point; we |
3582 | // can do our thing. |
3583 | return store; |
3584 | } |
3585 | |
3586 | // Helper functions for EmitCMSEClearRecord |
3587 | |
3588 | // Set the bits corresponding to a field having width `BitWidth` and located at |
3589 | // offset `BitOffset` (from the least significant bit) within a storage unit of |
3590 | // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte. |
3591 | // Use little-endian layout, i.e.`Bits[0]` is the LSB. |
3592 | static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset, |
3593 | int BitWidth, int CharWidth) { |
3594 | assert(CharWidth <= 64); |
3595 | assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth); |
3596 | |
3597 | int Pos = 0; |
3598 | if (BitOffset >= CharWidth) { |
3599 | Pos += BitOffset / CharWidth; |
3600 | BitOffset = BitOffset % CharWidth; |
3601 | } |
3602 | |
3603 | const uint64_t Used = (uint64_t(1) << CharWidth) - 1; |
3604 | if (BitOffset + BitWidth >= CharWidth) { |
3605 | Bits[Pos++] |= (Used << BitOffset) & Used; |
3606 | BitWidth -= CharWidth - BitOffset; |
3607 | BitOffset = 0; |
3608 | } |
3609 | |
3610 | while (BitWidth >= CharWidth) { |
3611 | Bits[Pos++] = Used; |
3612 | BitWidth -= CharWidth; |
3613 | } |
3614 | |
3615 | if (BitWidth > 0) |
3616 | Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset; |
3617 | } |
3618 | |
3619 | // Set the bits corresponding to a field having width `BitWidth` and located at |
3620 | // offset `BitOffset` (from the least significant bit) within a storage unit of |
3621 | // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of |
3622 | // `Bits` corresponds to one target byte. Use target endian layout. |
3623 | static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset, |
3624 | int StorageSize, int BitOffset, int BitWidth, |
3625 | int CharWidth, bool BigEndian) { |
3626 | |
3627 | SmallVector<uint64_t, 8> TmpBits(StorageSize); |
3628 | setBitRange(Bits&: TmpBits, BitOffset, BitWidth, CharWidth); |
3629 | |
3630 | if (BigEndian) |
3631 | std::reverse(first: TmpBits.begin(), last: TmpBits.end()); |
3632 | |
3633 | for (uint64_t V : TmpBits) |
3634 | Bits[StorageOffset++] |= V; |
3635 | } |
3636 | |
3637 | static void setUsedBits(CodeGenModule &, QualType, int, |
3638 | SmallVectorImpl<uint64_t> &); |
3639 | |
3640 | // Set the bits in `Bits`, which correspond to the value representations of |
3641 | // the actual members of the record type `RTy`. Note that this function does |
// not handle base classes, virtual tables, etc., since they cannot appear in
// CMSE function arguments or return values. The bit mask corresponds to the
// target memory layout, i.e. it is endian-dependent.
3645 | static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset, |
3646 | SmallVectorImpl<uint64_t> &Bits) { |
3647 | ASTContext &Context = CGM.getContext(); |
3648 | int CharWidth = Context.getCharWidth(); |
3649 | const RecordDecl *RD = RTy->getDecl()->getDefinition(); |
3650 | const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(D: RD); |
3651 | const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD); |
3652 | |
3653 | int Idx = 0; |
3654 | for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) { |
3655 | const FieldDecl *F = *I; |
3656 | |
3657 | if (F->isUnnamedBitField() || F->isZeroLengthBitField(Ctx: Context) || |
3658 | F->getType()->isIncompleteArrayType()) |
3659 | continue; |
3660 | |
3661 | if (F->isBitField()) { |
3662 | const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(FD: F); |
3663 | setBitRange(Bits, StorageOffset: Offset + BFI.StorageOffset.getQuantity(), |
3664 | StorageSize: BFI.StorageSize / CharWidth, BitOffset: BFI.Offset, |
3665 | BitWidth: BFI.Size, CharWidth, |
3666 | BigEndian: CGM.getDataLayout().isBigEndian()); |
3667 | continue; |
3668 | } |
3669 | |
3670 | setUsedBits(CGM, F->getType(), |
3671 | Offset + ASTLayout.getFieldOffset(FieldNo: Idx) / CharWidth, Bits); |
3672 | } |
3673 | } |
3674 | |
3675 | // Set the bits in `Bits`, which correspond to the value representations of |
3676 | // the elements of an array type `ATy`. |
3677 | static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy, |
3678 | int Offset, SmallVectorImpl<uint64_t> &Bits) { |
3679 | const ASTContext &Context = CGM.getContext(); |
3680 | |
3681 | QualType ETy = Context.getBaseElementType(VAT: ATy); |
3682 | int Size = Context.getTypeSizeInChars(T: ETy).getQuantity(); |
3683 | SmallVector<uint64_t, 4> TmpBits(Size); |
3684 | setUsedBits(CGM, ETy, 0, TmpBits); |
3685 | |
3686 | for (int I = 0, N = Context.getConstantArrayElementCount(CA: ATy); I < N; ++I) { |
3687 | auto Src = TmpBits.begin(); |
3688 | auto Dst = Bits.begin() + Offset + I * Size; |
3689 | for (int J = 0; J < Size; ++J) |
3690 | *Dst++ |= *Src++; |
3691 | } |
3692 | } |
3693 | |
3694 | // Set the bits in `Bits`, which correspond to the value representations of |
3695 | // the type `QTy`. |
3696 | static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset, |
3697 | SmallVectorImpl<uint64_t> &Bits) { |
3698 | if (const auto *RTy = QTy->getAs<RecordType>()) |
3699 | return setUsedBits(CGM, RTy, Offset, Bits); |
3700 | |
3701 | ASTContext &Context = CGM.getContext(); |
3702 | if (const auto *ATy = Context.getAsConstantArrayType(T: QTy)) |
3703 | return setUsedBits(CGM, ATy, Offset, Bits); |
3704 | |
3705 | int Size = Context.getTypeSizeInChars(T: QTy).getQuantity(); |
3706 | if (Size <= 0) |
3707 | return; |
3708 | |
3709 | std::fill_n(Bits.begin() + Offset, Size, |
3710 | (uint64_t(1) << Context.getCharWidth()) - 1); |
3711 | } |
3712 | |
3713 | static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits, |
3714 | int Pos, int Size, int CharWidth, |
3715 | bool BigEndian) { |
3716 | assert(Size > 0); |
3717 | uint64_t Mask = 0; |
3718 | if (BigEndian) { |
3719 | for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E; |
3720 | ++P) |
3721 | Mask = (Mask << CharWidth) | *P; |
3722 | } else { |
3723 | auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos; |
3724 | do |
3725 | Mask = (Mask << CharWidth) | *--P; |
3726 | while (P != End); |
3727 | } |
3728 | return Mask; |
3729 | } |
3730 | |
// Emit code to clear the bits of a record that are not part of any
// user-declared member, when the record is returned from a function.
3733 | llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src, |
3734 | llvm::IntegerType *ITy, |
3735 | QualType QTy) { |
3736 | assert(Src->getType() == ITy); |
3737 | assert(ITy->getScalarSizeInBits() <= 64); |
3738 | |
3739 | const llvm::DataLayout &DataLayout = CGM.getDataLayout(); |
3740 | int Size = DataLayout.getTypeStoreSize(Ty: ITy); |
3741 | SmallVector<uint64_t, 4> Bits(Size); |
3742 | setUsedBits(CGM, RTy: QTy->castAs<RecordType>(), Offset: 0, Bits); |
3743 | |
3744 | int CharWidth = CGM.getContext().getCharWidth(); |
3745 | uint64_t Mask = |
3746 | buildMultiCharMask(Bits, Pos: 0, Size, CharWidth, BigEndian: DataLayout.isBigEndian()); |
3747 | |
3748 | return Builder.CreateAnd(LHS: Src, RHS: Mask, Name: "cmse.clear" ); |
3749 | } |
3750 | |
// Emit code to clear the bits of a record that are not part of any
// user-declared member, when the record is passed as a function argument.
3753 | llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src, |
3754 | llvm::ArrayType *ATy, |
3755 | QualType QTy) { |
3756 | const llvm::DataLayout &DataLayout = CGM.getDataLayout(); |
3757 | int Size = DataLayout.getTypeStoreSize(Ty: ATy); |
3758 | SmallVector<uint64_t, 16> Bits(Size); |
3759 | setUsedBits(CGM, RTy: QTy->castAs<RecordType>(), Offset: 0, Bits); |
3760 | |
3761 | // Clear each element of the LLVM array. |
3762 | int CharWidth = CGM.getContext().getCharWidth(); |
3763 | int CharsPerElt = |
3764 | ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth; |
3765 | int MaskIndex = 0; |
3766 | llvm::Value *R = llvm::PoisonValue::get(T: ATy); |
3767 | for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) { |
3768 | uint64_t Mask = buildMultiCharMask(Bits, Pos: MaskIndex, Size: CharsPerElt, CharWidth, |
3769 | BigEndian: DataLayout.isBigEndian()); |
3770 | MaskIndex += CharsPerElt; |
3771 | llvm::Value *T0 = Builder.CreateExtractValue(Agg: Src, Idxs: I); |
3772 | llvm::Value *T1 = Builder.CreateAnd(LHS: T0, RHS: Mask, Name: "cmse.clear" ); |
3773 | R = Builder.CreateInsertValue(Agg: R, Val: T1, Idxs: I); |
3774 | } |
3775 | |
3776 | return R; |
3777 | } |
3778 | |
3779 | void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI, |
3780 | bool EmitRetDbgLoc, |
3781 | SourceLocation EndLoc) { |
3782 | if (FI.isNoReturn()) { |
3783 | // Noreturn functions don't return. |
3784 | EmitUnreachable(Loc: EndLoc); |
3785 | return; |
3786 | } |
3787 | |
3788 | if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) { |
3789 | // Naked functions don't have epilogues. |
3790 | Builder.CreateUnreachable(); |
3791 | return; |
3792 | } |
3793 | |
3794 | // Functions with no result always return void. |
3795 | if (!ReturnValue.isValid()) { |
3796 | Builder.CreateRetVoid(); |
3797 | return; |
3798 | } |
3799 | |
3800 | llvm::DebugLoc RetDbgLoc; |
3801 | llvm::Value *RV = nullptr; |
3802 | QualType RetTy = FI.getReturnType(); |
3803 | const ABIArgInfo &RetAI = FI.getReturnInfo(); |
3804 | |
3805 | switch (RetAI.getKind()) { |
3806 | case ABIArgInfo::InAlloca: |
3807 | // Aggregates get evaluated directly into the destination. Sometimes we |
3808 | // need to return the sret value in a register, though. |
3809 | assert(hasAggregateEvaluationKind(RetTy)); |
3810 | if (RetAI.getInAllocaSRet()) { |
3811 | llvm::Function::arg_iterator EI = CurFn->arg_end(); |
3812 | --EI; |
3813 | llvm::Value *ArgStruct = &*EI; |
3814 | llvm::Value *SRet = Builder.CreateStructGEP( |
3815 | Ty: FI.getArgStruct(), Ptr: ArgStruct, Idx: RetAI.getInAllocaFieldIndex()); |
3816 | llvm::Type *Ty = |
3817 | cast<llvm::GetElementPtrInst>(Val: SRet)->getResultElementType(); |
3818 | RV = Builder.CreateAlignedLoad(Ty, Addr: SRet, Align: getPointerAlign(), Name: "sret" ); |
3819 | } |
3820 | break; |
3821 | |
3822 | case ABIArgInfo::Indirect: { |
3823 | auto AI = CurFn->arg_begin(); |
3824 | if (RetAI.isSRetAfterThis()) |
3825 | ++AI; |
3826 | switch (getEvaluationKind(T: RetTy)) { |
3827 | case TEK_Complex: { |
3828 | ComplexPairTy RT = |
3829 | EmitLoadOfComplex(src: MakeAddrLValue(Addr: ReturnValue, T: RetTy), loc: EndLoc); |
3830 | EmitStoreOfComplex(V: RT, dest: MakeNaturalAlignAddrLValue(V: &*AI, T: RetTy), |
3831 | /*isInit*/ true); |
3832 | break; |
3833 | } |
3834 | case TEK_Aggregate: |
3835 | // Do nothing; aggregates get evaluated directly into the destination. |
3836 | break; |
3837 | case TEK_Scalar: { |
3838 | LValueBaseInfo BaseInfo; |
3839 | TBAAAccessInfo TBAAInfo; |
3840 | CharUnits Alignment = |
3841 | CGM.getNaturalTypeAlignment(T: RetTy, BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo); |
3842 | Address ArgAddr(&*AI, ConvertType(T: RetTy), Alignment); |
3843 | LValue ArgVal = |
3844 | LValue::MakeAddr(Addr: ArgAddr, type: RetTy, Context&: getContext(), BaseInfo, TBAAInfo); |
3845 | EmitStoreOfScalar( |
3846 | value: EmitLoadOfScalar(lvalue: MakeAddrLValue(Addr: ReturnValue, T: RetTy), Loc: EndLoc), lvalue: ArgVal, |
3847 | /*isInit*/ true); |
3848 | break; |
3849 | } |
3850 | } |
3851 | break; |
3852 | } |
3853 | |
3854 | case ABIArgInfo::Extend: |
3855 | case ABIArgInfo::Direct: |
3856 | if (RetAI.getCoerceToType() == ConvertType(T: RetTy) && |
3857 | RetAI.getDirectOffset() == 0) { |
3858 | // The internal return value temp always has pointer-to-return-type
3859 | // type; just do a load.
3860 | |
3861 | // If there is a dominating store to ReturnValue, we can elide |
3862 | // the load, zap the store, and usually zap the alloca. |
3863 | if (llvm::StoreInst *SI = |
3864 | findDominatingStoreToReturnValue(CGF&: *this)) { |
3865 | // Reuse the debug location from the store unless there is |
3866 | // cleanup code to be emitted between the store and return |
3867 | // instruction. |
3868 | if (EmitRetDbgLoc && !AutoreleaseResult) |
3869 | RetDbgLoc = SI->getDebugLoc(); |
3870 | // Get the stored value and nuke the now-dead store. |
3871 | RV = SI->getValueOperand(); |
3872 | SI->eraseFromParent(); |
3873 | |
3874 | // Otherwise, we have to do a simple load. |
3875 | } else { |
3876 | RV = Builder.CreateLoad(Addr: ReturnValue); |
3877 | } |
3878 | } else { |
3879 | // If the value is offset in memory, apply the offset now. |
3880 | Address V = emitAddressAtOffset(CGF&: *this, addr: ReturnValue, info: RetAI); |
3881 | |
3882 | RV = CreateCoercedLoad(Src: V, Ty: RetAI.getCoerceToType(), CGF&: *this); |
3883 | } |
3884 | |
3885 | // In ARC, end functions that return a retainable type with a call |
3886 | // to objc_autoreleaseReturnValue. |
3887 | if (AutoreleaseResult) { |
3888 | #ifndef NDEBUG |
3889 | // Type::isObjCRetainableType has to be called on a QualType that hasn't
3890 | // been stripped of typedefs, so we cannot use RetTy here. Get the original
3891 | // return type from the FunctionDecl, ObjCMethodDecl or BlockDecl via
3892 | // CurCodeDecl or BlockInfo.
3893 | QualType RT; |
3894 | |
3895 | if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl)) |
3896 | RT = FD->getReturnType(); |
3897 | else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl)) |
3898 | RT = MD->getReturnType(); |
3899 | else if (isa<BlockDecl>(CurCodeDecl)) |
3900 | RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType(); |
3901 | else |
3902 | llvm_unreachable("Unexpected function/method type" ); |
3903 | |
3904 | assert(getLangOpts().ObjCAutoRefCount && |
3905 | !FI.isReturnsRetained() && |
3906 | RT->isObjCRetainableType()); |
3907 | #endif |
3908 | RV = emitAutoreleaseOfResult(CGF&: *this, result: RV); |
3909 | } |
3910 | |
3911 | break; |
3912 | |
3913 | case ABIArgInfo::Ignore: |
3914 | break; |
3915 | |
3916 | case ABIArgInfo::CoerceAndExpand: { |
3917 | auto coercionType = RetAI.getCoerceAndExpandType(); |
3918 | |
3919 | // Load all of the coerced elements out into results. |
3920 | llvm::SmallVector<llvm::Value*, 4> results; |
3921 | Address addr = ReturnValue.withElementType(ElemTy: coercionType); |
3922 | for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { |
3923 | auto coercedEltType = coercionType->getElementType(N: i); |
3924 | if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType: coercedEltType)) |
3925 | continue; |
3926 | |
3927 | auto eltAddr = Builder.CreateStructGEP(Addr: addr, Index: i); |
3928 | auto elt = Builder.CreateLoad(Addr: eltAddr); |
3929 | results.push_back(Elt: elt); |
3930 | } |
3931 | |
3932 | // If we have one result, it's the single direct result type. |
3933 | if (results.size() == 1) { |
3934 | RV = results[0]; |
3935 | |
3936 | // Otherwise, we need to make a first-class aggregate. |
3937 | } else { |
3938 | // Construct a return type that lacks padding elements. |
3939 | llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType(); |
3940 | |
3941 | RV = llvm::PoisonValue::get(T: returnType); |
3942 | for (unsigned i = 0, e = results.size(); i != e; ++i) { |
3943 | RV = Builder.CreateInsertValue(Agg: RV, Val: results[i], Idxs: i); |
3944 | } |
3945 | } |
3946 | break; |
3947 | } |
3948 | case ABIArgInfo::Expand: |
3949 | case ABIArgInfo::IndirectAliased: |
3950 | llvm_unreachable("Invalid ABI kind for return argument" ); |
3951 | } |
3952 | |
3953 | llvm::Instruction *Ret; |
3954 | if (RV) { |
3955 | if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) { |
3956 | // For certain return types, clear padding bits, as they may reveal |
3957 | // sensitive information. |
3958 | // Small struct/union types are passed as integers. |
3959 | auto *ITy = dyn_cast<llvm::IntegerType>(Val: RV->getType()); |
3960 | if (ITy != nullptr && isa<RecordType>(Val: RetTy.getCanonicalType())) |
3961 | RV = EmitCMSEClearRecord(Src: RV, ITy, QTy: RetTy); |
3962 | } |
3963 | EmitReturnValueCheck(RV); |
3964 | Ret = Builder.CreateRet(V: RV); |
3965 | } else { |
3966 | Ret = Builder.CreateRetVoid(); |
3967 | } |
3968 | |
3969 | if (RetDbgLoc) |
3970 | Ret->setDebugLoc(std::move(RetDbgLoc)); |
3971 | } |
3972 | |
3973 | void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) { |
3974 | // A current decl may not be available when emitting vtable thunks. |
3975 | if (!CurCodeDecl) |
3976 | return; |
3977 | |
3978 | // If the return block isn't reachable, neither is this check, so don't emit |
3979 | // it. |
3980 | if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) |
3981 | return; |
3982 | |
3983 | ReturnsNonNullAttr *RetNNAttr = nullptr; |
3984 | if (SanOpts.has(K: SanitizerKind::ReturnsNonnullAttribute)) |
3985 | RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>(); |
3986 | |
3987 | if (!RetNNAttr && !requiresReturnValueNullabilityCheck()) |
3988 | return; |
3989 | |
3990 | // Prefer the returns_nonnull attribute if it's present. |
3991 | SourceLocation AttrLoc; |
3992 | SanitizerMask CheckKind; |
3993 | SanitizerHandler Handler; |
3994 | if (RetNNAttr) { |
3995 | assert(!requiresReturnValueNullabilityCheck() && |
3996 | "Cannot check nullability and the nonnull attribute" ); |
3997 | AttrLoc = RetNNAttr->getLocation(); |
3998 | CheckKind = SanitizerKind::ReturnsNonnullAttribute; |
3999 | Handler = SanitizerHandler::NonnullReturn; |
4000 | } else { |
4001 | if (auto *DD = dyn_cast<DeclaratorDecl>(Val: CurCodeDecl)) |
4002 | if (auto *TSI = DD->getTypeSourceInfo()) |
4003 | if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>()) |
4004 | AttrLoc = FTL.getReturnLoc().findNullabilityLoc(); |
4005 | CheckKind = SanitizerKind::NullabilityReturn; |
4006 | Handler = SanitizerHandler::NullabilityReturn; |
4007 | } |
4008 | |
4009 | SanitizerScope SanScope(this); |
4010 | |
4011 | // Make sure the "return" source location is valid. If we're checking a |
4012 | // nullability annotation, make sure the preconditions for the check are met. |
4013 | llvm::BasicBlock *Check = createBasicBlock(name: "nullcheck" ); |
4014 | llvm::BasicBlock *NoCheck = createBasicBlock(name: "no.nullcheck" ); |
4015 | llvm::Value *SLocPtr = Builder.CreateLoad(Addr: ReturnLocation, Name: "return.sloc.load" ); |
4016 | llvm::Value *CanNullCheck = Builder.CreateIsNotNull(Arg: SLocPtr); |
4017 | if (requiresReturnValueNullabilityCheck()) |
4018 | CanNullCheck = |
4019 | Builder.CreateAnd(LHS: CanNullCheck, RHS: RetValNullabilityPrecondition); |
4020 | Builder.CreateCondBr(Cond: CanNullCheck, True: Check, False: NoCheck); |
4021 | EmitBlock(BB: Check); |
4022 | |
4023 | // Now do the null check. |
4024 | llvm::Value *Cond = Builder.CreateIsNotNull(Arg: RV); |
4025 | llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc: AttrLoc)}; |
4026 | llvm::Value *DynamicData[] = {SLocPtr}; |
4027 | EmitCheck(Checked: std::make_pair(x&: Cond, y&: CheckKind), Check: Handler, StaticArgs: StaticData, DynamicArgs: DynamicData); |
4028 | |
4029 | EmitBlock(BB: NoCheck); |
4030 | |
4031 | #ifndef NDEBUG |
4032 | // The return location should not be used after the check has been emitted. |
4033 | ReturnLocation = Address::invalid(); |
4034 | #endif |
4035 | } |
4036 | |
4037 | static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { |
4038 | const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); |
4039 | return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; |
4040 | } |
4041 | |
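// Create an aggregate slot addressed through a placeholder pointer; the
// placeholder is later replaced with the actual inalloca argument slot via
// the deferred-replacement machinery once the argument memory exists.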
4042 | static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, |
4043 | QualType Ty) { |
4044 | // FIXME: Generate IR in one pass, rather than going back and fixing up these |
4045 | // placeholders. |
4046 | llvm::Type *IRTy = CGF.ConvertTypeForMem(T: Ty); |
4047 | llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(C&: CGF.getLLVMContext()); |
4048 | llvm::Value *Placeholder = llvm::PoisonValue::get(T: IRPtrTy); |
4049 | |
4050 | // FIXME: When we generate this IR in one pass, we shouldn't need |
4051 | // this win32-specific alignment hack. |
4052 | CharUnits Align = CharUnits::fromQuantity(Quantity: 4); |
4053 | Placeholder = CGF.Builder.CreateAlignedLoad(Ty: IRPtrTy, Addr: Placeholder, Align); |
4054 | |
4055 | return AggValueSlot::forAddr(addr: Address(Placeholder, IRTy, Align), |
4056 | quals: Ty.getQualifiers(), |
4057 | isDestructed: AggValueSlot::IsNotDestructed, |
4058 | needsGC: AggValueSlot::DoesNotNeedGCBarriers, |
4059 | isAliased: AggValueSlot::IsNotAliased, |
4060 | mayOverlap: AggValueSlot::DoesNotOverlap); |
4061 | } |
4062 | |
4063 | void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, |
4064 | const VarDecl *param, |
4065 | SourceLocation loc) { |
4066 | // StartFunction converted the ABI-lowered parameter(s) into a |
4067 | // local alloca. We need to turn that into an r-value suitable |
4068 | // for EmitCall. |
4069 | Address local = GetAddrOfLocalVar(VD: param); |
4070 | |
4071 | QualType type = param->getType(); |
4072 | |
4073 | // GetAddrOfLocalVar returns a pointer-to-pointer for references, |
4074 | // but the argument needs to be the original pointer. |
4075 | if (type->isReferenceType()) { |
4076 | args.add(rvalue: RValue::get(V: Builder.CreateLoad(Addr: local)), type); |
4077 | |
4078 | // In ARC, move out of consumed arguments so that the release cleanup |
4079 | // entered by StartFunction doesn't cause an over-release. This isn't |
4080 | // optimal -O0 code generation, but it should get cleaned up when |
4081 | // optimization is enabled. This also assumes that delegate calls are |
4082 | // performed exactly once for a set of arguments, but that should be safe. |
4083 | } else if (getLangOpts().ObjCAutoRefCount && |
4084 | param->hasAttr<NSConsumedAttr>() && |
4085 | type->isObjCRetainableType()) { |
4086 | llvm::Value *ptr = Builder.CreateLoad(Addr: local); |
4087 | auto null = |
4088 | llvm::ConstantPointerNull::get(T: cast<llvm::PointerType>(Val: ptr->getType())); |
4089 | Builder.CreateStore(Val: null, Addr: local); |
4090 | args.add(rvalue: RValue::get(V: ptr), type); |
4091 | |
4092 | // For the most part, we just need to load the alloca, except that |
4093 | // aggregate r-values are actually pointers to temporaries. |
4094 | } else { |
4095 | args.add(rvalue: convertTempToRValue(addr: local, type, Loc: loc), type); |
4096 | } |
4097 | |
4098 | // Deactivate the cleanup for the callee-destructed param that was pushed. |
4099 | if (type->isRecordType() && !CurFuncIsThunk && |
4100 | type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() && |
4101 | param->needsDestruction(Ctx: getContext())) { |
4102 | EHScopeStack::stable_iterator cleanup = |
4103 | CalleeDestructedParamCleanups.lookup(Val: cast<ParmVarDecl>(Val: param)); |
4104 | assert(cleanup.isValid() && |
4105 | "cleanup for callee-destructed param not recorded" ); |
4106 | // This unreachable is a temporary marker which will be removed later. |
4107 | llvm::Instruction *isActive = Builder.CreateUnreachable(); |
4108 | args.addArgCleanupDeactivation(Cleanup: cleanup, IsActiveIP: isActive); |
4109 | } |
4110 | } |
4111 | |
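// Helpers for the ObjC writeback machinery below: determine whether a source
// address is statically known to be null or non-null, so the emitted code can
// skip the corresponding runtime null checks.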
4112 | static bool isProvablyNull(llvm::Value *addr) { |
4113 | return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(Val: addr); |
4114 | } |
4115 | |
4116 | static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF) { |
4117 | return llvm::isKnownNonZero(V: Addr.getBasePointer(), Q: CGF.CGM.getDataLayout()); |
4118 | } |
4119 | |
4120 | /// Emit the actual writing-back of a writeback. |
4121 | static void emitWriteback(CodeGenFunction &CGF, |
4122 | const CallArgList::Writeback &writeback) { |
4123 | const LValue &srcLV = writeback.Source; |
4124 | Address srcAddr = srcLV.getAddress(); |
4125 | assert(!isProvablyNull(srcAddr.getBasePointer()) && |
4126 | "shouldn't have writeback for provably null argument" ); |
4127 | |
4128 | llvm::BasicBlock *contBB = nullptr; |
4129 | |
4130 | // If the argument wasn't provably non-null, we need to null check |
4131 | // before doing the store. |
4132 | bool provablyNonNull = isProvablyNonNull(Addr: srcAddr, CGF); |
4133 | |
4134 | if (!provablyNonNull) { |
4135 | llvm::BasicBlock *writebackBB = CGF.createBasicBlock(name: "icr.writeback" ); |
4136 | contBB = CGF.createBasicBlock(name: "icr.done" ); |
4137 | |
4138 | llvm::Value *isNull = CGF.Builder.CreateIsNull(Addr: srcAddr, Name: "icr.isnull" ); |
4139 | CGF.Builder.CreateCondBr(Cond: isNull, True: contBB, False: writebackBB); |
4140 | CGF.EmitBlock(BB: writebackBB); |
4141 | } |
4142 | |
4143 | // Load the value to writeback. |
4144 | llvm::Value *value = CGF.Builder.CreateLoad(Addr: writeback.Temporary); |
4145 | |
4146 | // Cast it back, in case we're writing an id to a Foo* or something. |
4147 | value = CGF.Builder.CreateBitCast(V: value, DestTy: srcAddr.getElementType(), |
4148 | Name: "icr.writeback-cast" ); |
4149 | |
4150 | // Perform the writeback. |
4151 | |
4152 | // If we have a "to use" value, it's something we need to emit a use |
4153 | // of. This has to be carefully threaded in: if it's done after the |
4154 | // release it's potentially undefined behavior (and the optimizer |
4155 | // will ignore it), and if it happens before the retain then the |
4156 | // optimizer could move the release there. |
4157 | if (writeback.ToUse) { |
4158 | assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); |
4159 | |
4160 | // Retain the new value. No need to block-copy here: the block's |
4161 | // being passed up the stack. |
4162 | value = CGF.EmitARCRetainNonBlock(value); |
4163 | |
4164 | // Emit the intrinsic use here. |
4165 | CGF.EmitARCIntrinsicUse(values: writeback.ToUse); |
4166 | |
4167 | // Load the old value (primitively). |
4168 | llvm::Value *oldValue = CGF.EmitLoadOfScalar(lvalue: srcLV, Loc: SourceLocation()); |
4169 | |
4170 | // Put the new value in place (primitively). |
4171 | CGF.EmitStoreOfScalar(value, lvalue: srcLV, /*init*/ isInit: false); |
4172 | |
4173 | // Release the old value. |
4174 | CGF.EmitARCRelease(value: oldValue, precise: srcLV.isARCPreciseLifetime()); |
4175 | |
4176 | // Otherwise, we can just do a normal lvalue store. |
4177 | } else { |
4178 | CGF.EmitStoreThroughLValue(Src: RValue::get(V: value), Dst: srcLV); |
4179 | } |
4180 | |
4181 | // Jump to the continuation block. |
4182 | if (!provablyNonNull) |
4183 | CGF.EmitBlock(BB: contBB); |
4184 | } |
4185 | |
4186 | static void emitWritebacks(CodeGenFunction &CGF, |
4187 | const CallArgList &args) { |
4188 | for (const auto &I : args.writebacks()) |
4189 | emitWriteback(CGF, writeback: I); |
4190 | } |
4191 | |
4192 | static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, |
4193 | const CallArgList &CallArgs) { |
4194 | ArrayRef<CallArgList::CallArgCleanup> Cleanups = |
4195 | CallArgs.getCleanupsToDeactivate(); |
4196 | // Iterate in reverse to increase the likelihood of popping the cleanup. |
4197 | for (const auto &I : llvm::reverse(C&: Cleanups)) { |
4198 | CGF.DeactivateCleanupBlock(Cleanup: I.Cleanup, DominatingIP: I.IsActiveIP); |
4199 | I.IsActiveIP->eraseFromParent(); |
4200 | } |
4201 | } |
4202 | |
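// If E is a (possibly parenthesized) unary address-of expression, return its
// operand; otherwise return null.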
4203 | static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { |
4204 | if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(Val: E->IgnoreParens())) |
4205 | if (uop->getOpcode() == UO_AddrOf) |
4206 | return uop->getSubExpr(); |
4207 | return nullptr; |
4208 | } |
4209 | |
4210 | /// Emit an argument that's being passed call-by-writeback. That is, |
4211 | /// we are passing the address of an __autoreleased temporary; it |
4212 | /// might be copy-initialized with the current value of the given |
4213 | /// address, but it will definitely be copied out of after the call. |
4214 | static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, |
4215 | const ObjCIndirectCopyRestoreExpr *CRE) { |
4216 | LValue srcLV; |
4217 | |
4218 | // Make an optimistic effort to emit the address as an l-value. |
4219 | // This can fail if the argument expression is more complicated. |
4220 | if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(E: CRE->getSubExpr())) { |
4221 | srcLV = CGF.EmitLValue(E: lvExpr); |
4222 | |
4223 | // Otherwise, just emit it as a scalar. |
4224 | } else { |
4225 | Address srcAddr = CGF.EmitPointerWithAlignment(Addr: CRE->getSubExpr()); |
4226 | |
4227 | QualType srcAddrType = |
4228 | CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); |
4229 | srcLV = CGF.MakeAddrLValue(Addr: srcAddr, T: srcAddrType); |
4230 | } |
4231 | Address srcAddr = srcLV.getAddress(); |
4232 | |
4233 | // The dest and src types don't necessarily match in LLVM terms |
4234 | // because of the crazy ObjC compatibility rules. |
4235 | |
4236 | llvm::PointerType *destType = |
4237 | cast<llvm::PointerType>(Val: CGF.ConvertType(T: CRE->getType())); |
4238 | llvm::Type *destElemType = |
4239 | CGF.ConvertTypeForMem(T: CRE->getType()->getPointeeType()); |
4240 | |
4241 | // If the address is a constant null, just pass the appropriate null. |
4242 | if (isProvablyNull(addr: srcAddr.getBasePointer())) { |
4243 | args.add(rvalue: RValue::get(V: llvm::ConstantPointerNull::get(T: destType)), |
4244 | type: CRE->getType()); |
4245 | return; |
4246 | } |
4247 | |
4248 | // Create the temporary. |
4249 | Address temp = |
4250 | CGF.CreateTempAlloca(Ty: destElemType, align: CGF.getPointerAlign(), Name: "icr.temp" ); |
4251 | // Loading an l-value can introduce a cleanup if the l-value is __weak, |
4252 | // and that cleanup will be conditional if we can't prove that the l-value |
4253 | // isn't null, so we need to register a dominating point so that the cleanups |
4254 | // system will make valid IR. |
4255 | CodeGenFunction::ConditionalEvaluation condEval(CGF); |
4256 | |
4257 | // Zero-initialize it if we're not doing a copy-initialization. |
4258 | bool shouldCopy = CRE->shouldCopy(); |
4259 | if (!shouldCopy) { |
4260 | llvm::Value *null = |
4261 | llvm::ConstantPointerNull::get(T: cast<llvm::PointerType>(Val: destElemType)); |
4262 | CGF.Builder.CreateStore(Val: null, Addr: temp); |
4263 | } |
4264 | |
4265 | llvm::BasicBlock *contBB = nullptr; |
4266 | llvm::BasicBlock *originBB = nullptr; |
4267 | |
4268 | // If the address is *not* known to be non-null, we need to switch. |
4269 | llvm::Value *finalArgument; |
4270 | |
4271 | bool provablyNonNull = isProvablyNonNull(Addr: srcAddr, CGF); |
4272 | |
4273 | if (provablyNonNull) { |
4274 | finalArgument = temp.emitRawPointer(CGF); |
4275 | } else { |
4276 | llvm::Value *isNull = CGF.Builder.CreateIsNull(Addr: srcAddr, Name: "icr.isnull" ); |
4277 | |
4278 | finalArgument = CGF.Builder.CreateSelect( |
4279 | C: isNull, True: llvm::ConstantPointerNull::get(T: destType), |
4280 | False: temp.emitRawPointer(CGF), Name: "icr.argument" ); |
4281 | |
4282 | // If we need to copy, then the load has to be conditional, which |
4283 | // means we need control flow. |
4284 | if (shouldCopy) { |
4285 | originBB = CGF.Builder.GetInsertBlock(); |
4286 | contBB = CGF.createBasicBlock(name: "icr.cont" ); |
4287 | llvm::BasicBlock *copyBB = CGF.createBasicBlock(name: "icr.copy" ); |
4288 | CGF.Builder.CreateCondBr(Cond: isNull, True: contBB, False: copyBB); |
4289 | CGF.EmitBlock(BB: copyBB); |
4290 | condEval.begin(CGF); |
4291 | } |
4292 | } |
4293 | |
4294 | llvm::Value *valueToUse = nullptr; |
4295 | |
4296 | // Perform a copy if necessary. |
4297 | if (shouldCopy) { |
4298 | RValue srcRV = CGF.EmitLoadOfLValue(V: srcLV, Loc: SourceLocation()); |
4299 | assert(srcRV.isScalar()); |
4300 | |
4301 | llvm::Value *src = srcRV.getScalarVal(); |
4302 | src = CGF.Builder.CreateBitCast(V: src, DestTy: destElemType, Name: "icr.cast" ); |
4303 | |
4304 | // Use an ordinary store, not a store-to-lvalue. |
4305 | CGF.Builder.CreateStore(Val: src, Addr: temp); |
4306 | |
4307 | // If optimization is enabled, and the value was held in a |
4308 | // __strong variable, we need to tell the optimizer that this |
4309 | // value has to stay alive until we're doing the store back. |
4310 | // This is because the temporary is effectively unretained, |
4311 | // and so otherwise we can violate the high-level semantics. |
4312 | if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && |
4313 | srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { |
4314 | valueToUse = src; |
4315 | } |
4316 | } |
4317 | |
4318 | // Finish the control flow if we needed it. |
4319 | if (shouldCopy && !provablyNonNull) { |
4320 | llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); |
4321 | CGF.EmitBlock(BB: contBB); |
4322 | |
4323 | // Make a phi for the value to intrinsically use. |
4324 | if (valueToUse) { |
4325 | llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(Ty: valueToUse->getType(), NumReservedValues: 2, |
4326 | Name: "icr.to-use" ); |
4327 | phiToUse->addIncoming(V: valueToUse, BB: copyBB); |
4328 | phiToUse->addIncoming(V: llvm::UndefValue::get(T: valueToUse->getType()), |
4329 | BB: originBB); |
4330 | valueToUse = phiToUse; |
4331 | } |
4332 | |
4333 | condEval.end(CGF); |
4334 | } |
4335 | |
4336 | args.addWriteback(srcLV, temporary: temp, toUse: valueToUse); |
4337 | args.add(rvalue: RValue::get(V: finalArgument), type: CRE->getType()); |
4338 | } |
4339 | |
4340 | void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { |
4341 | assert(!StackBase); |
4342 | |
4343 | // Save the stack. |
4344 | StackBase = CGF.Builder.CreateStackSave(Name: "inalloca.save" ); |
4345 | } |
4346 | |
4347 | void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { |
4348 | if (StackBase) { |
4349 | // Restore the stack after the call. |
4350 | CGF.Builder.CreateStackRestore(Ptr: StackBase); |
4351 | } |
4352 | } |
4353 | |
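// Emit a sanitizer check that the given call argument is non-null when the
// corresponding parameter is declared nonnull, either via the nonnull
// attribute or a _Nonnull nullability annotation.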
4354 | void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, |
4355 | SourceLocation ArgLoc, |
4356 | AbstractCallee AC, |
4357 | unsigned ParmNum) { |
4358 | if (!AC.getDecl() || !(SanOpts.has(K: SanitizerKind::NonnullAttribute) || |
4359 | SanOpts.has(K: SanitizerKind::NullabilityArg))) |
4360 | return; |
4361 | |
4362 | // The param decl may be missing in a variadic function. |
4363 | auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(I: ParmNum) : nullptr; |
4364 | unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; |
4365 | |
4366 | // Prefer the nonnull attribute if it's present. |
4367 | const NonNullAttr *NNAttr = nullptr; |
4368 | if (SanOpts.has(K: SanitizerKind::NonnullAttribute)) |
4369 | NNAttr = getNonNullAttr(FD: AC.getDecl(), PVD, ArgType, ArgNo); |
4370 | |
4371 | bool CanCheckNullability = false; |
4372 | if (SanOpts.has(K: SanitizerKind::NullabilityArg) && !NNAttr && PVD && |
4373 | !PVD->getType()->isRecordType()) { |
4374 | auto Nullability = PVD->getType()->getNullability(); |
4375 | CanCheckNullability = Nullability && |
4376 | *Nullability == NullabilityKind::NonNull && |
4377 | PVD->getTypeSourceInfo(); |
4378 | } |
4379 | |
4380 | if (!NNAttr && !CanCheckNullability) |
4381 | return; |
4382 | |
4383 | SourceLocation AttrLoc; |
4384 | SanitizerMask CheckKind; |
4385 | SanitizerHandler Handler; |
4386 | if (NNAttr) { |
4387 | AttrLoc = NNAttr->getLocation(); |
4388 | CheckKind = SanitizerKind::NonnullAttribute; |
4389 | Handler = SanitizerHandler::NonnullArg; |
4390 | } else { |
4391 | AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc(); |
4392 | CheckKind = SanitizerKind::NullabilityArg; |
4393 | Handler = SanitizerHandler::NullabilityArg; |
4394 | } |
4395 | |
4396 | SanitizerScope SanScope(this); |
4397 | llvm::Value *Cond = EmitNonNullRValueCheck(RV, T: ArgType); |
4398 | llvm::Constant *StaticData[] = { |
4399 | EmitCheckSourceLocation(Loc: ArgLoc), EmitCheckSourceLocation(Loc: AttrLoc), |
4400 | llvm::ConstantInt::get(Ty: Int32Ty, V: ArgNo + 1), |
4401 | }; |
4402 | EmitCheck(Checked: std::make_pair(x&: Cond, y&: CheckKind), Check: Handler, StaticArgs: StaticData, DynamicArgs: std::nullopt); |
4403 | } |
4404 | |
4405 | void CodeGenFunction::EmitNonNullArgCheck(Address Addr, QualType ArgType, |
4406 | SourceLocation ArgLoc, |
4407 | AbstractCallee AC, unsigned ParmNum) { |
4408 | if (!AC.getDecl() || !(SanOpts.has(K: SanitizerKind::NonnullAttribute) || |
4409 | SanOpts.has(K: SanitizerKind::NullabilityArg))) |
4410 | return; |
4411 | |
4412 | EmitNonNullArgCheck(RV: RValue::get(Addr, CGF&: *this), ArgType, ArgLoc, AC, ParmNum); |
4413 | } |
4414 | |
4415 | // Check if the call is going to use the inalloca convention. This needs to |
4416 | // agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged |
4417 | // later, so we can't check it directly. |
4418 | static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, |
4419 | ArrayRef<QualType> ArgTypes) { |
4420 | // The Swift calling conventions don't go through the target-specific |
4421 | // argument classification, they never use inalloca. |
4422 | // TODO: Consider limiting inalloca use to only calling conventions supported |
4423 | // by MSVC. |
4424 | if (ExplicitCC == CC_Swift || ExplicitCC == CC_SwiftAsync) |
4425 | return false; |
4426 | if (!CGM.getTarget().getCXXABI().isMicrosoft()) |
4427 | return false; |
4428 | return llvm::any_of(Range&: ArgTypes, P: [&](QualType Ty) { |
4429 | return isInAllocaArgument(ABI&: CGM.getCXXABI(), type: Ty); |
4430 | }); |
4431 | } |
4432 | |
4433 | #ifndef NDEBUG |
4434 | // Determine whether the given argument is an Objective-C method |
4435 | // that may have type parameters in its signature. |
4436 | static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) { |
4437 | const DeclContext *dc = method->getDeclContext(); |
4438 | if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) { |
4439 | return classDecl->getTypeParamListAsWritten(); |
4440 | } |
4441 | |
4442 | if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) { |
4443 | return catDecl->getTypeParamList(); |
4444 | } |
4445 | |
4446 | return false; |
4447 | } |
4448 | #endif |
4449 | |
4450 | /// EmitCallArgs - Emit call arguments for a function. |
4451 | void CodeGenFunction::EmitCallArgs( |
4452 | CallArgList &Args, PrototypeWrapper Prototype, |
4453 | llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, |
4454 | AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { |
4455 | SmallVector<QualType, 16> ArgTypes; |
4456 | |
4457 | assert((ParamsToSkip == 0 || Prototype.P) && |
4458 | "Can't skip parameters if type info is not provided" ); |
4459 | |
4460 | // This variable only captures *explicitly* written conventions, not those |
4461 | // applied by default via command line flags or target defaults, such as |
4462 | // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would |
4463 | // require knowing if this is a C++ instance method or being able to see |
4464 | // unprototyped FunctionTypes. |
4465 | CallingConv ExplicitCC = CC_C; |
4466 | |
4467 | // First, if a prototype was provided, use those argument types. |
4468 | bool IsVariadic = false; |
4469 | if (Prototype.P) { |
4470 | const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>(); |
4471 | if (MD) { |
4472 | IsVariadic = MD->isVariadic(); |
4473 | ExplicitCC = getCallingConventionForDecl( |
4474 | D: MD, IsWindows: CGM.getTarget().getTriple().isOSWindows()); |
4475 | ArgTypes.assign(in_start: MD->param_type_begin() + ParamsToSkip, |
4476 | in_end: MD->param_type_end()); |
4477 | } else { |
4478 | const auto *FPT = Prototype.P.get<const FunctionProtoType *>(); |
4479 | IsVariadic = FPT->isVariadic(); |
4480 | ExplicitCC = FPT->getExtInfo().getCC(); |
4481 | ArgTypes.assign(in_start: FPT->param_type_begin() + ParamsToSkip, |
4482 | in_end: FPT->param_type_end()); |
4483 | } |
4484 | |
4485 | #ifndef NDEBUG |
4486 | // Check that the prototyped types match the argument expression types. |
4487 | bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD); |
4488 | CallExpr::const_arg_iterator Arg = ArgRange.begin(); |
4489 | for (QualType Ty : ArgTypes) { |
4490 | assert(Arg != ArgRange.end() && "Running over edge of argument list!" ); |
4491 | assert( |
4492 | (isGenericMethod || Ty->isVariablyModifiedType() || |
4493 | Ty.getNonReferenceType()->isObjCRetainableType() || |
4494 | getContext() |
4495 | .getCanonicalType(Ty.getNonReferenceType()) |
4496 | .getTypePtr() == |
4497 | getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) && |
4498 | "type mismatch in call argument!" ); |
4499 | ++Arg; |
4500 | } |
4501 | |
4502 | // Either we've emitted all the call args, or we have a call to variadic |
4503 | // function. |
4504 | assert((Arg == ArgRange.end() || IsVariadic) && |
4505 | "Extra arguments in non-variadic function!" ); |
4506 | #endif |
4507 | } |
4508 | |
4509 | // If we still have any arguments, emit them using the type of the argument. |
4510 | for (auto *A : llvm::drop_begin(RangeOrContainer&: ArgRange, N: ArgTypes.size())) |
4511 | ArgTypes.push_back(Elt: IsVariadic ? getVarArgType(Arg: A) : A->getType()); |
4512 | assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); |
4513 | |
4514 | // We must evaluate arguments from right to left in the MS C++ ABI, |
4515 | // because arguments are destroyed left to right in the callee. As a special |
4516 | // case, there are certain language constructs that require left-to-right |
4517 | // evaluation, and in those cases we consider the evaluation order requirement |
4518 | // to trump the "destruction order is reverse construction order" guarantee. |
4519 | bool LeftToRight = |
4520 | CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee() |
4521 | ? Order == EvaluationOrder::ForceLeftToRight |
4522 | : Order != EvaluationOrder::ForceRightToLeft; |
4523 | |
4524 | auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg, |
4525 | RValue EmittedArg) { |
4526 | if (!AC.hasFunctionDecl() || I >= AC.getNumParams()) |
4527 | return; |
4528 | auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>(); |
4529 | if (PS == nullptr) |
4530 | return; |
4531 | |
4532 | const auto &Context = getContext(); |
4533 | auto SizeTy = Context.getSizeType(); |
4534 | auto T = Builder.getIntNTy(N: Context.getTypeSize(T: SizeTy)); |
4535 | assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?" ); |
4536 | llvm::Value *V = evaluateOrEmitBuiltinObjectSize(E: Arg, Type: PS->getType(), ResType: T, |
4537 | EmittedE: EmittedArg.getScalarVal(), |
4538 | IsDynamic: PS->isDynamic()); |
4539 | Args.add(rvalue: RValue::get(V), type: SizeTy); |
4540 | // If we're emitting args in reverse, be sure to do so with |
4541 | // pass_object_size, as well. |
4542 | if (!LeftToRight) |
4543 | std::swap(a&: Args.back(), b&: *(&Args.back() - 1)); |
4544 | }; |
4545 | |
4546 | // Insert a stack save if we're going to need any inalloca args. |
4547 | if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) { |
4548 | assert(getTarget().getTriple().getArch() == llvm::Triple::x86 && |
4549 | "inalloca only supported on x86" ); |
4550 | Args.allocateArgumentMemory(CGF&: *this); |
4551 | } |
4552 | |
4553 | // Evaluate each argument in the appropriate order. |
4554 | size_t CallArgsStart = Args.size(); |
4555 | for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { |
4556 | unsigned Idx = LeftToRight ? I : E - I - 1; |
4557 | CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx; |
4558 | unsigned InitialArgSize = Args.size(); |
4559 | // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of |
4560 | // the argument and parameter match or the objc method is parameterized. |
4561 | assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) || |
4562 | getContext().hasSameUnqualifiedType((*Arg)->getType(), |
4563 | ArgTypes[Idx]) || |
4564 | (isa<ObjCMethodDecl>(AC.getDecl()) && |
4565 | isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) && |
4566 | "Argument and parameter types don't match" ); |
4567 | EmitCallArg(args&: Args, E: *Arg, ArgType: ArgTypes[Idx]); |
4568 | // In particular, we depend on it being the last arg in Args, and the |
4569 | // objectsize bits depend on there only being one arg if !LeftToRight. |
4570 | assert(InitialArgSize + 1 == Args.size() && |
4571 | "The code below depends on only adding one arg per EmitCallArg" ); |
4572 | (void)InitialArgSize; |
4573 | // Since pointer arguments are never emitted as l-values, it is safe to emit
4574 | // the non-null argument check for r-values only.
4575 | if (!Args.back().hasLValue()) { |
4576 | RValue RVArg = Args.back().getKnownRValue(); |
4577 | EmitNonNullArgCheck(RV: RVArg, ArgType: ArgTypes[Idx], ArgLoc: (*Arg)->getExprLoc(), AC, |
4578 | ParmNum: ParamsToSkip + Idx); |
4579 | // @llvm.objectsize should never have side-effects and shouldn't need |
4580 | // destruction/cleanups, so we can safely "emit" it after its arg, |
4581 | // regardless of right-to-leftness |
4582 | MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg); |
4583 | } |
4584 | } |
4585 | |
4586 | if (!LeftToRight) { |
4587 | // Un-reverse the arguments we just evaluated so they match up with the LLVM |
4588 | // IR function. |
4589 | std::reverse(first: Args.begin() + CallArgsStart, last: Args.end()); |
4590 | } |
4591 | } |
4592 | |
4593 | namespace { |
4594 | |
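/// Cleanup that destroys a call argument which is normally destroyed by the
/// callee (e.g. under the Microsoft C++ ABI), in case an exception is thrown
/// before the call actually takes place.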
4595 | struct DestroyUnpassedArg final : EHScopeStack::Cleanup { |
4596 | DestroyUnpassedArg(Address Addr, QualType Ty) |
4597 | : Addr(Addr), Ty(Ty) {} |
4598 | |
4599 | Address Addr; |
4600 | QualType Ty; |
4601 | |
4602 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
4603 | QualType::DestructionKind DtorKind = Ty.isDestructedType(); |
4604 | if (DtorKind == QualType::DK_cxx_destructor) { |
4605 | const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); |
4606 | assert(!Dtor->isTrivial()); |
4607 | CGF.EmitCXXDestructorCall(D: Dtor, Type: Dtor_Complete, /*for vbase*/ ForVirtualBase: false, |
4608 | /*Delegating=*/false, This: Addr, ThisTy: Ty); |
4609 | } else { |
4610 | CGF.callCStructDestructor(Dst: CGF.MakeAddrLValue(Addr, T: Ty)); |
4611 | } |
4612 | } |
4613 | }; |
4614 | |
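/// RAII helper that disables debug info while a C++ default argument
/// expression is emitted, so that its instructions do not receive misleading
/// debug locations.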
4615 | struct DisableDebugLocationUpdates { |
4616 | CodeGenFunction &CGF; |
4617 | bool disabledDebugInfo; |
4618 | DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { |
4619 | if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(Val: E) && CGF.getDebugInfo())) |
4620 | CGF.disableDebugInfo(); |
4621 | } |
4622 | ~DisableDebugLocationUpdates() { |
4623 | if (disabledDebugInfo) |
4624 | CGF.enableDebugInfo(); |
4625 | } |
4626 | }; |
4627 | |
4628 | } // end anonymous namespace |
4629 | |
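// Return this argument as an r-value. If it was recorded as an l-value, the
// aggregate is first copied into a fresh temporary.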
4630 | RValue CallArg::getRValue(CodeGenFunction &CGF) const { |
4631 | if (!HasLV) |
4632 | return RV; |
4633 | LValue Copy = CGF.MakeAddrLValue(Addr: CGF.CreateMemTemp(T: Ty), T: Ty); |
4634 | CGF.EmitAggregateCopy(Dest: Copy, Src: LV, EltTy: Ty, MayOverlap: AggValueSlot::DoesNotOverlap, |
4635 | isVolatile: LV.isVolatile()); |
4636 | IsUsed = true; |
4637 | return RValue::getAggregate(addr: Copy.getAddress()); |
4638 | } |
4639 | |
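// Copy this argument into the given address, using a scalar or complex store
// where possible and an aggregate copy otherwise.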
4640 | void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const { |
4641 | LValue Dst = CGF.MakeAddrLValue(Addr, T: Ty); |
4642 | if (!HasLV && RV.isScalar()) |
4643 | CGF.EmitStoreOfScalar(value: RV.getScalarVal(), lvalue: Dst, /*isInit=*/true); |
4644 | else if (!HasLV && RV.isComplex()) |
4645 | CGF.EmitStoreOfComplex(V: RV.getComplexVal(), dest: Dst, /*init=*/isInit: true); |
4646 | else { |
4647 | auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress(); |
4648 | LValue SrcLV = CGF.MakeAddrLValue(Addr, T: Ty); |
4649 | // We assume that call args are never copied into subobjects. |
4650 | CGF.EmitAggregateCopy(Dest: Dst, Src: SrcLV, EltTy: Ty, MayOverlap: AggValueSlot::DoesNotOverlap, |
4651 | isVolatile: HasLV ? LV.isVolatileQualified() |
4652 | : RV.isVolatileQualified()); |
4653 | } |
4654 | IsUsed = true; |
4655 | } |
4656 | |
4657 | void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, |
4658 | QualType type) { |
4659 | DisableDebugLocationUpdates Dis(*this, E); |
4660 | if (const ObjCIndirectCopyRestoreExpr *CRE |
4661 | = dyn_cast<ObjCIndirectCopyRestoreExpr>(Val: E)) { |
4662 | assert(getLangOpts().ObjCAutoRefCount); |
4663 | return emitWritebackArg(CGF&: *this, args, CRE); |
4664 | } |
4665 | |
4666 | assert(type->isReferenceType() == E->isGLValue() && |
4667 | "reference binding to unmaterialized r-value!" ); |
4668 | |
4669 | if (E->isGLValue()) { |
4670 | assert(E->getObjectKind() == OK_Ordinary); |
4671 | return args.add(rvalue: EmitReferenceBindingToExpr(E), type); |
4672 | } |
4673 | |
4674 | bool HasAggregateEvalKind = hasAggregateEvaluationKind(T: type); |
4675 | |
4676 | // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. |
4677 | // However, we still have to push an EH-only cleanup in case we unwind before |
4678 | // we make it to the call. |
4679 | if (type->isRecordType() && |
4680 | type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) { |
4681 | // If we're using inalloca, use the argument memory. Otherwise, use a |
4682 | // temporary. |
4683 | AggValueSlot Slot = args.isUsingInAlloca() |
4684 | ? createPlaceholderSlot(CGF&: *this, Ty: type) : CreateAggTemp(T: type, Name: "agg.tmp" ); |
4685 | |
4686 | bool DestroyedInCallee = true, NeedsCleanup = true; |
4687 | if (const auto *RD = type->getAsCXXRecordDecl()) |
4688 | DestroyedInCallee = RD->hasNonTrivialDestructor(); |
4689 | else |
4690 | NeedsCleanup = type.isDestructedType(); |
4691 | |
4692 | if (DestroyedInCallee) |
4693 | Slot.setExternallyDestructed(); |
4694 | |
4695 | EmitAggExpr(E, AS: Slot); |
4696 | RValue RV = Slot.asRValue(); |
4697 | args.add(rvalue: RV, type); |
4698 | |
4699 | if (DestroyedInCallee && NeedsCleanup) { |
4700 | // Push the cleanup, and record a marker instruction (the load created
4701 | // below) for the first point where the cleanup is active, so it can be
4702 | // deactivated again just before the call is emitted.
4703 | pushFullExprCleanup<DestroyUnpassedArg>(kind: NormalAndEHCleanup, |
4704 | A: Slot.getAddress(), A: type); |
4705 | // This load is a temporary marker which will be removed later.
4706 | llvm::Instruction *IsActive = |
4707 | Builder.CreateFlagLoad(Addr: llvm::Constant::getNullValue(Ty: Int8PtrTy)); |
4708 | args.addArgCleanupDeactivation(Cleanup: EHStack.stable_begin(), IsActiveIP: IsActive); |
4709 | } |
4710 | return; |
4711 | } |
4712 | |
4713 | if (HasAggregateEvalKind && isa<ImplicitCastExpr>(Val: E) && |
4714 | cast<CastExpr>(Val: E)->getCastKind() == CK_LValueToRValue && |
4715 | !type->isArrayParameterType()) { |
4716 | LValue L = EmitLValue(E: cast<CastExpr>(Val: E)->getSubExpr()); |
4717 | assert(L.isSimple()); |
4718 | args.addUncopiedAggregate(LV: L, type); |
4719 | return; |
4720 | } |
4721 | |
4722 | args.add(rvalue: EmitAnyExprToTemp(E), type); |
4723 | } |
4724 | |
4725 | QualType CodeGenFunction::getVarArgType(const Expr *Arg) { |
4726 | // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC |
4727 | // implicitly widens null pointer constants that are arguments to varargs |
4728 | // functions to pointer-sized ints. |
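// For example (illustrative), a call such as printf("%p", NULL) passes the
// int constant 0; returning getIntPtrType() below makes us emit it as a
// pointer-sized zero, matching MSVC's behavior.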
4729 | if (!getTarget().getTriple().isOSWindows()) |
4730 | return Arg->getType(); |
4731 | |
4732 | if (Arg->getType()->isIntegerType() && |
4733 | getContext().getTypeSize(T: Arg->getType()) < |
4734 | getContext().getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default) && |
4735 | Arg->isNullPointerConstant(Ctx&: getContext(), |
4736 | NPC: Expr::NPC_ValueDependentIsNotNull)) { |
4737 | return getContext().getIntPtrType(); |
4738 | } |
4739 | |
4740 | return Arg->getType(); |
4741 | } |
4742 | |
4743 | // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC |
4744 | // optimizer it can aggressively ignore unwind edges. |
4745 | void |
4746 | CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { |
4747 | if (CGM.getCodeGenOpts().OptimizationLevel != 0 && |
4748 | !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) |
4749 | Inst->setMetadata(Kind: "clang.arc.no_objc_arc_exceptions" , |
4750 | Node: CGM.getNoObjCARCExceptionsMetadata()); |
4751 | } |
4752 | |
4753 | /// Emits a call to the given no-arguments nounwind runtime function. |
4754 | llvm::CallInst * |
4755 | CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, |
4756 | const llvm::Twine &name) { |
4757 | return EmitNounwindRuntimeCall(callee, args: ArrayRef<llvm::Value *>(), name); |
4758 | } |
4759 | |
4760 | /// Emits a call to the given nounwind runtime function. |
4761 | llvm::CallInst * |
4762 | CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, |
4763 | ArrayRef<Address> args, |
4764 | const llvm::Twine &name) { |
4765 | SmallVector<llvm::Value *, 3> values; |
4766 | for (auto arg : args) |
4767 | values.push_back(Elt: arg.emitRawPointer(CGF&: *this)); |
4768 | return EmitNounwindRuntimeCall(callee, args: values, name); |
4769 | } |
4770 | |
4771 | llvm::CallInst * |
4772 | CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, |
4773 | ArrayRef<llvm::Value *> args, |
4774 | const llvm::Twine &name) { |
4775 | llvm::CallInst *call = EmitRuntimeCall(callee, args, name); |
4776 | call->setDoesNotThrow(); |
4777 | return call; |
4778 | } |
4779 | |
4780 | /// Emits a simple call (never an invoke) to the given no-arguments |
4781 | /// runtime function. |
4782 | llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, |
4783 | const llvm::Twine &name) { |
4784 | return EmitRuntimeCall(callee, args: std::nullopt, name); |
4785 | } |
4786 | |
4787 | // Calls which may throw must have operand bundles indicating which funclet |
4788 | // they are nested within. |
4789 | SmallVector<llvm::OperandBundleDef, 1> |
4790 | CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) { |
4791 | // There is no need for a funclet operand bundle if we aren't inside a |
4792 | // funclet. |
4793 | if (!CurrentFuncletPad) |
4794 | return (SmallVector<llvm::OperandBundleDef, 1>()); |
4795 | |
4796 | // Skip intrinsics which cannot throw (as long as they don't lower into |
4797 | // regular function calls in the course of IR transformations). |
4798 | if (auto *CalleeFn = dyn_cast<llvm::Function>(Val: Callee->stripPointerCasts())) { |
4799 | if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) { |
4800 | auto IID = CalleeFn->getIntrinsicID(); |
4801 | if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID)) |
4802 | return (SmallVector<llvm::OperandBundleDef, 1>()); |
4803 | } |
4804 | } |
4805 | |
4806 | SmallVector<llvm::OperandBundleDef, 1> BundleList; |
4807 | BundleList.emplace_back(Args: "funclet" , Args&: CurrentFuncletPad); |
4808 | return BundleList; |
4809 | } |
4810 | |
4811 | /// Emits a simple call (never an invoke) to the given runtime function. |
4812 | llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, |
4813 | ArrayRef<llvm::Value *> args, |
4814 | const llvm::Twine &name) { |
4815 | llvm::CallInst *call = Builder.CreateCall( |
4816 | Callee: callee, Args: args, OpBundles: getBundlesForFunclet(Callee: callee.getCallee()), Name: name); |
4817 | call->setCallingConv(getRuntimeCC()); |
4818 | |
4819 | if (CGM.shouldEmitConvergenceTokens() && call->isConvergent()) |
4820 | return addControlledConvergenceToken(Input: call); |
4821 | return call; |
4822 | } |
4823 | |
4824 | /// Emits a call or invoke to the given noreturn runtime function. |
4825 | void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke( |
4826 | llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) { |
4827 | SmallVector<llvm::OperandBundleDef, 1> BundleList = |
4828 | getBundlesForFunclet(Callee: callee.getCallee()); |
4829 | |
4830 | if (getInvokeDest()) { |
4831 | llvm::InvokeInst *invoke = |
4832 | Builder.CreateInvoke(Callee: callee, |
4833 | NormalDest: getUnreachableBlock(), |
4834 | UnwindDest: getInvokeDest(), |
4835 | Args: args, |
4836 | OpBundles: BundleList); |
4837 | invoke->setDoesNotReturn(); |
4838 | invoke->setCallingConv(getRuntimeCC()); |
4839 | } else { |
4840 | llvm::CallInst *call = Builder.CreateCall(Callee: callee, Args: args, OpBundles: BundleList); |
4841 | call->setDoesNotReturn(); |
4842 | call->setCallingConv(getRuntimeCC()); |
4843 | Builder.CreateUnreachable(); |
4844 | } |
4845 | } |
4846 | |
4847 | /// Emits a call or invoke instruction to the given nullary runtime function. |
4848 | llvm::CallBase * |
4849 | CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, |
4850 | const Twine &name) { |
4851 | return EmitRuntimeCallOrInvoke(callee, args: std::nullopt, name); |
4852 | } |
4853 | |
4854 | /// Emits a call or invoke instruction to the given runtime function. |
4855 | llvm::CallBase * |
4856 | CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, |
4857 | ArrayRef<llvm::Value *> args, |
4858 | const Twine &name) { |
4859 | llvm::CallBase *call = EmitCallOrInvoke(Callee: callee, Args: args, Name: name); |
4860 | call->setCallingConv(getRuntimeCC()); |
4861 | return call; |
4862 | } |
4863 | |
4864 | /// Emits a call or invoke instruction to the given function, depending |
4865 | /// on the current state of the EH stack. |
4866 | llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee, |
4867 | ArrayRef<llvm::Value *> Args, |
4868 | const Twine &Name) { |
4869 | llvm::BasicBlock *InvokeDest = getInvokeDest(); |
4870 | SmallVector<llvm::OperandBundleDef, 1> BundleList = |
4871 | getBundlesForFunclet(Callee: Callee.getCallee()); |
4872 | |
4873 | llvm::CallBase *Inst; |
4874 | if (!InvokeDest) |
4875 | Inst = Builder.CreateCall(Callee, Args, OpBundles: BundleList, Name); |
4876 | else { |
4877 | llvm::BasicBlock *ContBB = createBasicBlock(name: "invoke.cont" ); |
4878 | Inst = Builder.CreateInvoke(Callee, NormalDest: ContBB, UnwindDest: InvokeDest, Args, OpBundles: BundleList, |
4879 | Name); |
4880 | EmitBlock(BB: ContBB); |
4881 | } |
4882 | |
4883 | // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC |
4884 | // optimizer it can aggressively ignore unwind edges. |
4885 | if (CGM.getLangOpts().ObjCAutoRefCount) |
4886 | AddObjCARCExceptionMetadata(Inst); |
4887 | |
4888 | return Inst; |
4889 | } |
4890 | |
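// Record a placeholder instruction whose uses will be rewritten to \p New
// (and the placeholder erased) later, once the call it belongs to has been
// emitted.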
4891 | void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, |
4892 | llvm::Value *New) { |
4893 | DeferredReplacements.push_back( |
4894 | Elt: std::make_pair(x: llvm::WeakTrackingVH(Old), y&: New)); |
4895 | } |
4896 | |
4897 | namespace { |
4898 | |
4899 | /// Specify the given \p NewAlign as the alignment of the return value attribute.
4900 | /// If such an attribute already exists, raise it to the maximum of the two.
4901 | [[nodiscard]] llvm::AttributeList |
4902 | maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx, |
4903 | const llvm::AttributeList &Attrs, |
4904 | llvm::Align NewAlign) { |
4905 | llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne(); |
4906 | if (CurAlign >= NewAlign) |
4907 | return Attrs; |
4908 | llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Context&: Ctx, Alignment: NewAlign); |
4909 | return Attrs.removeRetAttribute(C&: Ctx, Kind: llvm::Attribute::AttrKind::Alignment) |
4910 | .addRetAttribute(C&: Ctx, Attr: AlignAttr); |
4911 | } |
4912 | |
4913 | template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter { |
4914 | protected: |
4915 | CodeGenFunction &CGF; |
4916 | |
4917 | /// We do nothing if this is, or becomes, nullptr. |
4918 | const AlignedAttrTy *AA = nullptr; |
4919 | |
4920 | llvm::Value *Alignment = nullptr; // May or may not be a constant. |
4921 | llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero. |
4922 | |
4923 | AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) |
4924 | : CGF(CGF_) { |
4925 | if (!FuncDecl) |
4926 | return; |
4927 | AA = FuncDecl->getAttr<AlignedAttrTy>(); |
4928 | } |
4929 | |
4930 | public: |
4931 | /// If we can, materialize the alignment as an attribute on return value. |
4932 | [[nodiscard]] llvm::AttributeList |
4933 | TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) { |
4934 | if (!AA || OffsetCI || CGF.SanOpts.has(K: SanitizerKind::Alignment)) |
4935 | return Attrs; |
4936 | const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Val: Alignment); |
4937 | if (!AlignmentCI) |
4938 | return Attrs; |
4939 | // We may legitimately have a non-power-of-2 alignment here.
4940 | // If so, this is UB land; emit it via `@llvm.assume` instead.
4941 | if (!AlignmentCI->getValue().isPowerOf2()) |
4942 | return Attrs; |
4943 | llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute( |
4944 | Ctx&: CGF.getLLVMContext(), Attrs, |
4945 | NewAlign: llvm::Align( |
4946 | AlignmentCI->getLimitedValue(Limit: llvm::Value::MaximumAlignment))); |
4947 | AA = nullptr; // We're done. Disallow doing anything else. |
4948 | return NewAttrs; |
4949 | } |
4950 | |
4951 | /// Emit alignment assumption. |
4952 | /// This is a general fallback that we take if either there is an offset, |
4953 | /// or the alignment is variable or we are sanitizing for alignment. |
4954 | void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) { |
4955 | if (!AA) |
4956 | return; |
4957 | CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, |
4958 | AA->getLocation(), Alignment, OffsetCI); |
4959 | AA = nullptr; // We're done. Disallow doing anything else. |
4960 | } |
4961 | }; |
4962 | |
4963 | /// Helper data structure to emit `AssumeAlignedAttr`. |
4964 | class AssumeAlignedAttrEmitter final |
4965 | : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> { |
4966 | public: |
4967 | AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) |
4968 | : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { |
4969 | if (!AA) |
4970 | return; |
4971 | // It is guaranteed that the alignment/offset are constants. |
4972 | Alignment = cast<llvm::ConstantInt>(Val: CGF.EmitScalarExpr(E: AA->getAlignment())); |
4973 | if (Expr *Offset = AA->getOffset()) { |
4974 | OffsetCI = cast<llvm::ConstantInt>(Val: CGF.EmitScalarExpr(E: Offset)); |
4975 | if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset. |
4976 | OffsetCI = nullptr; |
4977 | } |
4978 | } |
4979 | }; |
4980 | |
4981 | /// Helper data structure to emit `AllocAlignAttr`. |
4982 | class AllocAlignAttrEmitter final |
4983 | : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> { |
4984 | public: |
4985 | AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl, |
4986 | const CallArgList &CallArgs) |
4987 | : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { |
4988 | if (!AA) |
4989 | return; |
4990 | // Alignment may or may not be a constant, and that is okay. |
4991 | Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()] |
4992 | .getRValue(CGF) |
4993 | .getScalarVal(); |
4994 | } |
4995 | }; |
4996 | |
4997 | } // namespace |
4998 | |
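// Return the widest vector width (in bits) used anywhere within \p Ty,
// looking through array and struct element types; returns 0 if \p Ty
// contains no vector types.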
4999 | static unsigned getMaxVectorWidth(const llvm::Type *Ty) { |
5000 | if (auto *VT = dyn_cast<llvm::VectorType>(Val: Ty)) |
5001 | return VT->getPrimitiveSizeInBits().getKnownMinValue(); |
5002 | if (auto *AT = dyn_cast<llvm::ArrayType>(Val: Ty)) |
5003 | return getMaxVectorWidth(Ty: AT->getElementType()); |
5004 | |
5005 | unsigned MaxVectorWidth = 0; |
5006 | if (auto *ST = dyn_cast<llvm::StructType>(Val: Ty)) |
5007 | for (auto *I : ST->elements()) |
5008 | MaxVectorWidth = std::max(a: MaxVectorWidth, b: getMaxVectorWidth(Ty: I)); |
5009 | return MaxVectorWidth; |
5010 | } |
5011 | |
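// Emit a call to the given callee: lower the clang-level CallArgs according
// to the ABI classification in CallInfo, emit the call or invoke itself, and
// convert the result back to an RValue.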
5012 | RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, |
5013 | const CGCallee &Callee, |
5014 | ReturnValueSlot ReturnValue, |
5015 | const CallArgList &CallArgs, |
5016 | llvm::CallBase **callOrInvoke, bool IsMustTail, |
5017 | SourceLocation Loc, |
5018 | bool IsVirtualFunctionPointerThunk) { |
5019 | // FIXME: We no longer need the types from CallArgs; lift up and simplify. |
5020 | |
5021 | assert(Callee.isOrdinary() || Callee.isVirtual()); |
5022 | |
5023 | // Handle struct-return functions by passing a pointer to the |
5024 | // location that we would like to return into. |
5025 | QualType RetTy = CallInfo.getReturnType(); |
5026 | const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); |
5027 | |
5028 | llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(FI: CallInfo); |
5029 | |
5030 | const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); |
5031 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Val: TargetDecl)) { |
5032 | // We can only guarantee that a function is called from the correct
5033 | // context/function based on the appropriate target attributes, so only
5034 | // check in the case where we have both always_inline and target attributes,
5035 | // since otherwise we could be making a conditional call after a check for
5036 | // the proper CPU features (and it won't cause code generation issues due to
5037 | // function-based code generation).
5038 | if (TargetDecl->hasAttr<AlwaysInlineAttr>() && |
5039 | (TargetDecl->hasAttr<TargetAttr>() || |
5040 | (CurFuncDecl && CurFuncDecl->hasAttr<TargetAttr>()))) |
5041 | checkTargetFeatures(Loc, TargetDecl: FD); |
5042 | } |
5043 | |
  // On some architectures (such as x86-64), the ABI can change based on
  // target attributes/features. Give the target a chance to diagnose.
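  // For example (illustrative, x86-64): passing a 256-bit vector to a
  // target("avx") callee from a caller compiled without AVX changes how the
  // argument is passed, so the hook can warn about the mismatch:
  //
  //   typedef double v4df __attribute__((vector_size(32)));
  //   __attribute__((target("avx"))) void callee(v4df);
  //   void caller(v4df v) { callee(v); }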
5046 | CGM.getTargetCodeGenInfo().checkFunctionCallABI( |
5047 | CGM, CallLoc: Loc, Caller: dyn_cast_or_null<FunctionDecl>(Val: CurCodeDecl), |
5048 | Callee: dyn_cast_or_null<FunctionDecl>(Val: TargetDecl), Args: CallArgs, ReturnType: RetTy); |
5049 | |
5050 | // 1. Set up the arguments. |
5051 | |
5052 | // If we're using inalloca, insert the allocation after the stack save. |
5053 | // FIXME: Do this earlier rather than hacking it in here! |
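  // As a sketch (hypothetical C++ source, 32-bit x86 MSVC): arguments that
  // must be constructed directly in the argument area use inalloca:
  //
  //   struct S { S(); S(const S &); ~S(); int x; };
  //   void f(S s);
  //   void g() { f(S()); }   // S() is constructed inside the "argmem" packet.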
5054 | RawAddress ArgMemory = RawAddress::invalid(); |
5055 | if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { |
5056 | const llvm::DataLayout &DL = CGM.getDataLayout(); |
5057 | llvm::Instruction *IP = CallArgs.getStackBase(); |
5058 | llvm::AllocaInst *AI; |
5059 | if (IP) { |
5060 | IP = IP->getNextNode(); |
5061 | AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(), |
5062 | "argmem" , IP); |
5063 | } else { |
5064 | AI = CreateTempAlloca(Ty: ArgStruct, Name: "argmem" ); |
5065 | } |
5066 | auto Align = CallInfo.getArgStructAlignment(); |
5067 | AI->setAlignment(Align.getAsAlign()); |
5068 | AI->setUsedWithInAlloca(true); |
5069 | assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); |
5070 | ArgMemory = RawAddress(AI, ArgStruct, Align); |
5071 | } |
5072 | |
5073 | ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo); |
5074 | SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs()); |
5075 | |
5076 | // If the call returns a temporary with struct return, create a temporary |
5077 | // alloca to hold the result, unless one is given to us. |
5078 | Address SRetPtr = Address::invalid(); |
5079 | RawAddress SRetAlloca = RawAddress::invalid(); |
5080 | llvm::Value *UnusedReturnSizePtr = nullptr; |
5081 | if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) { |
5082 | if (IsVirtualFunctionPointerThunk && RetAI.isIndirect()) { |
5083 | SRetPtr = makeNaturalAddressForPointer(Ptr: CurFn->arg_begin() + |
5084 | IRFunctionArgs.getSRetArgNo(), |
5085 | T: RetTy, Alignment: CharUnits::fromQuantity(Quantity: 1)); |
5086 | } else if (!ReturnValue.isNull()) { |
5087 | SRetPtr = ReturnValue.getAddress(); |
5088 | } else { |
5089 | SRetPtr = CreateMemTemp(T: RetTy, Name: "tmp" , Alloca: &SRetAlloca); |
5090 | if (HaveInsertPoint() && ReturnValue.isUnused()) { |
5091 | llvm::TypeSize size = |
5092 | CGM.getDataLayout().getTypeAllocSize(Ty: ConvertTypeForMem(T: RetTy)); |
5093 | UnusedReturnSizePtr = EmitLifetimeStart(Size: size, Addr: SRetAlloca.getPointer()); |
5094 | } |
5095 | } |
5096 | if (IRFunctionArgs.hasSRetArg()) { |
5097 | IRCallArgs[IRFunctionArgs.getSRetArgNo()] = |
5098 | getAsNaturalPointerTo(Addr: SRetPtr, PointeeType: RetTy); |
5099 | } else if (RetAI.isInAlloca()) { |
5100 | Address Addr = |
5101 | Builder.CreateStructGEP(Addr: ArgMemory, Index: RetAI.getInAllocaFieldIndex()); |
5102 | Builder.CreateStore(Val: getAsNaturalPointerTo(Addr: SRetPtr, PointeeType: RetTy), Addr); |
5103 | } |
5104 | } |
5105 | |
5106 | RawAddress swiftErrorTemp = RawAddress::invalid(); |
5107 | Address swiftErrorArg = Address::invalid(); |
5108 | |
5109 | // When passing arguments using temporary allocas, we need to add the |
5110 | // appropriate lifetime markers. This vector keeps track of all the lifetime |
5111 | // markers that need to be ended right after the call. |
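  // Schematically (illustrative IR), each such temporary is bracketed as:
  //
  //   %byval-temp = alloca %struct.S, align 8
  //   call void @llvm.lifetime.start.p0(i64 <size>, ptr %byval-temp)
  //   ... the call ...
  //   call void @llvm.lifetime.end.p0(i64 <size>, ptr %byval-temp)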
5112 | SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall; |
5113 | |
5114 | // Translate all of the arguments as necessary to match the IR lowering. |
5115 | assert(CallInfo.arg_size() == CallArgs.size() && |
5116 | "Mismatch between function signature & arguments." ); |
5117 | unsigned ArgNo = 0; |
5118 | CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); |
5119 | for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); |
5120 | I != E; ++I, ++info_it, ++ArgNo) { |
5121 | const ABIArgInfo &ArgInfo = info_it->info; |
5122 | |
5123 | // Insert a padding argument to ensure proper alignment. |
5124 | if (IRFunctionArgs.hasPaddingArg(ArgNo)) |
5125 | IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = |
5126 | llvm::UndefValue::get(T: ArgInfo.getPaddingType()); |
5127 | |
5128 | unsigned FirstIRArg, NumIRArgs; |
5129 | std::tie(args&: FirstIRArg, args&: NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); |
5130 | |
5131 | bool ArgHasMaybeUndefAttr = |
5132 | IsArgumentMaybeUndef(TargetDecl, NumRequiredArgs: CallInfo.getNumRequiredArgs(), ArgNo); |
5133 | |
5134 | switch (ArgInfo.getKind()) { |
5135 | case ABIArgInfo::InAlloca: { |
5136 | assert(NumIRArgs == 0); |
5137 | assert(getTarget().getTriple().getArch() == llvm::Triple::x86); |
5138 | if (I->isAggregate()) { |
5139 | RawAddress Addr = I->hasLValue() |
5140 | ? I->getKnownLValue().getAddress() |
5141 | : I->getKnownRValue().getAggregateAddress(); |
5142 | llvm::Instruction *Placeholder = |
5143 | cast<llvm::Instruction>(Val: Addr.getPointer()); |
5144 | |
5145 | if (!ArgInfo.getInAllocaIndirect()) { |
5146 | // Replace the placeholder with the appropriate argument slot GEP. |
5147 | CGBuilderTy::InsertPoint IP = Builder.saveIP(); |
5148 | Builder.SetInsertPoint(Placeholder); |
5149 | Addr = Builder.CreateStructGEP(Addr: ArgMemory, |
5150 | Index: ArgInfo.getInAllocaFieldIndex()); |
5151 | Builder.restoreIP(IP); |
5152 | } else { |
5153 | // For indirect things such as overaligned structs, replace the |
5154 | // placeholder with a regular aggregate temporary alloca. Store the |
5155 | // address of this alloca into the struct. |
5156 | Addr = CreateMemTemp(T: info_it->type, Name: "inalloca.indirect.tmp" ); |
5157 | Address ArgSlot = Builder.CreateStructGEP( |
5158 | Addr: ArgMemory, Index: ArgInfo.getInAllocaFieldIndex()); |
5159 | Builder.CreateStore(Val: Addr.getPointer(), Addr: ArgSlot); |
5160 | } |
5161 | deferPlaceholderReplacement(Old: Placeholder, New: Addr.getPointer()); |
5162 | } else if (ArgInfo.getInAllocaIndirect()) { |
5163 | // Make a temporary alloca and store the address of it into the argument |
5164 | // struct. |
5165 | RawAddress Addr = CreateMemTempWithoutCast( |
5166 | T: I->Ty, Align: getContext().getTypeAlignInChars(T: I->Ty), |
5167 | Name: "indirect-arg-temp" ); |
5168 | I->copyInto(CGF&: *this, Addr); |
5169 | Address ArgSlot = |
5170 | Builder.CreateStructGEP(Addr: ArgMemory, Index: ArgInfo.getInAllocaFieldIndex()); |
5171 | Builder.CreateStore(Val: Addr.getPointer(), Addr: ArgSlot); |
5172 | } else { |
5173 | // Store the RValue into the argument struct. |
5174 | Address Addr = |
5175 | Builder.CreateStructGEP(Addr: ArgMemory, Index: ArgInfo.getInAllocaFieldIndex()); |
5176 | Addr = Addr.withElementType(ElemTy: ConvertTypeForMem(T: I->Ty)); |
5177 | I->copyInto(CGF&: *this, Addr); |
5178 | } |
5179 | break; |
5180 | } |
5181 | |
5182 | case ABIArgInfo::Indirect: |
5183 | case ABIArgInfo::IndirectAliased: { |
5184 | assert(NumIRArgs == 1); |
5185 | if (I->isAggregate()) { |
5186 | // We want to avoid creating an unnecessary temporary+copy here; |
5187 | // however, we need one in three cases: |
5188 | // 1. If the argument is not byval, and we are required to copy the |
5189 | // source. (This case doesn't occur on any common architecture.) |
5190 | // 2. If the argument is byval, RV is not sufficiently aligned, and |
5191 | // we cannot force it to be sufficiently aligned. |
5192 | // 3. If the argument is byval, but RV is not located in default |
5193 | // or alloca address space. |
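        // For example (case 2, illustrative): a byval argument whose source
        // is under-aligned because it lives inside a packed object forces the
        // copy below:
        //
        //   #pragma pack(1)
        //   struct Outer { char c; struct S s; };  // 's' is 1-byte aligned
        //   void g(struct Outer *o) { f(o->s); }   // copy to aligned temporary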
5194 | Address Addr = I->hasLValue() |
5195 | ? I->getKnownLValue().getAddress() |
5196 | : I->getKnownRValue().getAggregateAddress(); |
5197 | CharUnits Align = ArgInfo.getIndirectAlign(); |
5198 | const llvm::DataLayout *TD = &CGM.getDataLayout(); |
5199 | |
5200 | assert((FirstIRArg >= IRFuncTy->getNumParams() || |
5201 | IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() == |
5202 | TD->getAllocaAddrSpace()) && |
5203 | "indirect argument must be in alloca address space" ); |
5204 | |
5205 | bool NeedCopy = false; |
5206 | if (Addr.getAlignment() < Align && |
5207 | llvm::getOrEnforceKnownAlignment(V: Addr.emitRawPointer(CGF&: *this), |
5208 | PrefAlign: Align.getAsAlign(), |
5209 | DL: *TD) < Align.getAsAlign()) { |
5210 | NeedCopy = true; |
5211 | } else if (I->hasLValue()) { |
5212 | auto LV = I->getKnownLValue(); |
5213 | auto AS = LV.getAddressSpace(); |
5214 | |
5215 | bool isByValOrRef = |
5216 | ArgInfo.isIndirectAliased() || ArgInfo.getIndirectByVal(); |
5217 | |
5218 | if (!isByValOrRef || |
5219 | (LV.getAlignment() < getContext().getTypeAlignInChars(T: I->Ty))) { |
5220 | NeedCopy = true; |
5221 | } |
5222 | if (!getLangOpts().OpenCL) { |
5223 | if ((isByValOrRef && |
5224 | (AS != LangAS::Default && |
5225 | AS != CGM.getASTAllocaAddressSpace()))) { |
5226 | NeedCopy = true; |
5227 | } |
5228 | } |
          // For OpenCL, even if RV is located in the default or alloca
          // address space, we don't want to perform an address space cast
          // for it.
5231 | else if ((isByValOrRef && |
5232 | Addr.getType()->getAddressSpace() != IRFuncTy-> |
5233 | getParamType(i: FirstIRArg)->getPointerAddressSpace())) { |
5234 | NeedCopy = true; |
5235 | } |
5236 | } |
5237 | |
5238 | if (!NeedCopy) { |
5239 | // Skip the extra memcpy call. |
5240 | llvm::Value *V = getAsNaturalPointerTo(Addr, PointeeType: I->Ty); |
5241 | auto *T = llvm::PointerType::get( |
5242 | C&: CGM.getLLVMContext(), AddressSpace: CGM.getDataLayout().getAllocaAddrSpace()); |
5243 | |
5244 | llvm::Value *Val = getTargetHooks().performAddrSpaceCast( |
5245 | CGF&: *this, V, SrcAddr: LangAS::Default, DestAddr: CGM.getASTAllocaAddressSpace(), DestTy: T, |
5246 | IsNonNull: true); |
5247 | if (ArgHasMaybeUndefAttr) |
5248 | Val = Builder.CreateFreeze(V: Val); |
5249 | IRCallArgs[FirstIRArg] = Val; |
5250 | break; |
5251 | } |
5252 | } |
5253 | |
      // For non-aggregate args, and for aggregate args meeting the conditions
      // above, we need to create an aligned temporary and copy into it.
5256 | RawAddress AI = CreateMemTempWithoutCast( |
5257 | T: I->Ty, Align: ArgInfo.getIndirectAlign(), Name: "byval-temp" ); |
5258 | llvm::Value *Val = getAsNaturalPointerTo(Addr: AI, PointeeType: I->Ty); |
5259 | if (ArgHasMaybeUndefAttr) |
5260 | Val = Builder.CreateFreeze(V: Val); |
5261 | IRCallArgs[FirstIRArg] = Val; |
5262 | |
5263 | // Emit lifetime markers for the temporary alloca. |
5264 | llvm::TypeSize ByvalTempElementSize = |
5265 | CGM.getDataLayout().getTypeAllocSize(Ty: AI.getElementType()); |
5266 | llvm::Value *LifetimeSize = |
5267 | EmitLifetimeStart(Size: ByvalTempElementSize, Addr: AI.getPointer()); |
5268 | |
5269 | // Add cleanup code to emit the end lifetime marker after the call. |
5270 | if (LifetimeSize) // In case we disabled lifetime markers. |
5271 | CallLifetimeEndAfterCall.emplace_back(Args&: AI, Args&: LifetimeSize); |
5272 | |
5273 | // Generate the copy. |
5274 | I->copyInto(CGF&: *this, Addr: AI); |
5275 | break; |
5276 | } |
5277 | |
5278 | case ABIArgInfo::Ignore: |
5279 | assert(NumIRArgs == 0); |
5280 | break; |
5281 | |
5282 | case ABIArgInfo::Extend: |
5283 | case ABIArgInfo::Direct: { |
5284 | if (!isa<llvm::StructType>(Val: ArgInfo.getCoerceToType()) && |
5285 | ArgInfo.getCoerceToType() == ConvertType(T: info_it->type) && |
5286 | ArgInfo.getDirectOffset() == 0) { |
5287 | assert(NumIRArgs == 1); |
5288 | llvm::Value *V; |
5289 | if (!I->isAggregate()) |
5290 | V = I->getKnownRValue().getScalarVal(); |
5291 | else |
5292 | V = Builder.CreateLoad( |
5293 | Addr: I->hasLValue() ? I->getKnownLValue().getAddress() |
5294 | : I->getKnownRValue().getAggregateAddress()); |
5295 | |
5296 | // Implement swifterror by copying into a new swifterror argument. |
5297 | // We'll write back in the normal path out of the call. |
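        // Schematically (illustrative IR; the parameter carries the
        // swift_error_result ABI), this emits roughly:
        //
        //   %swifterror.temp = alloca swifterror ptr
        //   store ptr %current.error, ptr %swifterror.temp
        //   call ... (..., ptr swifterror %swifterror.temp, ...)
        //
        // and the updated error value is loaded back after the call.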
5298 | if (CallInfo.getExtParameterInfo(argIndex: ArgNo).getABI() |
5299 | == ParameterABI::SwiftErrorResult) { |
5300 | assert(!swiftErrorTemp.isValid() && "multiple swifterror args" ); |
5301 | |
5302 | QualType pointeeTy = I->Ty->getPointeeType(); |
5303 | swiftErrorArg = makeNaturalAddressForPointer( |
5304 | Ptr: V, T: pointeeTy, Alignment: getContext().getTypeAlignInChars(T: pointeeTy)); |
5305 | |
5306 | swiftErrorTemp = |
5307 | CreateMemTemp(T: pointeeTy, Align: getPointerAlign(), Name: "swifterror.temp" ); |
5308 | V = swiftErrorTemp.getPointer(); |
5309 | cast<llvm::AllocaInst>(Val: V)->setSwiftError(true); |
5310 | |
5311 | llvm::Value *errorValue = Builder.CreateLoad(Addr: swiftErrorArg); |
5312 | Builder.CreateStore(Val: errorValue, Addr: swiftErrorTemp); |
5313 | } |
5314 | |
5315 | // We might have to widen integers, but we should never truncate. |
5316 | if (ArgInfo.getCoerceToType() != V->getType() && |
5317 | V->getType()->isIntegerTy()) |
5318 | V = Builder.CreateZExt(V, DestTy: ArgInfo.getCoerceToType()); |
5319 | |
5320 | // If the argument doesn't match, perform a bitcast to coerce it. This |
5321 | // can happen due to trivial type mismatches. |
5322 | if (FirstIRArg < IRFuncTy->getNumParams() && |
5323 | V->getType() != IRFuncTy->getParamType(i: FirstIRArg)) |
5324 | V = Builder.CreateBitCast(V, DestTy: IRFuncTy->getParamType(i: FirstIRArg)); |
5325 | |
5326 | if (ArgHasMaybeUndefAttr) |
5327 | V = Builder.CreateFreeze(V); |
5328 | IRCallArgs[FirstIRArg] = V; |
5329 | break; |
5330 | } |
5331 | |
5332 | llvm::StructType *STy = |
5333 | dyn_cast<llvm::StructType>(Val: ArgInfo.getCoerceToType()); |
5334 | if (STy && ArgInfo.isDirect() && !ArgInfo.getCanBeFlattened()) { |
5335 | llvm::Type *SrcTy = ConvertTypeForMem(T: I->Ty); |
5336 | [[maybe_unused]] llvm::TypeSize SrcTypeSize = |
5337 | CGM.getDataLayout().getTypeAllocSize(Ty: SrcTy); |
5338 | [[maybe_unused]] llvm::TypeSize DstTypeSize = |
5339 | CGM.getDataLayout().getTypeAllocSize(Ty: STy); |
5340 | if (STy->containsHomogeneousScalableVectorTypes()) { |
5341 | assert(SrcTypeSize == DstTypeSize && |
5342 | "Only allow non-fractional movement of structure with " |
5343 | "homogeneous scalable vector type" ); |
5344 | |
5345 | IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal(); |
5346 | break; |
5347 | } |
5348 | } |
5349 | |
5350 | // FIXME: Avoid the conversion through memory if possible. |
5351 | Address Src = Address::invalid(); |
5352 | if (!I->isAggregate()) { |
5353 | Src = CreateMemTemp(T: I->Ty, Name: "coerce" ); |
5354 | I->copyInto(CGF&: *this, Addr: Src); |
5355 | } else { |
5356 | Src = I->hasLValue() ? I->getKnownLValue().getAddress() |
5357 | : I->getKnownRValue().getAggregateAddress(); |
5358 | } |
5359 | |
5360 | // If the value is offset in memory, apply the offset now. |
5361 | Src = emitAddressAtOffset(CGF&: *this, addr: Src, info: ArgInfo); |
5362 | |
5363 | // Fast-isel and the optimizer generally like scalar values better than |
5364 | // FCAs, so we flatten them if this is safe to do for this argument. |
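      // For example (roughly, x86-64 SysV): a two-field struct coerced to
      // { i64, i64 } is passed as two scalar i64 arguments rather than as one
      // first-class aggregate:
      //
      //   struct P { long a, b; };
      //   void f(struct P p);   // IR: define void @f(i64 %p.coerce0, i64 %p.coerce1)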
5365 | if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { |
5366 | llvm::Type *SrcTy = Src.getElementType(); |
5367 | llvm::TypeSize SrcTypeSize = |
5368 | CGM.getDataLayout().getTypeAllocSize(Ty: SrcTy); |
5369 | llvm::TypeSize DstTypeSize = CGM.getDataLayout().getTypeAllocSize(Ty: STy); |
5370 | if (SrcTypeSize.isScalable()) { |
5371 | assert(STy->containsHomogeneousScalableVectorTypes() && |
5372 | "ABI only supports structure with homogeneous scalable vector " |
5373 | "type" ); |
5374 | assert(SrcTypeSize == DstTypeSize && |
5375 | "Only allow non-fractional movement of structure with " |
5376 | "homogeneous scalable vector type" ); |
5377 | assert(NumIRArgs == STy->getNumElements()); |
5378 | |
5379 | llvm::Value *StoredStructValue = |
5380 | Builder.CreateLoad(Addr: Src, Name: Src.getName() + ".tuple" ); |
5381 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
            llvm::Value *Extract = Builder.CreateExtractValue(
5383 | Agg: StoredStructValue, Idxs: i, Name: Src.getName() + ".extract" + Twine(i)); |
5384 | IRCallArgs[FirstIRArg + i] = Extract; |
5385 | } |
5386 | } else { |
5387 | uint64_t SrcSize = SrcTypeSize.getFixedValue(); |
5388 | uint64_t DstSize = DstTypeSize.getFixedValue(); |
5389 | |
5390 | // If the source type is smaller than the destination type of the |
5391 | // coerce-to logic, copy the source value into a temp alloca the size |
5392 | // of the destination type to allow loading all of it. The bits past |
5393 | // the source value are left undef. |
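          // For example (roughly, x86-64 SysV): a 12-byte struct
          //
          //   struct S { int a, b, c; };
          //
          // may be coerced to { i64, i32 }, whose alloc size is 16 bytes, so
          // the 12-byte source is first copied into a 16-byte temporary.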
5394 | if (SrcSize < DstSize) { |
5395 | Address TempAlloca = CreateTempAlloca(Ty: STy, align: Src.getAlignment(), |
5396 | Name: Src.getName() + ".coerce" ); |
5397 | Builder.CreateMemCpy(Dest: TempAlloca, Src, Size: SrcSize); |
5398 | Src = TempAlloca; |
5399 | } else { |
5400 | Src = Src.withElementType(ElemTy: STy); |
5401 | } |
5402 | |
5403 | assert(NumIRArgs == STy->getNumElements()); |
5404 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
5405 | Address EltPtr = Builder.CreateStructGEP(Addr: Src, Index: i); |
5406 | llvm::Value *LI = Builder.CreateLoad(Addr: EltPtr); |
5407 | if (ArgHasMaybeUndefAttr) |
5408 | LI = Builder.CreateFreeze(V: LI); |
5409 | IRCallArgs[FirstIRArg + i] = LI; |
5410 | } |
5411 | } |
5412 | } else { |
5413 | // In the simple case, just pass the coerced loaded value. |
5414 | assert(NumIRArgs == 1); |
5415 | llvm::Value *Load = |
5416 | CreateCoercedLoad(Src, Ty: ArgInfo.getCoerceToType(), CGF&: *this); |
5417 | |
5418 | if (CallInfo.isCmseNSCall()) { |
5419 | // For certain parameter types, clear padding bits, as they may reveal |
5420 | // sensitive information. |
5421 | // Small struct/union types are passed as integer arrays. |
5422 | auto *ATy = dyn_cast<llvm::ArrayType>(Val: Load->getType()); |
5423 | if (ATy != nullptr && isa<RecordType>(Val: I->Ty.getCanonicalType())) |
5424 | Load = EmitCMSEClearRecord(Src: Load, ATy, QTy: I->Ty); |
5425 | } |
5426 | |
5427 | if (ArgHasMaybeUndefAttr) |
5428 | Load = Builder.CreateFreeze(V: Load); |
5429 | IRCallArgs[FirstIRArg] = Load; |
5430 | } |
5431 | |
5432 | break; |
5433 | } |
5434 | |
5435 | case ABIArgInfo::CoerceAndExpand: { |
5436 | auto coercionType = ArgInfo.getCoerceAndExpandType(); |
5437 | auto layout = CGM.getDataLayout().getStructLayout(Ty: coercionType); |
5438 | |
5439 | llvm::Value *tempSize = nullptr; |
5440 | Address addr = Address::invalid(); |
5441 | RawAddress AllocaAddr = RawAddress::invalid(); |
5442 | if (I->isAggregate()) { |
5443 | addr = I->hasLValue() ? I->getKnownLValue().getAddress() |
5444 | : I->getKnownRValue().getAggregateAddress(); |
5445 | |
5446 | } else { |
5447 | RValue RV = I->getKnownRValue(); |
5448 | assert(RV.isScalar()); // complex should always just be direct |
5449 | |
5450 | llvm::Type *scalarType = RV.getScalarVal()->getType(); |
5451 | auto scalarSize = CGM.getDataLayout().getTypeAllocSize(Ty: scalarType); |
5452 | auto scalarAlign = CGM.getDataLayout().getPrefTypeAlign(Ty: scalarType); |
5453 | |
5454 | // Materialize to a temporary. |
5455 | addr = CreateTempAlloca( |
5456 | Ty: RV.getScalarVal()->getType(), |
5457 | align: CharUnits::fromQuantity(Quantity: std::max(a: layout->getAlignment(), b: scalarAlign)), |
5458 | Name: "tmp" , |
5459 | /*ArraySize=*/nullptr, Alloca: &AllocaAddr); |
5460 | tempSize = EmitLifetimeStart(Size: scalarSize, Addr: AllocaAddr.getPointer()); |
5461 | |
5462 | Builder.CreateStore(Val: RV.getScalarVal(), Addr: addr); |
5463 | } |
5464 | |
5465 | addr = addr.withElementType(ElemTy: coercionType); |
5466 | |
5467 | unsigned IRArgPos = FirstIRArg; |
5468 | for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { |
5469 | llvm::Type *eltType = coercionType->getElementType(N: i); |
5470 | if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; |
5471 | Address eltAddr = Builder.CreateStructGEP(Addr: addr, Index: i); |
5472 | llvm::Value *elt = Builder.CreateLoad(Addr: eltAddr); |
5473 | if (ArgHasMaybeUndefAttr) |
5474 | elt = Builder.CreateFreeze(V: elt); |
5475 | IRCallArgs[IRArgPos++] = elt; |
5476 | } |
5477 | assert(IRArgPos == FirstIRArg + NumIRArgs); |
5478 | |
5479 | if (tempSize) { |
5480 | EmitLifetimeEnd(Size: tempSize, Addr: AllocaAddr.getPointer()); |
5481 | } |
5482 | |
5483 | break; |
5484 | } |
5485 | |
5486 | case ABIArgInfo::Expand: { |
5487 | unsigned IRArgPos = FirstIRArg; |
5488 | ExpandTypeToArgs(Ty: I->Ty, Arg: *I, IRFuncTy, IRCallArgs, IRCallArgPos&: IRArgPos); |
5489 | assert(IRArgPos == FirstIRArg + NumIRArgs); |
5490 | break; |
5491 | } |
5492 | } |
5493 | } |
5494 | |
5495 | const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(CGF&: *this); |
5496 | llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer(); |
5497 | |
5498 | // If we're using inalloca, set up that argument. |
5499 | if (ArgMemory.isValid()) { |
5500 | llvm::Value *Arg = ArgMemory.getPointer(); |
5501 | assert(IRFunctionArgs.hasInallocaArg()); |
5502 | IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg; |
5503 | } |
5504 | |
5505 | // 2. Prepare the function pointer. |
5506 | |
5507 | // If the callee is a bitcast of a non-variadic function to have a |
5508 | // variadic function pointer type, check to see if we can remove the |
5509 | // bitcast. This comes up with unprototyped functions. |
5510 | // |
5511 | // This makes the IR nicer, but more importantly it ensures that we |
5512 | // can inline the function at -O0 if it is marked always_inline. |
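  // For example (illustrative, C):
  //
  //   void f();                  // unprototyped declaration
  //   void g(void) { f(42); }    // callee type looks variadic here
  //
  // If f is actually defined as 'void f(int)', the call can target the
  // function directly instead of going through the casted pointer.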
5513 | auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT, |
5514 | llvm::Value *Ptr) -> llvm::Function * { |
5515 | if (!CalleeFT->isVarArg()) |
5516 | return nullptr; |
5517 | |
5518 | // Get underlying value if it's a bitcast |
5519 | if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Val: Ptr)) { |
5520 | if (CE->getOpcode() == llvm::Instruction::BitCast) |
5521 | Ptr = CE->getOperand(i_nocapture: 0); |
5522 | } |
5523 | |
5524 | llvm::Function *OrigFn = dyn_cast<llvm::Function>(Val: Ptr); |
5525 | if (!OrigFn) |
5526 | return nullptr; |
5527 | |
5528 | llvm::FunctionType *OrigFT = OrigFn->getFunctionType(); |
5529 | |
5530 | // If the original type is variadic, or if any of the component types |
5531 | // disagree, we cannot remove the cast. |
5532 | if (OrigFT->isVarArg() || |
5533 | OrigFT->getNumParams() != CalleeFT->getNumParams() || |
5534 | OrigFT->getReturnType() != CalleeFT->getReturnType()) |
5535 | return nullptr; |
5536 | |
5537 | for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i) |
5538 | if (OrigFT->getParamType(i) != CalleeFT->getParamType(i)) |
5539 | return nullptr; |
5540 | |
5541 | return OrigFn; |
5542 | }; |
5543 | |
5544 | if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) { |
5545 | CalleePtr = OrigFn; |
5546 | IRFuncTy = OrigFn->getFunctionType(); |
5547 | } |
5548 | |
5549 | // 3. Perform the actual call. |
5550 | |
5551 | // Deactivate any cleanups that we're supposed to do immediately before |
5552 | // the call. |
5553 | if (!CallArgs.getCleanupsToDeactivate().empty()) |
5554 | deactivateArgCleanupsBeforeCall(CGF&: *this, CallArgs); |
5555 | |
5556 | // Assert that the arguments we computed match up. The IR verifier |
5557 | // will catch this, but this is a common enough source of problems |
5558 | // during IRGen changes that it's way better for debugging to catch |
5559 | // it ourselves here. |
5560 | #ifndef NDEBUG |
5561 | assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg()); |
5562 | for (unsigned i = 0; i < IRCallArgs.size(); ++i) { |
    // The inalloca argument can have a different type.
5564 | if (IRFunctionArgs.hasInallocaArg() && |
5565 | i == IRFunctionArgs.getInallocaArgNo()) |
5566 | continue; |
5567 | if (i < IRFuncTy->getNumParams()) |
5568 | assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i)); |
5569 | } |
5570 | #endif |
5571 | |
5572 | // Update the largest vector width if any arguments have vector types. |
5573 | for (unsigned i = 0; i < IRCallArgs.size(); ++i) |
5574 | LargestVectorWidth = std::max(a: LargestVectorWidth, |
5575 | b: getMaxVectorWidth(Ty: IRCallArgs[i]->getType())); |
5576 | |
5577 | // Compute the calling convention and attributes. |
5578 | unsigned CallingConv; |
5579 | llvm::AttributeList Attrs; |
5580 | CGM.ConstructAttributeList(Name: CalleePtr->getName(), FI: CallInfo, |
5581 | CalleeInfo: Callee.getAbstractInfo(), AttrList&: Attrs, CallingConv, |
5582 | /*AttrOnCallSite=*/true, |
5583 | /*IsThunk=*/false); |
5584 | |
5585 | if (CallingConv == llvm::CallingConv::X86_VectorCall && |
5586 | getTarget().getTriple().isWindowsArm64EC()) { |
5587 | CGM.Error(loc: Loc, error: "__vectorcall calling convention is not currently " |
5588 | "supported" ); |
5589 | } |
5590 | |
5591 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Val: CurFuncDecl)) { |
5592 | if (FD->hasAttr<StrictFPAttr>()) |
5593 | // All calls within a strictfp function are marked strictfp |
5594 | Attrs = Attrs.addFnAttribute(C&: getLLVMContext(), Kind: llvm::Attribute::StrictFP); |
5595 | |
    // If -ffast-math is enabled and the function is guarded by
    // '__attribute__((optnone))', adjust the memory attribute so the backend
    // emits the library call instead of the intrinsic.
5599 | if (FD->hasAttr<OptimizeNoneAttr>() && getLangOpts().FastMath) |
5600 | CGM.AdjustMemoryAttribute(Name: CalleePtr->getName(), CalleeInfo: Callee.getAbstractInfo(), |
5601 | Attrs); |
5602 | } |
  // Add the call-site nomerge attribute if present.
5604 | if (InNoMergeAttributedStmt) |
5605 | Attrs = Attrs.addFnAttribute(C&: getLLVMContext(), Kind: llvm::Attribute::NoMerge); |
5606 | |
  // Add the call-site noinline attribute if present.
5608 | if (InNoInlineAttributedStmt) |
5609 | Attrs = Attrs.addFnAttribute(C&: getLLVMContext(), Kind: llvm::Attribute::NoInline); |
5610 | |
  // Add the call-site always_inline attribute if present.
5612 | if (InAlwaysInlineAttributedStmt) |
5613 | Attrs = |
5614 | Attrs.addFnAttribute(C&: getLLVMContext(), Kind: llvm::Attribute::AlwaysInline); |
5615 | |
5616 | // Apply some call-site-specific attributes. |
5617 | // TODO: work this into building the attribute set. |
5618 | |
5619 | // Apply always_inline to all calls within flatten functions. |
5620 | // FIXME: should this really take priority over __try, below? |
5621 | if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() && |
5622 | !InNoInlineAttributedStmt && |
5623 | !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) { |
5624 | Attrs = |
5625 | Attrs.addFnAttribute(C&: getLLVMContext(), Kind: llvm::Attribute::AlwaysInline); |
5626 | } |
5627 | |
5628 | // Disable inlining inside SEH __try blocks. |
5629 | if (isSEHTryScope()) { |
5630 | Attrs = Attrs.addFnAttribute(C&: getLLVMContext(), Kind: llvm::Attribute::NoInline); |
5631 | } |
5632 | |
5633 | // Decide whether to use a call or an invoke. |
5634 | bool CannotThrow; |
5635 | if (currentFunctionUsesSEHTry()) { |
5636 | // SEH cares about asynchronous exceptions, so everything can "throw." |
5637 | CannotThrow = false; |
5638 | } else if (isCleanupPadScope() && |
5639 | EHPersonality::get(CGF&: *this).isMSVCXXPersonality()) { |
5640 | // The MSVC++ personality will implicitly terminate the program if an |
5641 | // exception is thrown during a cleanup outside of a try/catch. |
5642 | // We don't need to model anything in IR to get this behavior. |
5643 | CannotThrow = true; |
5644 | } else { |
5645 | // Otherwise, nounwind call sites will never throw. |
5646 | CannotThrow = Attrs.hasFnAttr(Kind: llvm::Attribute::NoUnwind); |
5647 | |
5648 | if (auto *FPtr = dyn_cast<llvm::Function>(Val: CalleePtr)) |
5649 | if (FPtr->hasFnAttribute(Kind: llvm::Attribute::NoUnwind)) |
5650 | CannotThrow = true; |
5651 | } |
5652 | |
5653 | // If we made a temporary, be sure to clean up after ourselves. Note that we |
5654 | // can't depend on being inside of an ExprWithCleanups, so we need to manually |
5655 | // pop this cleanup later on. Being eager about this is OK, since this |
5656 | // temporary is 'invisible' outside of the callee. |
5657 | if (UnusedReturnSizePtr) |
5658 | pushFullExprCleanup<CallLifetimeEnd>(kind: NormalEHLifetimeMarker, A: SRetAlloca, |
5659 | A: UnusedReturnSizePtr); |
5660 | |
5661 | llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest(); |
5662 | |
5663 | SmallVector<llvm::OperandBundleDef, 1> BundleList = |
5664 | getBundlesForFunclet(Callee: CalleePtr); |
5665 | |
5666 | if (SanOpts.has(K: SanitizerKind::KCFI) && |
5667 | !isa_and_nonnull<FunctionDecl>(Val: TargetDecl)) |
5668 | EmitKCFIOperandBundle(Callee: ConcreteCallee, Bundles&: BundleList); |
5669 | |
5670 | // Add the pointer-authentication bundle. |
5671 | EmitPointerAuthOperandBundle(Info: ConcreteCallee.getPointerAuthInfo(), Bundles&: BundleList); |
5672 | |
5673 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Val: CurFuncDecl)) |
5674 | if (FD->hasAttr<StrictFPAttr>()) |
5675 | // All calls within a strictfp function are marked strictfp |
5676 | Attrs = Attrs.addFnAttribute(C&: getLLVMContext(), Kind: llvm::Attribute::StrictFP); |
5677 | |
5678 | AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl); |
5679 | Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs); |
5680 | |
5681 | AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs); |
5682 | Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs); |
5683 | |
5684 | // Emit the actual call/invoke instruction. |
5685 | llvm::CallBase *CI; |
5686 | if (!InvokeDest) { |
5687 | CI = Builder.CreateCall(FTy: IRFuncTy, Callee: CalleePtr, Args: IRCallArgs, OpBundles: BundleList); |
5688 | } else { |
5689 | llvm::BasicBlock *Cont = createBasicBlock(name: "invoke.cont" ); |
5690 | CI = Builder.CreateInvoke(Ty: IRFuncTy, Callee: CalleePtr, NormalDest: Cont, UnwindDest: InvokeDest, Args: IRCallArgs, |
5691 | OpBundles: BundleList); |
5692 | EmitBlock(BB: Cont); |
5693 | } |
5694 | if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() && |
5695 | CI->getCalledFunction()->getName().starts_with(Prefix: "_Z4sqrt" )) { |
5696 | SetSqrtFPAccuracy(CI); |
5697 | } |
5698 | if (callOrInvoke) |
5699 | *callOrInvoke = CI; |
5700 | |
5701 | // If this is within a function that has the guard(nocf) attribute and is an |
5702 | // indirect call, add the "guard_nocf" attribute to this call to indicate that |
5703 | // Control Flow Guard checks should not be added, even if the call is inlined. |
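  // For example (illustrative, MSVC extension):
  //
  //   __declspec(guard(nocf)) void caller(void (*fp)(void)) { fp(); }
  //
  // The indirect call through 'fp' gets the "guard_nocf" call-site attribute,
  // so no CFG check is inserted for it.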
5704 | if (const auto *FD = dyn_cast_or_null<FunctionDecl>(Val: CurFuncDecl)) { |
5705 | if (const auto *A = FD->getAttr<CFGuardAttr>()) { |
5706 | if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction()) |
5707 | Attrs = Attrs.addFnAttribute(C&: getLLVMContext(), Kind: "guard_nocf" ); |
5708 | } |
5709 | } |
5710 | |
5711 | // Apply the attributes and calling convention. |
5712 | CI->setAttributes(Attrs); |
5713 | CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); |
5714 | |
5715 | // Apply various metadata. |
5716 | |
5717 | if (!CI->getType()->isVoidTy()) |
5718 | CI->setName("call" ); |
5719 | |
5720 | if (CGM.shouldEmitConvergenceTokens() && CI->isConvergent()) |
5721 | CI = addControlledConvergenceToken(Input: CI); |
5722 | |
5723 | // Update largest vector width from the return type. |
5724 | LargestVectorWidth = |
5725 | std::max(a: LargestVectorWidth, b: getMaxVectorWidth(Ty: CI->getType())); |
5726 | |
5727 | // Insert instrumentation or attach profile metadata at indirect call sites. |
5728 | // For more details, see the comment before the definition of |
5729 | // IPVK_IndirectCallTarget in InstrProfData.inc. |
5730 | if (!CI->getCalledFunction()) |
5731 | PGO.valueProfile(Builder, ValueKind: llvm::IPVK_IndirectCallTarget, |
5732 | ValueSite: CI, ValuePtr: CalleePtr); |
5733 | |
5734 | // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC |
5735 | // optimizer it can aggressively ignore unwind edges. |
5736 | if (CGM.getLangOpts().ObjCAutoRefCount) |
5737 | AddObjCARCExceptionMetadata(Inst: CI); |
5738 | |
5739 | // Set tail call kind if necessary. |
5740 | if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(Val: CI)) { |
5741 | if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>()) |
5742 | Call->setTailCallKind(llvm::CallInst::TCK_NoTail); |
5743 | else if (IsMustTail) { |
5744 | if (getTarget().getTriple().isPPC()) { |
5745 | if (getTarget().getTriple().isOSAIX()) |
5746 | CGM.getDiags().Report(Loc, DiagID: diag::err_aix_musttail_unsupported); |
5747 | else if (!getTarget().hasFeature(Feature: "pcrelative-memops" )) { |
5748 | if (getTarget().hasFeature(Feature: "longcall" )) |
5749 | CGM.getDiags().Report(Loc, DiagID: diag::err_ppc_impossible_musttail) << 0; |
5750 | else if (Call->isIndirectCall()) |
5751 | CGM.getDiags().Report(Loc, DiagID: diag::err_ppc_impossible_musttail) << 1; |
5752 | else if (isa_and_nonnull<FunctionDecl>(Val: TargetDecl)) { |
5753 | if (!cast<FunctionDecl>(Val: TargetDecl)->isDefined()) |
              // The undefined callee may be a forward declaration. Without
              // knowing all symbols in the module, we cannot tell whether the
              // symbol is actually defined. Collect these symbols for later
              // diagnosis.
5757 | CGM.addUndefinedGlobalForTailCall( |
5758 | Global: {cast<FunctionDecl>(Val: TargetDecl), Loc}); |
5759 | else { |
5760 | llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage( |
5761 | GD: GlobalDecl(cast<FunctionDecl>(Val: TargetDecl))); |
5762 | if (llvm::GlobalValue::isWeakForLinker(Linkage) || |
5763 | llvm::GlobalValue::isDiscardableIfUnused(Linkage)) |
5764 | CGM.getDiags().Report(Loc, DiagID: diag::err_ppc_impossible_musttail) |
5765 | << 2; |
5766 | } |
5767 | } |
5768 | } |
5769 | } |
5770 | Call->setTailCallKind(llvm::CallInst::TCK_MustTail); |
5771 | } |
5772 | } |
5773 | |
  // Add metadata for calls to MSAllocator functions.
5775 | if (getDebugInfo() && TargetDecl && |
5776 | TargetDecl->hasAttr<MSAllocatorAttr>()) |
5777 | getDebugInfo()->addHeapAllocSiteMetadata(CallSite: CI, AllocatedTy: RetTy->getPointeeType(), Loc); |
5778 | |
5779 | // Add metadata if calling an __attribute__((error(""))) or warning fn. |
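  // For example (illustrative):
  //
  //   __attribute__((error("do not call"))) void bad(void);
  //   void f(void) { bad(); }
  //
  // The !srcloc metadata lets the backend point its diagnostic at the call
  // site inside f.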
5780 | if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) { |
5781 | llvm::ConstantInt *Line = |
5782 | llvm::ConstantInt::get(Ty: Int64Ty, V: Loc.getRawEncoding()); |
5783 | llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(C: Line); |
5784 | llvm::MDTuple *MDT = llvm::MDNode::get(Context&: getLLVMContext(), MDs: {MD}); |
5785 | CI->setMetadata(Kind: "srcloc" , Node: MDT); |
5786 | } |
5787 | |
5788 | // 4. Finish the call. |
5789 | |
5790 | // If the call doesn't return, finish the basic block and clear the |
5791 | // insertion point; this allows the rest of IRGen to discard |
5792 | // unreachable code. |
5793 | if (CI->doesNotReturn()) { |
5794 | if (UnusedReturnSizePtr) |
5795 | PopCleanupBlock(); |
5796 | |
5797 | // Strip away the noreturn attribute to better diagnose unreachable UB. |
5798 | if (SanOpts.has(K: SanitizerKind::Unreachable)) { |
5799 | // Also remove from function since CallBase::hasFnAttr additionally checks |
5800 | // attributes of the called function. |
5801 | if (auto *F = CI->getCalledFunction()) |
5802 | F->removeFnAttr(Kind: llvm::Attribute::NoReturn); |
5803 | CI->removeFnAttr(Kind: llvm::Attribute::NoReturn); |
5804 | |
5805 | // Avoid incompatibility with ASan which relies on the `noreturn` |
5806 | // attribute to insert handler calls. |
5807 | if (SanOpts.hasOneOf(K: SanitizerKind::Address | |
5808 | SanitizerKind::KernelAddress)) { |
5809 | SanitizerScope SanScope(this); |
5810 | llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder); |
5811 | Builder.SetInsertPoint(CI); |
5812 | auto *FnType = llvm::FunctionType::get(Result: CGM.VoidTy, /*isVarArg=*/false); |
5813 | llvm::FunctionCallee Fn = |
5814 | CGM.CreateRuntimeFunction(Ty: FnType, Name: "__asan_handle_no_return" ); |
5815 | EmitNounwindRuntimeCall(callee: Fn); |
5816 | } |
5817 | } |
5818 | |
5819 | EmitUnreachable(Loc); |
5820 | Builder.ClearInsertionPoint(); |
5821 | |
    // FIXME: For now, emit a dummy basic block because expression emitters
    // generally are not ready to handle emitting expressions at unreachable
    // points.
5825 | EnsureInsertPoint(); |
5826 | |
5827 | // Return a reasonable RValue. |
5828 | return GetUndefRValue(Ty: RetTy); |
5829 | } |
5830 | |
5831 | // If this is a musttail call, return immediately. We do not branch to the |
5832 | // epilogue in this case. |
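  // Schematically (illustrative IR):
  //
  //   %r = musttail call i32 @callee(i32 %x)
  //   ret i32 %r            ; no branch to the shared return block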
5833 | if (IsMustTail) { |
5834 | for (auto it = EHStack.find(sp: CurrentCleanupScopeDepth); it != EHStack.end(); |
5835 | ++it) { |
5836 | EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(Val: &*it); |
5837 | if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn())) |
5838 | CGM.ErrorUnsupported(S: MustTailCall, Type: "tail call skipping over cleanups" ); |
5839 | } |
5840 | if (CI->getType()->isVoidTy()) |
5841 | Builder.CreateRetVoid(); |
5842 | else |
5843 | Builder.CreateRet(V: CI); |
5844 | Builder.ClearInsertionPoint(); |
5845 | EnsureInsertPoint(); |
5846 | return GetUndefRValue(Ty: RetTy); |
5847 | } |
5848 | |
5849 | // Perform the swifterror writeback. |
5850 | if (swiftErrorTemp.isValid()) { |
5851 | llvm::Value *errorResult = Builder.CreateLoad(Addr: swiftErrorTemp); |
5852 | Builder.CreateStore(Val: errorResult, Addr: swiftErrorArg); |
5853 | } |
5854 | |
5855 | // Emit any call-associated writebacks immediately. Arguably this |
5856 | // should happen after any return-value munging. |
5857 | if (CallArgs.hasWritebacks()) |
5858 | emitWritebacks(CGF&: *this, args: CallArgs); |
5859 | |
5860 | // The stack cleanup for inalloca arguments has to run out of the normal |
5861 | // lexical order, so deactivate it and run it manually here. |
5862 | CallArgs.freeArgumentMemory(CGF&: *this); |
5863 | |
5864 | // Extract the return value. |
5865 | RValue Ret; |
5866 | |
5867 | // If the current function is a virtual function pointer thunk, avoid copying |
5868 | // the return value of the musttail call to a temporary. |
5869 | if (IsVirtualFunctionPointerThunk) { |
5870 | Ret = RValue::get(V: CI); |
5871 | } else { |
5872 | Ret = [&] { |
5873 | switch (RetAI.getKind()) { |
5874 | case ABIArgInfo::CoerceAndExpand: { |
5875 | auto coercionType = RetAI.getCoerceAndExpandType(); |
5876 | |
5877 | Address addr = SRetPtr.withElementType(ElemTy: coercionType); |
5878 | |
5879 | assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType()); |
        bool requiresExtract = isa<llvm::StructType>(Val: CI->getType());
5881 | |
5882 | unsigned unpaddedIndex = 0; |
5883 | for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { |
5884 | llvm::Type *eltType = coercionType->getElementType(N: i); |
5885 | if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) |
5886 | continue; |
5887 | Address eltAddr = Builder.CreateStructGEP(Addr: addr, Index: i); |
5888 | llvm::Value *elt = CI; |
5889 | if (requiresExtract) |
5890 | elt = Builder.CreateExtractValue(Agg: elt, Idxs: unpaddedIndex++); |
5891 | else |
5892 | assert(unpaddedIndex == 0); |
5893 | Builder.CreateStore(Val: elt, Addr: eltAddr); |
5894 | } |
5895 | [[fallthrough]]; |
5896 | } |
5897 | |
5898 | case ABIArgInfo::InAlloca: |
5899 | case ABIArgInfo::Indirect: { |
5900 | RValue ret = convertTempToRValue(addr: SRetPtr, type: RetTy, Loc: SourceLocation()); |
5901 | if (UnusedReturnSizePtr) |
5902 | PopCleanupBlock(); |
5903 | return ret; |
5904 | } |
5905 | |
5906 | case ABIArgInfo::Ignore: |
      // If the return value is being ignored, still construct an appropriate
      // (undef) return value for our caller.
5909 | return GetUndefRValue(Ty: RetTy); |
5910 | |
5911 | case ABIArgInfo::Extend: |
5912 | case ABIArgInfo::Direct: { |
5913 | llvm::Type *RetIRTy = ConvertType(T: RetTy); |
5914 | if (RetAI.getCoerceToType() == RetIRTy && |
5915 | RetAI.getDirectOffset() == 0) { |
5916 | switch (getEvaluationKind(T: RetTy)) { |
5917 | case TEK_Complex: { |
5918 | llvm::Value *Real = Builder.CreateExtractValue(Agg: CI, Idxs: 0); |
5919 | llvm::Value *Imag = Builder.CreateExtractValue(Agg: CI, Idxs: 1); |
5920 | return RValue::getComplex(C: std::make_pair(x&: Real, y&: Imag)); |
5921 | } |
5922 | case TEK_Aggregate: |
5923 | break; |
5924 | case TEK_Scalar: { |
5925 | // If the argument doesn't match, perform a bitcast to coerce it. |
5926 | // This can happen due to trivial type mismatches. |
5927 | llvm::Value *V = CI; |
5928 | if (V->getType() != RetIRTy) |
5929 | V = Builder.CreateBitCast(V, DestTy: RetIRTy); |
5930 | return RValue::get(V); |
5931 | } |
5932 | } |
5933 | } |
5934 | |
5935 | // If coercing a fixed vector from a scalable vector for ABI |
5936 | // compatibility, and the types match, use the llvm.vector.extract |
5937 | // intrinsic to perform the conversion. |
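        // Roughly (illustrative IR):
        //
        //   %v = call <vscale x 4 x i32> @callee()
        //   %f = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(
        //            <vscale x 4 x i32> %v, i64 0)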
5938 | if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(Val: RetIRTy)) { |
5939 | llvm::Value *V = CI; |
5940 | if (auto *ScalableSrcTy = |
5941 | dyn_cast<llvm::ScalableVectorType>(Val: V->getType())) { |
5942 | if (FixedDstTy->getElementType() == |
5943 | ScalableSrcTy->getElementType()) { |
5944 | llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGM.Int64Ty); |
5945 | V = Builder.CreateExtractVector(DstType: FixedDstTy, SrcVec: V, Idx: Zero, |
5946 | Name: "cast.fixed" ); |
5947 | return RValue::get(V); |
5948 | } |
5949 | } |
5950 | } |
5951 | |
5952 | Address DestPtr = ReturnValue.getValue(); |
5953 | bool DestIsVolatile = ReturnValue.isVolatile(); |
5954 | uint64_t DestSize = |
5955 | getContext().getTypeInfoDataSizeInChars(T: RetTy).Width.getQuantity(); |
5956 | |
5957 | if (!DestPtr.isValid()) { |
5958 | DestPtr = CreateMemTemp(T: RetTy, Name: "coerce" ); |
5959 | DestIsVolatile = false; |
5960 | DestSize = getContext().getTypeSizeInChars(T: RetTy).getQuantity(); |
5961 | } |
5962 | |
        // An empty record can overlap other data (if declared with
        // no_unique_address); omit the store for such types, as there is no
        // actual data to store.
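        // For example (illustrative, C++): if the return slot aliases a
        // [[no_unique_address]] member,
        //
        //   struct Empty {};
        //   struct Holder { [[no_unique_address]] Empty e; int x; };
        //
        // writing the "value" of Empty into it could clobber 'x'.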
5966 | if (!isEmptyRecord(Context&: getContext(), T: RetTy, AllowArrays: true)) { |
5967 | // If the value is offset in memory, apply the offset now. |
5968 | Address StorePtr = emitAddressAtOffset(CGF&: *this, addr: DestPtr, info: RetAI); |
5969 | CreateCoercedStore( |
5970 | Src: CI, Dst: StorePtr, |
5971 | DstSize: llvm::TypeSize::getFixed(ExactSize: DestSize - RetAI.getDirectOffset()), |
5972 | DstIsVolatile: DestIsVolatile); |
5973 | } |
5974 | |
5975 | return convertTempToRValue(addr: DestPtr, type: RetTy, Loc: SourceLocation()); |
5976 | } |
5977 | |
5978 | case ABIArgInfo::Expand: |
5979 | case ABIArgInfo::IndirectAliased: |
5980 | llvm_unreachable("Invalid ABI kind for return argument" ); |
5981 | } |
5982 | |
5983 | llvm_unreachable("Unhandled ABIArgInfo::Kind" ); |
5984 | }(); |
5985 | } |
5986 | |
5987 | // Emit the assume_aligned check on the return value. |
5988 | if (Ret.isScalar() && TargetDecl) { |
5989 | AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret); |
5990 | AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret); |
5991 | } |
5992 | |
5993 | // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though |
5994 | // we can't use the full cleanup mechanism. |
5995 | for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall) |
5996 | LifetimeEnd.Emit(CGF&: *this, /*Flags=*/flags: {}); |
5997 | |
5998 | if (!ReturnValue.isExternallyDestructed() && |
5999 | RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct) |
6000 | pushDestroy(dtorKind: QualType::DK_nontrivial_c_struct, addr: Ret.getAggregateAddress(), |
6001 | type: RetTy); |
6002 | |
6003 | return Ret; |
6004 | } |
6005 | |
6006 | CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const { |
6007 | if (isVirtual()) { |
6008 | const CallExpr *CE = getVirtualCallExpr(); |
6009 | return CGF.CGM.getCXXABI().getVirtualFunctionPointer( |
6010 | CGF, GD: getVirtualMethodDecl(), This: getThisAddress(), Ty: getVirtualFunctionType(), |
6011 | Loc: CE ? CE->getBeginLoc() : SourceLocation()); |
6012 | } |
6013 | |
6014 | return *this; |
6015 | } |
6016 | |
6017 | /* VarArg handling */ |
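// For example (illustrative): each va_arg expression in the snippet reaches
// EmitVAArg below, which defers to the target's ABIInfo:
//
//   #include <stdarg.h>
//   int sum(int n, ...) {
//     va_list ap;
//     va_start(ap, n);
//     int s = 0;
//     for (int i = 0; i < n; ++i)
//       s += va_arg(ap, int);
//     va_end(ap);
//     return s;
//   }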
6018 | |
6019 | RValue CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr, |
6020 | AggValueSlot Slot) { |
6021 | VAListAddr = VE->isMicrosoftABI() ? EmitMSVAListRef(E: VE->getSubExpr()) |
6022 | : EmitVAListRef(E: VE->getSubExpr()); |
6023 | QualType Ty = VE->getType(); |
6024 | if (VE->isMicrosoftABI()) |
6025 | return CGM.getTypes().getABIInfo().EmitMSVAArg(CGF&: *this, VAListAddr, Ty, Slot); |
6026 | return CGM.getTypes().getABIInfo().EmitVAArg(CGF&: *this, VAListAddr, Ty, Slot); |
6027 | } |
6028 | |