//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LIB_CODEGEN_CGCALL_H
#define LLVM_CLANG_LIB_CODEGEN_CGCALL_H

#include "CGPointerAuthInfo.h"
#include "CGValue.h"
#include "EHScopeStack.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/CanonicalType.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/IR/Value.h"

namespace llvm {
class Type;
class Value;
} // namespace llvm

namespace clang {
class Decl;
class FunctionDecl;
class TargetOptions;
class VarDecl;

namespace CodeGen {

/// Abstract information about a function or function prototype.
class CGCalleeInfo {
  /// The function prototype of the callee.
  const FunctionProtoType *CalleeProtoTy;
  /// The function declaration of the callee.
  GlobalDecl CalleeDecl;

public:
  explicit CGCalleeInfo() : CalleeProtoTy(nullptr) {}
  CGCalleeInfo(const FunctionProtoType *calleeProtoTy, GlobalDecl calleeDecl)
      : CalleeProtoTy(calleeProtoTy), CalleeDecl(calleeDecl) {}
  CGCalleeInfo(const FunctionProtoType *calleeProtoTy)
      : CalleeProtoTy(calleeProtoTy) {}
  CGCalleeInfo(GlobalDecl calleeDecl)
      : CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {}

  const FunctionProtoType *getCalleeFunctionProtoType() const {
    return CalleeProtoTy;
  }
  const GlobalDecl getCalleeDecl() const { return CalleeDecl; }
};

/// All available information about a concrete callee.
class CGCallee {
  enum class SpecialKind : uintptr_t {
    Invalid,
    Builtin,
    PseudoDestructor,
    Virtual,

    Last = Virtual
  };

  struct OrdinaryInfoStorage {
    CGCalleeInfo AbstractInfo;
    CGPointerAuthInfo PointerAuthInfo;
  };
  struct BuiltinInfoStorage {
    const FunctionDecl *Decl;
    unsigned ID;
  };
  struct PseudoDestructorInfoStorage {
    const CXXPseudoDestructorExpr *Expr;
  };
  struct VirtualInfoStorage {
    const CallExpr *CE;
    GlobalDecl MD;
    Address Addr;
    llvm::FunctionType *FTy;
  };

  SpecialKind KindOrFunctionPointer;
  union {
    OrdinaryInfoStorage OrdinaryInfo;
    BuiltinInfoStorage BuiltinInfo;
    PseudoDestructorInfoStorage PseudoDestructorInfo;
    VirtualInfoStorage VirtualInfo;
  };

  explicit CGCallee(SpecialKind kind) : KindOrFunctionPointer(kind) {}

  CGCallee(const FunctionDecl *builtinDecl, unsigned builtinID)
      : KindOrFunctionPointer(SpecialKind::Builtin) {
    BuiltinInfo.Decl = builtinDecl;
    BuiltinInfo.ID = builtinID;
  }

public:
  CGCallee() : KindOrFunctionPointer(SpecialKind::Invalid) {}

  /// Construct a callee. Call this constructor directly when this
  /// isn't a direct call.
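  ///
  /// For illustration only (a sketch; FnPtr and FPT are hypothetical values
  /// the caller already has: an llvm::Value* function pointer and the
  /// callee's FunctionProtoType):
  /// \code
  ///   CGCalleeInfo CalleeInfo(FPT);
  ///   CGCallee Callee(CalleeInfo, FnPtr);
  /// \endcode
  /// For direct calls, prefer the forDirect() factory functions below.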
  CGCallee(const CGCalleeInfo &abstractInfo, llvm::Value *functionPtr,
           /* FIXME: make parameter pointerAuthInfo mandatory */
           const CGPointerAuthInfo &pointerAuthInfo = CGPointerAuthInfo())
      : KindOrFunctionPointer(
            SpecialKind(reinterpret_cast<uintptr_t>(functionPtr))) {
    OrdinaryInfo.AbstractInfo = abstractInfo;
    OrdinaryInfo.PointerAuthInfo = pointerAuthInfo;
    assert(functionPtr && "configuring callee without function pointer");
    assert(functionPtr->getType()->isPointerTy());
  }

  static CGCallee forBuiltin(unsigned builtinID,
                             const FunctionDecl *builtinDecl) {
    CGCallee result(SpecialKind::Builtin);
    result.BuiltinInfo.Decl = builtinDecl;
    result.BuiltinInfo.ID = builtinID;
    return result;
  }

  static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E) {
    CGCallee result(SpecialKind::PseudoDestructor);
    result.PseudoDestructorInfo.Expr = E;
    return result;
  }

  static CGCallee forDirect(llvm::Constant *functionPtr,
                            const CGCalleeInfo &abstractInfo = CGCalleeInfo()) {
    return CGCallee(abstractInfo, functionPtr);
  }

  static CGCallee forDirect(llvm::FunctionCallee functionPtr,
                            const CGCalleeInfo &abstractInfo = CGCalleeInfo()) {
    return CGCallee(abstractInfo, functionPtr.getCallee());
  }

  static CGCallee forVirtual(const CallExpr *CE, GlobalDecl MD, Address Addr,
                             llvm::FunctionType *FTy) {
    CGCallee result(SpecialKind::Virtual);
    result.VirtualInfo.CE = CE;
    result.VirtualInfo.MD = MD;
    result.VirtualInfo.Addr = Addr;
    result.VirtualInfo.FTy = FTy;
    return result;
  }

  bool isBuiltin() const {
    return KindOrFunctionPointer == SpecialKind::Builtin;
  }
  const FunctionDecl *getBuiltinDecl() const {
    assert(isBuiltin());
    return BuiltinInfo.Decl;
  }
  unsigned getBuiltinID() const {
    assert(isBuiltin());
    return BuiltinInfo.ID;
  }

  bool isPseudoDestructor() const {
    return KindOrFunctionPointer == SpecialKind::PseudoDestructor;
  }
  const CXXPseudoDestructorExpr *getPseudoDestructorExpr() const {
    assert(isPseudoDestructor());
    return PseudoDestructorInfo.Expr;
  }

  bool isOrdinary() const {
    return uintptr_t(KindOrFunctionPointer) > uintptr_t(SpecialKind::Last);
  }
  CGCalleeInfo getAbstractInfo() const {
    if (isVirtual())
      return VirtualInfo.MD;
    assert(isOrdinary());
    return OrdinaryInfo.AbstractInfo;
  }
  const CGPointerAuthInfo &getPointerAuthInfo() const {
    assert(isOrdinary());
    return OrdinaryInfo.PointerAuthInfo;
  }
  llvm::Value *getFunctionPointer() const {
    assert(isOrdinary());
    return reinterpret_cast<llvm::Value *>(uintptr_t(KindOrFunctionPointer));
  }
  void setFunctionPointer(llvm::Value *functionPtr) {
    assert(isOrdinary());
    KindOrFunctionPointer =
        SpecialKind(reinterpret_cast<uintptr_t>(functionPtr));
  }
  void setPointerAuthInfo(CGPointerAuthInfo PointerAuth) {
    assert(isOrdinary());
    OrdinaryInfo.PointerAuthInfo = PointerAuth;
  }

  bool isVirtual() const {
    return KindOrFunctionPointer == SpecialKind::Virtual;
  }
  const CallExpr *getVirtualCallExpr() const {
    assert(isVirtual());
    return VirtualInfo.CE;
  }
  GlobalDecl getVirtualMethodDecl() const {
    assert(isVirtual());
    return VirtualInfo.MD;
  }
  Address getThisAddress() const {
    assert(isVirtual());
    return VirtualInfo.Addr;
  }
  llvm::FunctionType *getVirtualFunctionType() const {
    assert(isVirtual());
    return VirtualInfo.FTy;
  }

  /// If this is a delayed callee computation of some sort, prepare
  /// a concrete callee.
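  ///
  /// Illustrative sketch only: a virtual callee built with forVirtual() has
  /// no loadable function pointer yet, so call emission typically does
  /// \code
  ///   CGCallee Concrete = Callee.prepareConcreteCallee(CGF);
  ///   llvm::Value *FnPtr = Concrete.getFunctionPointer();
  /// \endcode
  /// A callee that is already ordinary is expected to come back unchanged.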
  CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const;
};
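
/// A single argument to a call: semantically either an RValue, or an LValue
/// from which the argument is loaded, together with the argument's QualType.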
struct CallArg {
private:
  union {
    RValue RV;
    LValue LV; /// The argument is semantically a load from this l-value.
  };
  bool HasLV;

  /// A data-flow flag to make sure getRValue and/or copyInto are not
  /// called twice for duplicated IR emission.
  mutable bool IsUsed;

public:
  QualType Ty;
  CallArg(RValue rv, QualType ty)
      : RV(rv), HasLV(false), IsUsed(false), Ty(ty) {}
  CallArg(LValue lv, QualType ty)
      : LV(lv), HasLV(true), IsUsed(false), Ty(ty) {}
  bool hasLValue() const { return HasLV; }
  QualType getType() const { return Ty; }

  /// \returns an independent RValue. If the CallArg contains an LValue,
  /// a temporary copy is returned.
  RValue getRValue(CodeGenFunction &CGF) const;

  LValue getKnownLValue() const {
    assert(HasLV && !IsUsed);
    return LV;
  }
  RValue getKnownRValue() const {
    assert(!HasLV && !IsUsed);
    return RV;
  }
  void setRValue(RValue _RV) {
    assert(!HasLV);
    RV = _RV;
  }

  bool isAggregate() const { return HasLV || RV.isAggregate(); }

  void copyInto(CodeGenFunction &CGF, Address A) const;
};

/// CallArgList - Type for representing both the value and type of
/// arguments in a call.
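///
/// A minimal sketch of populating an argument list (the values and types are
/// placeholders the caller would already have):
/// \code
///   CallArgList Args;
///   Args.add(RValue::get(SomeScalar), SomeScalarType);
///   Args.addUncopiedAggregate(SomeAggLValue, SomeAggType);
/// \endcode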
class CallArgList : public SmallVector<CallArg, 8> {
public:
  CallArgList() = default;

  struct Writeback {
    /// The original argument. Note that the argument l-value
    /// is potentially null.
    LValue Source;

    /// The temporary alloca.
    Address Temporary;

    /// A value to "use" after the writeback, or null.
    llvm::Value *ToUse;

    /// An expression (optional) that performs the writeback with any required
    /// casting.
    const Expr *WritebackExpr;

    // Size for optional lifetime end on the temporary.
    llvm::Value *LifetimeSz;
  };

  struct CallArgCleanup {
    EHScopeStack::stable_iterator Cleanup;

    /// The "is active" insertion point. This instruction is temporary and
    /// will be removed after insertion.
    llvm::Instruction *IsActiveIP;
  };

  void add(RValue rvalue, QualType type) { push_back(CallArg(rvalue, type)); }

  void addUncopiedAggregate(LValue LV, QualType type) {
    push_back(CallArg(LV, type));
  }

  /// Add all the arguments from another CallArgList to this one. After doing
  /// this, the old CallArgList retains its list of arguments, but must not
  /// be used to emit a call.
  void addFrom(const CallArgList &other) {
    llvm::append_range(*this, other);
    llvm::append_range(Writebacks, other.Writebacks);
    llvm::append_range(CleanupsToDeactivate, other.CleanupsToDeactivate);
    assert(!(StackBase && other.StackBase) && "can't merge stackbases");
    if (!StackBase)
      StackBase = other.StackBase;
  }

  void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse,
                    const Expr *writebackExpr = nullptr,
                    llvm::Value *lifetimeSz = nullptr) {
    Writeback writeback = {srcLV, temporary, toUse, writebackExpr, lifetimeSz};
    Writebacks.push_back(writeback);
  }

  bool hasWritebacks() const { return !Writebacks.empty(); }

  typedef llvm::iterator_range<SmallVectorImpl<Writeback>::const_iterator>
      writeback_const_range;

  writeback_const_range writebacks() const {
    return writeback_const_range(Writebacks.begin(), Writebacks.end());
  }

  void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup,
                                 llvm::Instruction *IsActiveIP) {
    CallArgCleanup ArgCleanup;
    ArgCleanup.Cleanup = Cleanup;
    ArgCleanup.IsActiveIP = IsActiveIP;
    CleanupsToDeactivate.push_back(ArgCleanup);
  }

  ArrayRef<CallArgCleanup> getCleanupsToDeactivate() const {
    return CleanupsToDeactivate;
  }

  void allocateArgumentMemory(CodeGenFunction &CGF);
  llvm::Instruction *getStackBase() const { return StackBase; }
  void freeArgumentMemory(CodeGenFunction &CGF) const;

  /// Returns whether we're using an inalloca struct to pass arguments in
  /// memory.
  bool isUsingInAlloca() const { return StackBase; }

  // Support reversing writebacks for MSVC ABI.
  void reverseWritebacks() {
    std::reverse(Writebacks.begin(), Writebacks.end());
  }

private:
  SmallVector<Writeback, 1> Writebacks;

  /// Deactivate these cleanups immediately before making the call. This
  /// is used to clean up objects that are owned by the callee once the call
  /// occurs.
  SmallVector<CallArgCleanup, 1> CleanupsToDeactivate;

  /// The stacksave call. It dominates all of the argument evaluation.
  llvm::CallInst *StackBase = nullptr;
};

/// FunctionArgList - Type for representing both the decl and type
/// of parameters to a function. The decl must be either a
/// ParmVarDecl or ImplicitParamDecl.
class FunctionArgList : public SmallVector<const VarDecl *, 16> {};

/// ReturnValueSlot - Contains the address where the return value of a
/// function can be stored, and whether the address is volatile or not.
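///
/// For illustration (DestAddr is a hypothetical destination Address the
/// caller has already materialized):
/// \code
///   ReturnValueSlot Slot(DestAddr, /*IsVolatile=*/false);
/// \endcode
/// A default-constructed slot is "null" (isNull() returns true), meaning no
/// caller-provided destination.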
class ReturnValueSlot {
  Address Addr = Address::invalid();

  // Return value slot flags
  LLVM_PREFERRED_TYPE(bool)
  unsigned IsVolatile : 1;
  LLVM_PREFERRED_TYPE(bool)
  unsigned IsUnused : 1;
  LLVM_PREFERRED_TYPE(bool)
  unsigned IsExternallyDestructed : 1;

public:
  ReturnValueSlot()
      : IsVolatile(false), IsUnused(false), IsExternallyDestructed(false) {}
  ReturnValueSlot(Address Addr, bool IsVolatile, bool IsUnused = false,
                  bool IsExternallyDestructed = false)
      : Addr(Addr), IsVolatile(IsVolatile), IsUnused(IsUnused),
        IsExternallyDestructed(IsExternallyDestructed) {}

  bool isNull() const { return !Addr.isValid(); }
  bool isVolatile() const { return IsVolatile; }
  Address getValue() const { return Addr; }
  bool isUnused() const { return IsUnused; }
  bool isExternallyDestructed() const { return IsExternallyDestructed; }
  Address getAddress() const { return Addr; }
};

/// Adds attributes to \p F according to our \p CodeGenOpts and \p LangOpts, as
/// though we had emitted it ourselves. We remove any attributes on F that
/// conflict with the attributes we add here.
///
/// This is useful for adding attrs to bitcode modules that you want to link
/// with but don't control, such as CUDA's libdevice. When linking with such
/// a bitcode library, you might want to set e.g. its functions'
/// "unsafe-fp-math" attribute to match the attr of the functions you're
/// codegen'ing. Otherwise, LLVM will interpret the bitcode module's lack of
/// unsafe-fp-math attrs as tantamount to unsafe-fp-math=false, and then LLVM
/// will propagate unsafe-fp-math=false up to every transitive caller of a
/// function in the bitcode library!
///
/// With the exception of fast-math attrs, this will only make the attributes
/// on the function more conservative. But it's unsafe to call this on a
/// function which relies on particular fast-math attributes for correctness.
/// It's up to you to ensure that this is safe.
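///
/// A usage sketch (illustrative; LibdeviceModule and CGM stand for a loaded
/// bitcode module and the active CodeGenModule):
/// \code
///   for (llvm::Function &F : *LibdeviceModule)
///     if (!F.isDeclaration())
///       mergeDefaultFunctionDefinitionAttributes(
///           F, CGM.getCodeGenOpts(), CGM.getLangOpts(),
///           CGM.getTarget().getTargetOpts(), /*WillInternalize=*/true);
/// \endcode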
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F,
                                              const CodeGenOptions &CodeGenOpts,
                                              const LangOptions &LangOpts,
                                              const TargetOptions &TargetOpts,
                                              bool WillInternalize);

enum class FnInfoOpts {
  None = 0,
  IsInstanceMethod = 1 << 0,
  IsChainCall = 1 << 1,
  IsDelegateCall = 1 << 2,
};
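
// FnInfoOpts is a bitmask; the operators below combine and test flags.
// For illustration:
//   FnInfoOpts Opts = FnInfoOpts::IsInstanceMethod | FnInfoOpts::IsChainCall;
//   bool IsChain = (Opts & FnInfoOpts::IsChainCall) != FnInfoOpts::None;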

inline FnInfoOpts operator|(FnInfoOpts A, FnInfoOpts B) {
  return static_cast<FnInfoOpts>(llvm::to_underlying(A) |
                                 llvm::to_underlying(B));
}

inline FnInfoOpts operator&(FnInfoOpts A, FnInfoOpts B) {
  return static_cast<FnInfoOpts>(llvm::to_underlying(A) &
                                 llvm::to_underlying(B));
}

inline FnInfoOpts &operator|=(FnInfoOpts &A, FnInfoOpts B) {
  A = A | B;
  return A;
}

inline FnInfoOpts &operator&=(FnInfoOpts &A, FnInfoOpts B) {
  A = A & B;
  return A;
}

} // end namespace CodeGen
} // end namespace clang

#endif