1//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the internal per-function state used for llvm translation.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
14#define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
15
16#include "CGBuilder.h"
17#include "CGLoopInfo.h"
18#include "CGValue.h"
19#include "CodeGenModule.h"
20#include "EHScopeStack.h"
21#include "SanitizerHandler.h"
22#include "VarBypassDetector.h"
23#include "clang/AST/CharUnits.h"
24#include "clang/AST/CurrentSourceLocExprScope.h"
25#include "clang/AST/ExprCXX.h"
26#include "clang/AST/ExprObjC.h"
27#include "clang/AST/ExprOpenMP.h"
28#include "clang/AST/StmtOpenACC.h"
29#include "clang/AST/StmtOpenMP.h"
30#include "clang/AST/StmtSYCL.h"
31#include "clang/AST/Type.h"
32#include "clang/Basic/ABI.h"
33#include "clang/Basic/CapturedStmt.h"
34#include "clang/Basic/CodeGenOptions.h"
35#include "clang/Basic/OpenMPKinds.h"
36#include "clang/Basic/TargetInfo.h"
37#include "llvm/ADT/ArrayRef.h"
38#include "llvm/ADT/DenseMap.h"
39#include "llvm/ADT/MapVector.h"
40#include "llvm/ADT/SmallVector.h"
41#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
42#include "llvm/IR/Instructions.h"
43#include "llvm/IR/ValueHandle.h"
44#include "llvm/Support/Debug.h"
45#include "llvm/Transforms/Utils/SanitizerStats.h"
46#include <optional>
47
48namespace llvm {
49class BasicBlock;
50class ConvergenceControlInst;
51class LLVMContext;
52class MDNode;
53class SwitchInst;
54class Twine;
55class Value;
56class CanonicalLoopInfo;
57} // namespace llvm
58
59namespace clang {
60class ASTContext;
61class CXXDestructorDecl;
62class CXXForRangeStmt;
63class CXXTryStmt;
64class Decl;
65class LabelDecl;
66class FunctionDecl;
67class FunctionProtoType;
68class LabelStmt;
69class ObjCContainerDecl;
70class ObjCInterfaceDecl;
71class ObjCIvarDecl;
72class ObjCMethodDecl;
73class ObjCImplementationDecl;
74class ObjCPropertyImplDecl;
75class TargetInfo;
76class VarDecl;
77class ObjCForCollectionStmt;
78class ObjCAtTryStmt;
79class ObjCAtThrowStmt;
80class ObjCAtSynchronizedStmt;
81class ObjCAutoreleasePoolStmt;
82class OMPUseDevicePtrClause;
83class OMPUseDeviceAddrClause;
84class SVETypeFlags;
85class OMPExecutableDirective;
86
87namespace analyze_os_log {
88class OSLogBufferLayout;
89}
90
91namespace CodeGen {
92class CodeGenTypes;
93class CodeGenPGO;
94class CGCallee;
95class CGFunctionInfo;
96class CGBlockInfo;
97class CGCXXABI;
98class BlockByrefHelpers;
99class BlockByrefInfo;
100class BlockFieldFlags;
101class RegionCodeGenTy;
102class TargetCodeGenInfo;
103struct OMPTaskDataTy;
104struct CGCoroData;
105
106// clang-format off
107/// The kind of evaluation to perform on values of a particular
108/// type. Basically, is the code in CGExprScalar, CGExprComplex, or
109/// CGExprAgg?
110///
111/// TODO: should vectors maybe be split out into their own thing?
112enum TypeEvaluationKind {
113 TEK_Scalar,
114 TEK_Complex,
115 TEK_Aggregate
116};
117// clang-format on
118
119/// Helper class with most of the code for saving a value for a
120/// conditional expression cleanup.
121struct DominatingLLVMValue {
122 typedef llvm::PointerIntPair<llvm::Value *, 1, bool> saved_type;
123
124 /// Answer whether the given value needs extra work to be saved.
125 static bool needsSaving(llvm::Value *value) {
126 if (!value)
127 return false;
128
129 // If it's not an instruction, we don't need to save.
130 if (!isa<llvm::Instruction>(Val: value))
131 return false;
132
133 // If it's an instruction in the entry block, we don't need to save.
134 llvm::BasicBlock *block = cast<llvm::Instruction>(Val: value)->getParent();
135 return (block != &block->getParent()->getEntryBlock());
136 }
137
138 static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
139 static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
140};
141
142/// A partial specialization of DominatingValue for llvm::Values that
143/// might be llvm::Instructions.
144template <class T> struct DominatingPointer<T, true> : DominatingLLVMValue {
145 typedef T *type;
146 static type restore(CodeGenFunction &CGF, saved_type value) {
147 return static_cast<T *>(DominatingLLVMValue::restore(CGF, value));
148 }
149};
150
151/// A specialization of DominatingValue for Address.
152template <> struct DominatingValue<Address> {
153 typedef Address type;
154
155 struct saved_type {
156 DominatingLLVMValue::saved_type BasePtr;
157 llvm::Type *ElementType;
158 CharUnits Alignment;
159 DominatingLLVMValue::saved_type Offset;
160 llvm::PointerType *EffectiveType;
161 };
162
163 static bool needsSaving(type value) {
164 if (DominatingLLVMValue::needsSaving(value: value.getBasePointer()) ||
165 DominatingLLVMValue::needsSaving(value: value.getOffset()))
166 return true;
167 return false;
168 }
169 static saved_type save(CodeGenFunction &CGF, type value) {
170 return {.BasePtr: DominatingLLVMValue::save(CGF, value: value.getBasePointer()),
171 .ElementType: value.getElementType(), .Alignment: value.getAlignment(),
172 .Offset: DominatingLLVMValue::save(CGF, value: value.getOffset()), .EffectiveType: value.getType()};
173 }
174 static type restore(CodeGenFunction &CGF, saved_type value) {
175 return Address(DominatingLLVMValue::restore(CGF, value: value.BasePtr),
176 value.ElementType, value.Alignment, CGPointerAuthInfo(),
177 DominatingLLVMValue::restore(CGF, value: value.Offset));
178 }
179};
180
181/// A specialization of DominatingValue for RValue.
182template <> struct DominatingValue<RValue> {
183 typedef RValue type;
184 class saved_type {
185 enum Kind {
186 ScalarLiteral,
187 ScalarAddress,
188 AggregateLiteral,
189 AggregateAddress,
190 ComplexAddress
191 };
192 union {
193 struct {
194 DominatingLLVMValue::saved_type first, second;
195 } Vals;
196 DominatingValue<Address>::saved_type AggregateAddr;
197 };
198 LLVM_PREFERRED_TYPE(Kind)
199 unsigned K : 3;
200
201 saved_type(DominatingLLVMValue::saved_type Val1, unsigned K)
202 : Vals{.first: Val1, .second: DominatingLLVMValue::saved_type()}, K(K) {}
203
204 saved_type(DominatingLLVMValue::saved_type Val1,
205 DominatingLLVMValue::saved_type Val2)
206 : Vals{.first: Val1, .second: Val2}, K(ComplexAddress) {}
207
208 saved_type(DominatingValue<Address>::saved_type AggregateAddr, unsigned K)
209 : AggregateAddr(AggregateAddr), K(K) {}
210
211 public:
212 static bool needsSaving(RValue value);
213 static saved_type save(CodeGenFunction &CGF, RValue value);
214 RValue restore(CodeGenFunction &CGF);
215
216 // implementations in CGCleanup.cpp
217 };
218
219 static bool needsSaving(type value) { return saved_type::needsSaving(value); }
220 static saved_type save(CodeGenFunction &CGF, type value) {
221 return saved_type::save(CGF, value);
222 }
223 static type restore(CodeGenFunction &CGF, saved_type value) {
224 return value.restore(CGF);
225 }
226};
227
228/// A scoped helper to set the current source atom group for
229/// CGDebugInfo::addInstToCurrentSourceAtom. A source atom is a source construct
230/// that is "interesting" for debug stepping purposes. We use an atom group
231/// number to track the instruction(s) that implement the functionality for the
232/// atom, plus backup instructions/source locations.
class ApplyAtomGroup {
  // Atom group number in effect before this scope was entered; presumably
  // restored by the destructor (ctor/dtor are out of line) — TODO confirm.
  uint64_t OriginalAtom = 0;
  // Debug-info context being scoped; may be null, in which case the scope
  // is a no-op.
  CGDebugInfo *DI = nullptr;

  // Scope objects are neither copyable nor assignable.
  ApplyAtomGroup(const ApplyAtomGroup &) = delete;
  void operator=(const ApplyAtomGroup &) = delete;

public:
  ApplyAtomGroup(CGDebugInfo *DI);
  ~ApplyAtomGroup();
};
244
245/// CodeGenFunction - This class organizes the per-function state that is used
246/// while generating LLVM code.
247class CodeGenFunction : public CodeGenTypeCache {
248 CodeGenFunction(const CodeGenFunction &) = delete;
249 void operator=(const CodeGenFunction &) = delete;
250
251 friend class CGCXXABI;
252
253public:
254 /// A jump destination is an abstract label, branching to which may
255 /// require a jump out through normal cleanups.
256 struct JumpDest {
257 JumpDest() : Block(nullptr), Index(0) {}
258 JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth,
259 unsigned Index)
260 : Block(Block), ScopeDepth(Depth), Index(Index) {}
261
262 bool isValid() const { return Block != nullptr; }
263 llvm::BasicBlock *getBlock() const { return Block; }
264 EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
265 unsigned getDestIndex() const { return Index; }
266
267 // This should be used cautiously.
268 void setScopeDepth(EHScopeStack::stable_iterator depth) {
269 ScopeDepth = depth;
270 }
271
272 private:
273 llvm::BasicBlock *Block;
274 EHScopeStack::stable_iterator ScopeDepth;
275 unsigned Index;
276 };
277
278 CodeGenModule &CGM; // Per-module state.
279 const TargetInfo &Target;
280
281 // For EH/SEH outlined funclets, this field points to parent's CGF
282 CodeGenFunction *ParentCGF = nullptr;
283
284 typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
285 LoopInfoStack LoopStack;
286 CGBuilderTy Builder;
287
288 // Stores variables for which we can't generate correct lifetime markers
289 // because of jumps.
290 VarBypassDetector Bypasses;
291
292 /// List of recently emitted OMPCanonicalLoops.
293 ///
294 /// Since OMPCanonicalLoops are nested inside other statements (in particular
295 /// CapturedStmt generated by OMPExecutableDirective and non-perfectly nested
296 /// loops), we cannot directly call OMPEmitOMPCanonicalLoop and receive its
297 /// llvm::CanonicalLoopInfo. Instead, we call EmitStmt and any
298 /// OMPEmitOMPCanonicalLoop called by it will add its CanonicalLoopInfo to
299 /// this stack when done. Entering a new loop requires clearing this list; it
300 /// either means we start parsing a new loop nest (in which case the previous
301 /// loop nest goes out of scope) or a second loop in the same level in which
302 /// case it would be ambiguous into which of the two (or more) loops the loop
303 /// nest would extend.
304 SmallVector<llvm::CanonicalLoopInfo *, 4> OMPLoopNestStack;
305
306 /// Stack to track the controlled convergence tokens.
307 SmallVector<llvm::ConvergenceControlInst *, 4> ConvergenceTokenStack;
308
309 /// Number of nested loop to be consumed by the last surrounding
310 /// loop-associated directive.
311 int ExpectedOMPLoopDepth = 0;
312
313 // CodeGen lambda for loops and support for ordered clause
314 typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
315 JumpDest)>
316 CodeGenLoopTy;
317 typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
318 const unsigned, const bool)>
319 CodeGenOrderedTy;
320
321 // Codegen lambda for loop bounds in worksharing loop constructs
322 typedef llvm::function_ref<std::pair<LValue, LValue>(
323 CodeGenFunction &, const OMPExecutableDirective &S)>
324 CodeGenLoopBoundsTy;
325
326 // Codegen lambda for loop bounds in dispatch-based loop implementation
327 typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
328 CodeGenFunction &, const OMPExecutableDirective &S, Address LB,
329 Address UB)>
330 CodeGenDispatchBoundsTy;
331
332 /// CGBuilder insert helper. This function is called after an
333 /// instruction is created using Builder.
334 void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
335 llvm::BasicBlock::iterator InsertPt) const;
336
337 /// CurFuncDecl - Holds the Decl for the current outermost
338 /// non-closure context.
339 const Decl *CurFuncDecl = nullptr;
340 /// CurCodeDecl - This is the inner-most code context, which includes blocks.
341 const Decl *CurCodeDecl = nullptr;
342 const CGFunctionInfo *CurFnInfo = nullptr;
343 QualType FnRetTy;
344 llvm::Function *CurFn = nullptr;
345
346 /// If a cast expression is being visited, this holds the current cast's
347 /// expression.
348 const CastExpr *CurCast = nullptr;
349
350 /// Save Parameter Decl for coroutine.
351 llvm::SmallVector<const ParmVarDecl *, 4> FnArgs;
352
353 // Holds coroutine data if the current function is a coroutine. We use a
354 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
355 // in this header.
  struct CGCoroInfo {
    /// Owning pointer to the coroutine state; CGCoroData is only
    /// forward-declared in this header.
    std::unique_ptr<CGCoroData> Data;
    /// Whether codegen is currently inside a coroutine suspend block
    /// (queried via inSuspendBlock()).
    bool InSuspendBlock = false;
    /// Ctor/dtor are declared out of line because CGCoroData is an
    /// incomplete type here.
    CGCoroInfo();
    ~CGCoroInfo();
  };
362 CGCoroInfo CurCoro;
363
364 bool isCoroutine() const { return CurCoro.Data != nullptr; }
365
366 bool inSuspendBlock() const {
367 return isCoroutine() && CurCoro.InSuspendBlock;
368 }
369
370 // Holds FramePtr for await_suspend wrapper generation,
371 // so that __builtin_coro_frame call can be lowered
372 // directly to value of its second argument
373 struct AwaitSuspendWrapperInfo {
374 llvm::Value *FramePtr = nullptr;
375 };
376 AwaitSuspendWrapperInfo CurAwaitSuspendWrapper;
377
378 // Generates wrapper function for `llvm.coro.await.suspend.*` intrinisics.
379 // It encapsulates SuspendExpr in a function, to separate it's body
380 // from the main coroutine to avoid miscompilations. Intrinisic
381 // is lowered to this function call in CoroSplit pass
382 // Function signature is:
383 // <type> __await_suspend_wrapper_<name>(ptr %awaiter, ptr %hdl)
384 // where type is one of (void, i1, ptr)
385 llvm::Function *generateAwaitSuspendWrapper(Twine const &CoroName,
386 Twine const &SuspendPointName,
387 CoroutineSuspendExpr const &S);
388
389 /// CurGD - The GlobalDecl for the current function being compiled.
390 GlobalDecl CurGD;
391
392 /// PrologueCleanupDepth - The cleanup depth enclosing all the
393 /// cleanups associated with the parameters.
394 EHScopeStack::stable_iterator PrologueCleanupDepth;
395
396 /// ReturnBlock - Unified return block.
397 JumpDest ReturnBlock;
398
399 /// ReturnValue - The temporary alloca to hold the return
400 /// value. This is invalid iff the function has no return value.
401 Address ReturnValue = Address::invalid();
402
403 /// ReturnValuePointer - The temporary alloca to hold a pointer to sret.
404 /// This is invalid if sret is not in use.
405 Address ReturnValuePointer = Address::invalid();
406
407 /// If a return statement is being visited, this holds the return statment's
408 /// result expression.
409 const Expr *RetExpr = nullptr;
410
411 /// Return true if a label was seen in the current scope.
412 bool hasLabelBeenSeenInCurrentScope() const {
413 if (CurLexicalScope)
414 return CurLexicalScope->hasLabels();
415 return !LabelMap.empty();
416 }
417
418 /// AllocaInsertPoint - This is an instruction in the entry block before which
419 /// we prefer to insert allocas.
420 llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
421
422private:
423 /// PostAllocaInsertPt - This is a place in the prologue where code can be
424 /// inserted that will be dominated by all the static allocas. This helps
425 /// achieve two things:
426 /// 1. Contiguity of all static allocas (within the prologue) is maintained.
427 /// 2. All other prologue code (which are dominated by static allocas) do
428 /// appear in the source order immediately after all static allocas.
429 ///
430 /// PostAllocaInsertPt will be lazily created when it is *really* required.
431 llvm::AssertingVH<llvm::Instruction> PostAllocaInsertPt = nullptr;
432
433public:
434 /// Return PostAllocaInsertPt. If it is not yet created, then insert it
435 /// immediately after AllocaInsertPt.
436 llvm::Instruction *getPostAllocaInsertPoint() {
437 if (!PostAllocaInsertPt) {
438 assert(AllocaInsertPt &&
439 "Expected static alloca insertion point at function prologue");
440 assert(AllocaInsertPt->getParent()->isEntryBlock() &&
441 "EBB should be entry block of the current code gen function");
442 PostAllocaInsertPt = AllocaInsertPt->clone();
443 PostAllocaInsertPt->setName("postallocapt");
444 PostAllocaInsertPt->insertAfter(InsertPos: AllocaInsertPt->getIterator());
445 }
446
447 return PostAllocaInsertPt;
448 }
449
450 // Try to preserve the source's name to make IR more readable.
451 llvm::Value *performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy) {
452 return Builder.CreateAddrSpaceCast(
453 V: Src, DestTy, Name: Src->hasName() ? Src->getName() + ".ascast" : "");
454 }
455
456 /// API for captured statement code generation.
457 class CGCapturedStmtInfo {
458 public:
459 explicit CGCapturedStmtInfo(CapturedRegionKind K = CR_Default)
460 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
461 explicit CGCapturedStmtInfo(const CapturedStmt &S,
462 CapturedRegionKind K = CR_Default)
463 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
464
465 RecordDecl::field_iterator Field =
466 S.getCapturedRecordDecl()->field_begin();
467 for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
468 E = S.capture_end();
469 I != E; ++I, ++Field) {
470 if (I->capturesThis())
471 CXXThisFieldDecl = *Field;
472 else if (I->capturesVariable())
473 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
474 else if (I->capturesVariableByCopy())
475 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
476 }
477 }
478
479 virtual ~CGCapturedStmtInfo();
480
481 CapturedRegionKind getKind() const { return Kind; }
482
483 virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
484 // Retrieve the value of the context parameter.
485 virtual llvm::Value *getContextValue() const { return ThisValue; }
486
487 /// Lookup the captured field decl for a variable.
488 virtual const FieldDecl *lookup(const VarDecl *VD) const {
489 return CaptureFields.lookup(Val: VD->getCanonicalDecl());
490 }
491
492 bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
493 virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
494
495 static bool classof(const CGCapturedStmtInfo *) { return true; }
496
497 /// Emit the captured statement body.
498 virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
499 CGF.incrementProfileCounter(S);
500 CGF.EmitStmt(S);
501 }
502
503 /// Get the name of the capture helper.
504 virtual StringRef getHelperName() const { return "__captured_stmt"; }
505
506 /// Get the CaptureFields
507 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> getCaptureFields() {
508 return CaptureFields;
509 }
510
511 private:
512 /// The kind of captured statement being generated.
513 CapturedRegionKind Kind;
514
515 /// Keep the map between VarDecl and FieldDecl.
516 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
517
518 /// The base address of the captured record, passed in as the first
519 /// argument of the parallel region function.
520 llvm::Value *ThisValue;
521
522 /// Captured 'this' type.
523 FieldDecl *CXXThisFieldDecl;
524 };
525 CGCapturedStmtInfo *CapturedStmtInfo = nullptr;
526
527 /// RAII for correct setting/restoring of CapturedStmtInfo.
528 class CGCapturedStmtRAII {
529 private:
530 CodeGenFunction &CGF;
531 CGCapturedStmtInfo *PrevCapturedStmtInfo;
532
533 public:
534 CGCapturedStmtRAII(CodeGenFunction &CGF,
535 CGCapturedStmtInfo *NewCapturedStmtInfo)
536 : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
537 CGF.CapturedStmtInfo = NewCapturedStmtInfo;
538 }
539 ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
540 };
541
542 /// An abstract representation of regular/ObjC call/message targets.
543 class AbstractCallee {
544 /// The function declaration of the callee.
545 const Decl *CalleeDecl;
546
547 public:
548 AbstractCallee() : CalleeDecl(nullptr) {}
549 AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
550 AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
551 bool hasFunctionDecl() const {
552 return isa_and_nonnull<FunctionDecl>(Val: CalleeDecl);
553 }
554 const Decl *getDecl() const { return CalleeDecl; }
555 unsigned getNumParams() const {
556 if (const auto *FD = dyn_cast<FunctionDecl>(Val: CalleeDecl))
557 return FD->getNumParams();
558 return cast<ObjCMethodDecl>(Val: CalleeDecl)->param_size();
559 }
560 const ParmVarDecl *getParamDecl(unsigned I) const {
561 if (const auto *FD = dyn_cast<FunctionDecl>(Val: CalleeDecl))
562 return FD->getParamDecl(i: I);
563 return *(cast<ObjCMethodDecl>(Val: CalleeDecl)->param_begin() + I);
564 }
565 };
566
567 /// Sanitizers enabled for this function.
568 SanitizerSet SanOpts;
569
570 /// True if CodeGen currently emits code implementing sanitizer checks.
571 bool IsSanitizerScope = false;
572
  /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
  class SanitizerScope {
    /// The function whose IsSanitizerScope flag is being toggled.
    CodeGenFunction *CGF;

  public:
    /// Ctor/dtor are defined out of line.
    SanitizerScope(CodeGenFunction *CGF);
    ~SanitizerScope();
  };
581
582 /// In C++, whether we are code generating a thunk. This controls whether we
583 /// should emit cleanups.
584 bool CurFuncIsThunk = false;
585
586 /// In ARC, whether we should autorelease the return value.
587 bool AutoreleaseResult = false;
588
589 /// Whether we processed a Microsoft-style asm block during CodeGen. These can
590 /// potentially set the return value.
591 bool SawAsmBlock = false;
592
593 GlobalDecl CurSEHParent;
594
595 /// True if the current function is an outlined SEH helper. This can be a
596 /// finally block or filter expression.
597 bool IsOutlinedSEHHelper = false;
598
599 /// True if CodeGen currently emits code inside presereved access index
600 /// region.
601 bool IsInPreservedAIRegion = false;
602
603 /// True if the current statement has nomerge attribute.
604 bool InNoMergeAttributedStmt = false;
605
606 /// True if the current statement has noinline attribute.
607 bool InNoInlineAttributedStmt = false;
608
609 /// True if the current statement has always_inline attribute.
610 bool InAlwaysInlineAttributedStmt = false;
611
612 /// True if the current statement has noconvergent attribute.
613 bool InNoConvergentAttributedStmt = false;
614
615 /// HLSL Branch attribute.
616 HLSLControlFlowHintAttr::Spelling HLSLControlFlowAttr =
617 HLSLControlFlowHintAttr::SpellingNotCalculated;
618
619 // The CallExpr within the current statement that the musttail attribute
620 // applies to. nullptr if there is no 'musttail' on the current statement.
621 const CallExpr *MustTailCall = nullptr;
622
623 /// Returns true if a function must make progress, which means the
624 /// mustprogress attribute can be added.
625 bool checkIfFunctionMustProgress() {
626 if (CGM.getCodeGenOpts().getFiniteLoops() ==
627 CodeGenOptions::FiniteLoopsKind::Never)
628 return false;
629
630 // C++11 and later guarantees that a thread eventually will do one of the
631 // following (C++11 [intro.multithread]p24 and C++17 [intro.progress]p1):
632 // - terminate,
633 // - make a call to a library I/O function,
634 // - perform an access through a volatile glvalue, or
635 // - perform a synchronization operation or an atomic operation.
636 //
637 // Hence each function is 'mustprogress' in C++11 or later.
638 return getLangOpts().CPlusPlus11;
639 }
640
641 /// Returns true if a loop must make progress, which means the mustprogress
642 /// attribute can be added. \p HasConstantCond indicates whether the branch
643 /// condition is a known constant.
644 bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody);
645
646 const CodeGen::CGBlockInfo *BlockInfo = nullptr;
647 llvm::Value *BlockPointer = nullptr;
648
649 llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
650 FieldDecl *LambdaThisCaptureField = nullptr;
651
652 /// A mapping from NRVO variables to the flags used to indicate
653 /// when the NRVO has been applied to this variable.
654 llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
655
656 EHScopeStack EHStack;
657 llvm::SmallVector<char, 256> LifetimeExtendedCleanupStack;
658
659 // A stack of cleanups which were added to EHStack but have to be deactivated
660 // later before being popped or emitted. These are usually deactivated on
661 // exiting a `CleanupDeactivationScope` scope. For instance, after a
662 // full-expr.
663 //
664 // These are specially useful for correctly emitting cleanups while
665 // encountering branches out of expression (through stmt-expr or coroutine
666 // suspensions).
667 struct DeferredDeactivateCleanup {
668 EHScopeStack::stable_iterator Cleanup;
669 llvm::Instruction *DominatingIP;
670 };
671 llvm::SmallVector<DeferredDeactivateCleanup> DeferredDeactivationCleanupStack;
672
673 // Enters a new scope for capturing cleanups which are deferred to be
674 // deactivated, all of which will be deactivated once the scope is exited.
675 struct CleanupDeactivationScope {
676 CodeGenFunction &CGF;
677 size_t OldDeactivateCleanupStackSize;
678 bool Deactivated;
679 CleanupDeactivationScope(CodeGenFunction &CGF)
680 : CGF(CGF), OldDeactivateCleanupStackSize(
681 CGF.DeferredDeactivationCleanupStack.size()),
682 Deactivated(false) {}
683
684 void ForceDeactivate() {
685 assert(!Deactivated && "Deactivating already deactivated scope");
686 auto &Stack = CGF.DeferredDeactivationCleanupStack;
687 for (size_t I = Stack.size(); I > OldDeactivateCleanupStackSize; I--) {
688 CGF.DeactivateCleanupBlock(Cleanup: Stack[I - 1].Cleanup,
689 DominatingIP: Stack[I - 1].DominatingIP);
690 Stack[I - 1].DominatingIP->eraseFromParent();
691 }
692 Stack.resize(N: OldDeactivateCleanupStackSize);
693 Deactivated = true;
694 }
695
696 ~CleanupDeactivationScope() {
697 if (Deactivated)
698 return;
699 ForceDeactivate();
700 }
701 };
702
703 llvm::SmallVector<const JumpDest *, 2> SEHTryEpilogueStack;
704
705 llvm::Instruction *CurrentFuncletPad = nullptr;
706
707 class CallLifetimeEnd final : public EHScopeStack::Cleanup {
708 bool isRedundantBeforeReturn() override { return true; }
709
710 llvm::Value *Addr;
711
712 public:
713 CallLifetimeEnd(RawAddress addr) : Addr(addr.getPointer()) {}
714
715 void Emit(CodeGenFunction &CGF, Flags flags) override {
716 CGF.EmitLifetimeEnd(Addr);
717 }
718 };
719
720 // We are using objects of this 'cleanup' class to emit fake.use calls
721 // for -fextend-variable-liveness. They are placed at the end of a variable's
722 // scope analogous to lifetime markers.
723 class FakeUse final : public EHScopeStack::Cleanup {
724 Address Addr;
725
726 public:
727 FakeUse(Address addr) : Addr(addr) {}
728
729 void Emit(CodeGenFunction &CGF, Flags flags) override {
730 CGF.EmitFakeUse(Addr);
731 }
732 };
733
  /// Header for data within LifetimeExtendedCleanupStack. Each header is
  /// followed by Size bytes of cleanup payload; alignas keeps headers
  /// 8-byte aligned within the byte stack.
  struct alignas(uint64_t) LifetimeExtendedCleanupHeader {
    /// The size of the following cleanup object.
    unsigned Size;
    /// The kind of cleanup to push.
    LLVM_PREFERRED_TYPE(CleanupKind)
    unsigned Kind : 31;
    /// Whether this is a conditional cleanup.
    LLVM_PREFERRED_TYPE(bool)
    unsigned IsConditional : 1;

    size_t getSize() const { return Size; }
    CleanupKind getKind() const { return (CleanupKind)Kind; }
    bool isConditional() const { return IsConditional; }
  };
749
750 /// i32s containing the indexes of the cleanup destinations.
751 RawAddress NormalCleanupDest = RawAddress::invalid();
752
753 unsigned NextCleanupDestIndex = 1;
754
755 /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
756 llvm::BasicBlock *EHResumeBlock = nullptr;
757
758 /// The exception slot. All landing pads write the current exception pointer
759 /// into this alloca.
760 llvm::Value *ExceptionSlot = nullptr;
761
762 /// The selector slot. Under the MandatoryCleanup model, all landing pads
763 /// write the current selector value into this alloca.
764 llvm::AllocaInst *EHSelectorSlot = nullptr;
765
766 /// A stack of exception code slots. Entering an __except block pushes a slot
767 /// on the stack and leaving pops one. The __exception_code() intrinsic loads
768 /// a value from the top of the stack.
769 SmallVector<Address, 1> SEHCodeSlotStack;
770
771 /// Value returned by __exception_info intrinsic.
772 llvm::Value *SEHInfo = nullptr;
773
774 /// Emits a landing pad for the current EH stack.
775 llvm::BasicBlock *EmitLandingPad();
776
777 llvm::BasicBlock *getInvokeDestImpl();
778
779 /// Parent loop-based directive for scan directive.
780 const OMPExecutableDirective *OMPParentLoopDirectiveForScan = nullptr;
781 llvm::BasicBlock *OMPBeforeScanBlock = nullptr;
782 llvm::BasicBlock *OMPAfterScanBlock = nullptr;
783 llvm::BasicBlock *OMPScanExitBlock = nullptr;
784 llvm::BasicBlock *OMPScanDispatch = nullptr;
785 bool OMPFirstScanLoop = false;
786
787 /// Manages parent directive for scan directives.
788 class ParentLoopDirectiveForScanRegion {
789 CodeGenFunction &CGF;
790 const OMPExecutableDirective *ParentLoopDirectiveForScan;
791
792 public:
793 ParentLoopDirectiveForScanRegion(
794 CodeGenFunction &CGF,
795 const OMPExecutableDirective &ParentLoopDirectiveForScan)
796 : CGF(CGF),
797 ParentLoopDirectiveForScan(CGF.OMPParentLoopDirectiveForScan) {
798 CGF.OMPParentLoopDirectiveForScan = &ParentLoopDirectiveForScan;
799 }
800 ~ParentLoopDirectiveForScanRegion() {
801 CGF.OMPParentLoopDirectiveForScan = ParentLoopDirectiveForScan;
802 }
803 };
804
  /// Save \p value so it can be restored inside a conditionally-evaluated
  /// expression; forwards to the DominatingValue<T> traits for the type.
  template <class T>
  typename DominatingValue<T>::saved_type saveValueInCond(T value) {
    return DominatingValue<T>::save(*this, value);
  }
809
  /// RAII scope for floating-point codegen options. Saved state (FP options,
  /// exception behavior, rounding mode, fast-math flags) is restored on
  /// destruction. Ctors/dtor are defined out of line.
  class CGFPOptionsRAII {
  public:
    CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures);
    // NOTE(review): presumably derives the FPOptions from \p E — confirm in
    // the out-of-line definition.
    CGFPOptionsRAII(CodeGenFunction &CGF, const Expr *E);
    ~CGFPOptionsRAII();

  private:
    /// Shared setup used by both constructors.
    void ConstructorHelper(FPOptions FPFeatures);
    CodeGenFunction &CGF;
    /// Saved state restored by the destructor.
    FPOptions OldFPFeatures;
    llvm::fp::ExceptionBehavior OldExcept;
    llvm::RoundingMode OldRounding;
    std::optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
  };
824 FPOptions CurFPFeatures;
825
826 class CGAtomicOptionsRAII {
827 public:
828 CGAtomicOptionsRAII(CodeGenModule &CGM_, AtomicOptions AO)
829 : CGM(CGM_), SavedAtomicOpts(CGM.getAtomicOpts()) {
830 CGM.setAtomicOpts(AO);
831 }
832 CGAtomicOptionsRAII(CodeGenModule &CGM_, const AtomicAttr *AA)
833 : CGM(CGM_), SavedAtomicOpts(CGM.getAtomicOpts()) {
834 if (!AA)
835 return;
836 AtomicOptions AO = SavedAtomicOpts;
837 for (auto Option : AA->atomicOptions()) {
838 switch (Option) {
839 case AtomicAttr::remote_memory:
840 AO.remote_memory = true;
841 break;
842 case AtomicAttr::no_remote_memory:
843 AO.remote_memory = false;
844 break;
845 case AtomicAttr::fine_grained_memory:
846 AO.fine_grained_memory = true;
847 break;
848 case AtomicAttr::no_fine_grained_memory:
849 AO.fine_grained_memory = false;
850 break;
851 case AtomicAttr::ignore_denormal_mode:
852 AO.ignore_denormal_mode = true;
853 break;
854 case AtomicAttr::no_ignore_denormal_mode:
855 AO.ignore_denormal_mode = false;
856 break;
857 }
858 }
859 CGM.setAtomicOpts(AO);
860 }
861
862 CGAtomicOptionsRAII(const CGAtomicOptionsRAII &) = delete;
863 CGAtomicOptionsRAII &operator=(const CGAtomicOptionsRAII &) = delete;
864 ~CGAtomicOptionsRAII() { CGM.setAtomicOpts(SavedAtomicOpts); }
865
866 private:
867 CodeGenModule &CGM;
868 AtomicOptions SavedAtomicOpts;
869 };
870
871public:
872 /// ObjCEHValueStack - Stack of Objective-C exception values, used for
873 /// rethrows.
874 SmallVector<llvm::Value *, 8> ObjCEHValueStack;
875
  /// A class controlling the emission of a finally block.
  class FinallyInfo {
    /// Where the catchall's edge through the cleanup should go.
    JumpDest RethrowDest;

    /// A function to call to enter the catch.
    llvm::FunctionCallee BeginCatchFn;

    /// An i1 variable indicating whether or not the @finally is
    /// running for an exception.
    llvm::AllocaInst *ForEHVar = nullptr;

    /// An i8* variable into which the exception pointer to rethrow
    /// has been saved.
    llvm::AllocaInst *SavedExnVar = nullptr;

  public:
    /// Begin emission of the finally region for \p Finally; definition is
    /// out of line.
    void enter(CodeGenFunction &CGF, const Stmt *Finally,
               llvm::FunctionCallee beginCatchFn,
               llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn);
    /// Finish emission of the region started by enter().
    void exit(CodeGenFunction &CGF);
  };
898
899 /// Returns true inside SEH __try blocks.
900 bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
901
902 /// Returns true while emitting a cleanuppad.
903 bool isCleanupPadScope() const {
904 return CurrentFuncletPad && isa<llvm::CleanupPadInst>(Val: CurrentFuncletPad);
905 }
906
  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
  /// current full-expression. Safe against the possibility that
  /// we're currently inside a conditionally-evaluated expression.
  template <class T, class... As>
  void pushFullExprCleanup(CleanupKind kind, As... A) {
    // If we're not in a conditional branch, or if none of the
    // arguments requires saving, then use the unconditional cleanup.
    if (!isInConditionalBranch())
      return EHStack.pushCleanup<T>(kind, A...);

    // Stash values in a tuple so we can guarantee the order of saves.
    typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
    SavedTuple Saved{saveValueInCond(A)...};

    // Push a conditional cleanup that restores the saved values, then give
    // it an active flag so it only fires if this branch actually executed.
    typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
    EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
    initFullExprCleanup();
  }
925
  /// Queue a cleanup to be pushed after finishing the current full-expression,
  /// potentially with an active flag.
  template <class T, class... As>
  void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
    // Unconditional context: no active flag is needed.
    if (!isInConditionalBranch())
      return pushCleanupAfterFullExprWithActiveFlag<T>(
          Kind, RawAddress::invalid(), A...);

    // Conditional context: allocate the active flag up front ...
    RawAddress ActiveFlag = createCleanupActiveFlag();
    assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
           "cleanup active flag should never need saving");

    // Stash values in a tuple so we can guarantee the order of saves.
    typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
    SavedTuple Saved{saveValueInCond(A)...};

    // ... and queue a conditional cleanup carrying the saved values.
    typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
    pushCleanupAfterFullExprWithActiveFlag<CleanupType>(Kind, ActiveFlag,
                                                        Saved);
  }
945
  /// Append a lifetime-extended cleanup record to the raw byte stack of
  /// cleanups that get pushed onto EHStack only after the current
  /// full-expression finishes. The record layout in the byte stack is
  /// [Header][T][RawAddress active flag, only if conditional].
  template <class T, class... As>
  void pushCleanupAfterFullExprWithActiveFlag(CleanupKind Kind,
                                              RawAddress ActiveFlag, As... A) {
    LifetimeExtendedCleanupHeader Header = {.Size: sizeof(T), .Kind: Kind,
                                            .IsConditional: ActiveFlag.isValid()};

    // Grow the byte stack to hold the header, the cleanup object itself,
    // and (for conditional cleanups) the active-flag address.
    size_t OldSize = LifetimeExtendedCleanupStack.size();
    LifetimeExtendedCleanupStack.resize(
        N: LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
        (Header.IsConditional ? sizeof(ActiveFlag) : 0));

    static_assert((alignof(LifetimeExtendedCleanupHeader) == alignof(T)) &&
                      (alignof(T) == alignof(RawAddress)),
                  "Cleanup will be allocated on misaligned address");
    // Placement-new each piece of the record directly into the byte stack.
    char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
    new (Buffer) LifetimeExtendedCleanupHeader(Header);
    new (Buffer + sizeof(Header)) T(A...);
    if (Header.IsConditional)
      new (Buffer + sizeof(Header) + sizeof(T)) RawAddress(ActiveFlag);
  }
966
  // Push a cleanup onto EHStack and deactivate it later. It is usually
  // deactivated when exiting a `CleanupDeactivationScope` (for example: after a
  // full expression).
  template <class T, class... As>
  void pushCleanupAndDeferDeactivation(CleanupKind Kind, As... A) {
    // Placeholder dominating IP for this cleanup: a dummy flag load emitted
    // at the current insertion point, recorded alongside the cleanup so the
    // eventual deactivation has a dominating instruction to key off.
    llvm::Instruction *DominatingIP =
        Builder.CreateFlagLoad(Addr: llvm::Constant::getNullValue(Ty: Int8PtrTy));
    EHStack.pushCleanup<T>(Kind, A...);
    DeferredDeactivationCleanupStack.push_back(
        Elt: {.Cleanup: EHStack.stable_begin(), .DominatingIP: DominatingIP});
  }
979
980 /// Set up the last cleanup that was pushed as a conditional
981 /// full-expression cleanup.
982 void initFullExprCleanup() {
983 initFullExprCleanupWithFlag(ActiveFlag: createCleanupActiveFlag());
984 }
985
  /// Bind the most recently pushed cleanup's activation to \p ActiveFlag
  /// (defined out of line).
  void initFullExprCleanupWithFlag(RawAddress ActiveFlag);
  /// Create the flag allocation used to mark a conditional cleanup as
  /// active (defined out of line).
  RawAddress createCleanupActiveFlag();

  /// PushDestructorCleanup - Push a cleanup to call the
  /// complete-object destructor of an object of the given type at the
  /// given address. Does nothing if T is not a C++ class type with a
  /// non-trivial destructor.
  void PushDestructorCleanup(QualType T, Address Addr);

  /// PushDestructorCleanup - Push a cleanup to call the
  /// complete-object variant of the given destructor on the object at
  /// the given address.
  void PushDestructorCleanup(const CXXDestructorDecl *Dtor, QualType T,
                             Address Addr);

  /// PopCleanupBlock - Will pop the cleanup entry on the stack and
  /// process all branch fixups.
  void PopCleanupBlock(bool FallThroughIsBranchThrough = false,
                       bool ForDeactivation = false);

  /// DeactivateCleanupBlock - Deactivates the given cleanup block.
  /// The block cannot be reactivated. Pops it if it's the top of the
  /// stack.
  ///
  /// \param DominatingIP - An instruction which is known to
  /// dominate the current IP (if set) and which lies along
  /// all paths of execution between the current IP and
  /// the point at which the cleanup comes into scope.
  void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
                              llvm::Instruction *DominatingIP);

  /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
  /// Cannot be used to resurrect a deactivated cleanup.
  ///
  /// \param DominatingIP - An instruction which is known to
  /// dominate the current IP (if set) and which lies along
  /// all paths of execution between the current IP and
  /// the point at which the cleanup comes into scope.
  void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
                            llvm::Instruction *DominatingIP);
1026
  /// Enters a new scope for capturing cleanups, all of which
  /// will be executed once the scope is exited.
  class RunCleanupsScope {
    /// EHStack depth at scope entry; cleanups pushed above this belong to
    /// this scope.
    EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
    size_t LifetimeExtendedCleanupStackSize;
    /// RAII scope whose ForceDeactivate() (called in ForceCleanup below)
    /// deactivates cleanups with deferred deactivation.
    CleanupDeactivationScope DeactivateCleanups;
    bool OldDidCallStackSave;

  protected:
    /// True until ForceCleanup() has run; the destructor only emits
    /// cleanups while this is still set.
    bool PerformCleanup;

  private:
    RunCleanupsScope(const RunCleanupsScope &) = delete;
    void operator=(const RunCleanupsScope &) = delete;

  protected:
    CodeGenFunction &CGF;

  public:
    /// Enter a new cleanup scope.
    explicit RunCleanupsScope(CodeGenFunction &CGF)
        : DeactivateCleanups(CGF), PerformCleanup(true), CGF(CGF) {
      CleanupStackDepth = CGF.EHStack.stable_begin();
      LifetimeExtendedCleanupStackSize =
          CGF.LifetimeExtendedCleanupStack.size();
      OldDidCallStackSave = CGF.DidCallStackSave;
      CGF.DidCallStackSave = false;
      OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
      CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
    }

    /// Exit this cleanup scope, emitting any accumulated cleanups.
    ~RunCleanupsScope() {
      if (PerformCleanup)
        ForceCleanup();
    }

    /// Determine whether this scope requires any cleanups.
    bool requiresCleanups() const {
      return CGF.EHStack.stable_begin() != CleanupStackDepth;
    }

    /// Force the emission of cleanups now, instead of waiting
    /// until this object is destroyed.
    /// \param ValuesToReload - A list of values that need to be available at
    /// the insertion point after cleanup emission. If cleanup emission created
    /// a shared cleanup block, these value pointers will be rewritten.
    /// Otherwise, they will not be modified.
    void
    ForceCleanup(std::initializer_list<llvm::Value **> ValuesToReload = {}) {
      assert(PerformCleanup && "Already forced cleanup");
      CGF.DidCallStackSave = OldDidCallStackSave;
      DeactivateCleanups.ForceDeactivate();
      CGF.PopCleanupBlocks(OldCleanupStackSize: CleanupStackDepth, OldLifetimeExtendedStackSize: LifetimeExtendedCleanupStackSize,
                           ValuesToReload);
      PerformCleanup = false;
      CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
    }
  };
1086
  // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
  // Initialized to stable_end(), i.e. no RunCleanupsScope is active yet.
  EHScopeStack::stable_iterator CurrentCleanupScopeDepth =
      EHScopeStack::stable_end();
1090
  /// A RunCleanupsScope that additionally tracks the source range and any
  /// labels declared inside the scope, so that labels can be re-scoped to
  /// the parent when this scope is torn down.
  class LexicalScope : public RunCleanupsScope {
    SourceRange Range;
    SmallVector<const LabelDecl *, 4> Labels;
    /// The lexically enclosing scope, restored on ForceCleanup().
    LexicalScope *ParentScope;

    LexicalScope(const LexicalScope &) = delete;
    void operator=(const LexicalScope &) = delete;

  public:
    /// Enter a new cleanup scope.
    explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range);

    /// Record a label declared in this scope.
    void addLabel(const LabelDecl *label) {
      assert(PerformCleanup && "adding label to dead scope?");
      Labels.push_back(Elt: label);
    }

    /// Exit this cleanup scope, emitting any accumulated
    /// cleanups.
    ~LexicalScope();

    /// Force the emission of cleanups now, instead of waiting
    /// until this object is destroyed.
    void ForceCleanup() {
      // Pop this scope off the lexical-scope chain before emitting
      // cleanups, then move any labels up to the enclosing scope.
      CGF.CurLexicalScope = ParentScope;
      RunCleanupsScope::ForceCleanup();

      if (!Labels.empty())
        rescopeLabels();
    }

    bool hasLabels() const { return !Labels.empty(); }

    /// Re-attach the recorded labels to an enclosing scope (out of line).
    void rescopeLabels();
  };
1126
  typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;

  /// The class used to assign some variables some temporarily addresses.
  class OMPMapVars {
    /// Original addresses of remapped variables (Address::invalid() when the
    /// variable had no entry in LocalDeclMap).
    DeclMapTy SavedLocals;
    /// Temporary addresses pending application via apply().
    DeclMapTy SavedTempAddresses;
    OMPMapVars(const OMPMapVars &) = delete;
    void operator=(const OMPMapVars &) = delete;

  public:
    explicit OMPMapVars() = default;
    ~OMPMapVars() {
      assert(SavedLocals.empty() && "Did not restored original addresses.");
    };

    /// Sets the address of the variable \p LocalVD to be \p TempAddr in
    /// function \p CGF.
    /// \return true if at least one variable was set already, false otherwise.
    bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
                    Address TempAddr) {
      LocalVD = LocalVD->getCanonicalDecl();
      // Only save it once.
      if (SavedLocals.count(Val: LocalVD))
        return false;

      // Copy the existing local entry to SavedLocals.
      auto it = CGF.LocalDeclMap.find(Val: LocalVD);
      if (it != CGF.LocalDeclMap.end())
        SavedLocals.try_emplace(Key: LocalVD, Args&: it->second);
      else
        SavedLocals.try_emplace(Key: LocalVD, Args: Address::invalid());

      // Generate the private entry.  For a reference-typed variable the
      // remapped address is stored indirectly through a fresh temporary.
      QualType VarTy = LocalVD->getType();
      if (VarTy->isReferenceType()) {
        Address Temp = CGF.CreateMemTemp(T: VarTy);
        CGF.Builder.CreateStore(Val: TempAddr.emitRawPointer(CGF), Addr: Temp);
        TempAddr = Temp;
      }
      SavedTempAddresses.try_emplace(Key: LocalVD, Args&: TempAddr);

      return true;
    }

    /// Applies new addresses to the list of the variables.
    /// \return true if at least one variable is using new address, false
    /// otherwise.
    bool apply(CodeGenFunction &CGF) {
      copyInto(Src: SavedTempAddresses, Dest&: CGF.LocalDeclMap);
      SavedTempAddresses.clear();
      return !SavedLocals.empty();
    }

    /// Restores original addresses of the variables.
    void restore(CodeGenFunction &CGF) {
      if (!SavedLocals.empty()) {
        copyInto(Src: SavedLocals, Dest&: CGF.LocalDeclMap);
        SavedLocals.clear();
      }
    }

  private:
    /// Copy all the entries in the source map over the corresponding
    /// entries in the destination, which must exist.  An invalid Address in
    /// the source erases the destination entry instead.
    static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
      for (auto &[Decl, Addr] : Src) {
        if (!Addr.isValid())
          Dest.erase(Val: Decl);
        else
          Dest.insert_or_assign(Key: Decl, Val: Addr);
      }
    }
  };
1200
  /// The scope used to remap some variables as private in the OpenMP loop body
  /// (or other captured region emitted without outlining), and to restore old
  /// vars back on exit.
  class OMPPrivateScope : public RunCleanupsScope {
    OMPMapVars MappedVars;
    OMPPrivateScope(const OMPPrivateScope &) = delete;
    void operator=(const OMPPrivateScope &) = delete;

  public:
    /// Enter a new OpenMP private scope.
    explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}

    /// Registers \p LocalVD variable as a private with \p Addr as the address
    /// of the corresponding private variable. \p
    /// PrivateGen is the address of the generated private variable.
    /// \return true if the variable is registered as private, false if it has
    /// been privatized already.
    bool addPrivate(const VarDecl *LocalVD, Address Addr) {
      assert(PerformCleanup && "adding private to dead scope");
      return MappedVars.setVarAddr(CGF, LocalVD, TempAddr: Addr);
    }

    /// Privatizes local variables previously registered as private.
    /// Registration is separate from the actual privatization to allow
    /// initializers use values of the original variables, not the private one.
    /// This is important, for example, if the private variable is a class
    /// variable initialized by a constructor that references other private
    /// variables. But at initialization original variables must be used, not
    /// private copies.
    /// \return true if at least one variable was privatized, false otherwise.
    bool Privatize() { return MappedVars.apply(CGF); }

    /// Run the scope's cleanups, then restore the original addresses.
    void ForceCleanup() {
      RunCleanupsScope::ForceCleanup();
      restoreMap();
    }

    /// Exit scope - all the mapped variables are restored.
    ~OMPPrivateScope() {
      if (PerformCleanup)
        ForceCleanup();
    }

    /// Checks if the global variable is captured in current function.
    bool isGlobalVarCaptured(const VarDecl *VD) const {
      VD = VD->getCanonicalDecl();
      return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(Val: VD) > 0;
    }

    /// Restore all mapped variables w/o clean up. This is usefully when we want
    /// to reference the original variables but don't want the clean up because
    /// that could emit lifetime end too early, causing backend issue #56913.
    void restoreMap() { MappedVars.restore(CGF); }
  };
1255
1256 /// Save/restore original map of previously emitted local vars in case when we
1257 /// need to duplicate emission of the same code several times in the same
1258 /// function for OpenMP code.
1259 class OMPLocalDeclMapRAII {
1260 CodeGenFunction &CGF;
1261 DeclMapTy SavedMap;
1262
1263 public:
1264 OMPLocalDeclMapRAII(CodeGenFunction &CGF)
1265 : CGF(CGF), SavedMap(CGF.LocalDeclMap) {}
1266 ~OMPLocalDeclMapRAII() { SavedMap.swap(RHS&: CGF.LocalDeclMap); }
1267 };
1268
  /// Takes the old cleanup stack size and emits the cleanup blocks
  /// that have been added.
  /// \param ValuesToReload - value pointers to rewrite if cleanup emission
  /// created a shared cleanup block (see RunCleanupsScope::ForceCleanup).
  void
  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
                   std::initializer_list<llvm::Value **> ValuesToReload = {});

  /// Takes the old cleanup stack size and emits the cleanup blocks
  /// that have been added, then adds all lifetime-extended cleanups from
  /// the given position to the stack.
  void
  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
                   size_t OldLifetimeExtendedStackSize,
                   std::initializer_list<llvm::Value **> ValuesToReload = {});

  /// Resolve pending branch fixups whose destination is \p Target
  /// (defined out of line).
  void ResolveBranchFixups(llvm::BasicBlock *Target);
1284
1285 /// The given basic block lies in the current EH scope, but may be a
1286 /// target of a potentially scope-crossing jump; get a stable handle
1287 /// to which we can perform this jump later.
1288 JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
1289 return JumpDest(Target, EHStack.getInnermostNormalCleanup(),
1290 NextCleanupDestIndex++);
1291 }
1292
1293 /// The given basic block lies in the current EH scope, but may be a
1294 /// target of a potentially scope-crossing jump; get a stable handle
1295 /// to which we can perform this jump later.
1296 JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
1297 return getJumpDestInCurrentScope(Target: createBasicBlock(name: Name));
1298 }
1299
  /// EmitBranchThroughCleanup - Emit a branch from the current insert
  /// block through the normal cleanup handling code (if any) and then
  /// on to \arg Dest.
  void EmitBranchThroughCleanup(JumpDest Dest);

  /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
  /// specified destination obviously has no cleanups to run. 'false' is always
  /// a conservatively correct answer for this method.
  bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;

  /// popCatchScope - Pops the catch scope at the top of the EHScope
  /// stack, emitting any required code (other than the catch handlers
  /// themselves).
  void popCatchScope();

  /// EH utility blocks (resume and dispatch); defined out of line in the
  /// exception-handling lowering code.
  llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
  llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
  llvm::BasicBlock *
  getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope);
1319
1320 /// An object to manage conditionally-evaluated expressions.
1321 class ConditionalEvaluation {
1322 llvm::BasicBlock *StartBB;
1323
1324 public:
1325 ConditionalEvaluation(CodeGenFunction &CGF)
1326 : StartBB(CGF.Builder.GetInsertBlock()) {}
1327
1328 void begin(CodeGenFunction &CGF) {
1329 assert(CGF.OutermostConditional != this);
1330 if (!CGF.OutermostConditional)
1331 CGF.OutermostConditional = this;
1332 }
1333
1334 void end(CodeGenFunction &CGF) {
1335 assert(CGF.OutermostConditional != nullptr);
1336 if (CGF.OutermostConditional == this)
1337 CGF.OutermostConditional = nullptr;
1338 }
1339
1340 /// Returns a block which will be executed prior to each
1341 /// evaluation of the conditional code.
1342 llvm::BasicBlock *getStartingBlock() const { return StartBB; }
1343 };
1344
1345 /// isInConditionalBranch - Return true if we're currently emitting
1346 /// one branch or the other of a conditional expression.
1347 bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
1348
  /// Emit a store of \p value to \p addr placed at the end of the outermost
  /// conditional's starting block, so the store executes unconditionally
  /// before any of the conditional branches.
  void setBeforeOutermostConditional(llvm::Value *value, Address addr,
                                     CodeGenFunction &CGF) {
    assert(isInConditionalBranch());
    llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
    // Insert the store immediately before the block's last instruction.
    auto store = new llvm::StoreInst(value, addr.emitRawPointer(CGF),
                                     block->back().getIterator());
    store->setAlignment(addr.getAlignment().getAsAlign());
  }
1357
1358 /// An RAII object to record that we're evaluating a statement
1359 /// expression.
1360 class StmtExprEvaluation {
1361 CodeGenFunction &CGF;
1362
1363 /// We have to save the outermost conditional: cleanups in a
1364 /// statement expression aren't conditional just because the
1365 /// StmtExpr is.
1366 ConditionalEvaluation *SavedOutermostConditional;
1367
1368 public:
1369 StmtExprEvaluation(CodeGenFunction &CGF)
1370 : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
1371 CGF.OutermostConditional = nullptr;
1372 }
1373
1374 ~StmtExprEvaluation() {
1375 CGF.OutermostConditional = SavedOutermostConditional;
1376 CGF.EnsureInsertPoint();
1377 }
1378 };
1379
  /// An object which temporarily prevents a value from being
  /// destroyed by aggressive peephole optimizations that assume that
  /// all uses of a value have been realized in the IR.
  class PeepholeProtection {
    /// Extra use of the protected value; managed by CodeGenFunction's
    /// protectFromPeepholes/unprotectFromPeepholes (see their call sites in
    /// OpaqueValueMappingData below).
    llvm::Instruction *Inst = nullptr;
    friend class CodeGenFunction;

  public:
    PeepholeProtection() = default;
  };
1390
  /// A non-RAII class containing all the information about a bound
  /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
  /// this which makes individual mappings very simple; using this
  /// class directly is useful when you have a variable number of
  /// opaque values or don't want the RAII functionality for some
  /// reason.
  class OpaqueValueMappingData {
    const OpaqueValueExpr *OpaqueValue;
    /// True if bound via OpaqueLValues, false if via OpaqueRValues.
    bool BoundLValue;
    CodeGenFunction::PeepholeProtection Protection;

    OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue)
        : OpaqueValue(ov), BoundLValue(boundLValue) {}

  public:
    OpaqueValueMappingData() : OpaqueValue(nullptr) {}

    static bool shouldBindAsLValue(const Expr *expr) {
      // gl-values should be bound as l-values for obvious reasons.
      // Records should be bound as l-values because IR generation
      // always keeps them in memory. Expressions of function type
      // act exactly like l-values but are formally required to be
      // r-values in C.
      return expr->isGLValue() || expr->getType()->isFunctionType() ||
             hasAggregateEvaluationKind(T: expr->getType());
    }

    /// Bind \p ov to the result of emitting \p e, choosing l-value or
    /// r-value binding via shouldBindAsLValue().
    static OpaqueValueMappingData
    bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e) {
      if (shouldBindAsLValue(expr: ov))
        return bind(CGF, ov, lv: CGF.EmitLValue(E: e));
      return bind(CGF, ov, rv: CGF.EmitAnyExpr(E: e));
    }

    /// Bind \p ov to an already-emitted l-value.
    static OpaqueValueMappingData
    bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const LValue &lv) {
      assert(shouldBindAsLValue(ov));
      CGF.OpaqueLValues.insert(KV: std::make_pair(x&: ov, y: lv));
      return OpaqueValueMappingData(ov, true);
    }

    /// Bind \p ov to an already-emitted r-value.
    static OpaqueValueMappingData
    bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const RValue &rv) {
      assert(!shouldBindAsLValue(ov));
      CGF.OpaqueRValues.insert(KV: std::make_pair(x&: ov, y: rv));

      OpaqueValueMappingData data(ov, false);

      // Work around an extremely aggressive peephole optimization in
      // EmitScalarConversion which assumes that all other uses of a
      // value are extant.
      data.Protection = CGF.protectFromPeepholes(rvalue: rv);

      return data;
    }

    bool isValid() const { return OpaqueValue != nullptr; }
    void clear() { OpaqueValue = nullptr; }

    /// Remove the binding from \p CGF's opaque-value maps and, for r-value
    /// bindings, release the peephole protection taken in bind().
    void unbind(CodeGenFunction &CGF) {
      assert(OpaqueValue && "no data to unbind!");

      if (BoundLValue) {
        CGF.OpaqueLValues.erase(Val: OpaqueValue);
      } else {
        CGF.OpaqueRValues.erase(Val: OpaqueValue);
        CGF.unprotectFromPeepholes(protection: Protection);
      }
    }
  };
1461
  /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
  class OpaqueValueMapping {
    CodeGenFunction &CGF;
    OpaqueValueMappingData Data;

  public:
    static bool shouldBindAsLValue(const Expr *expr) {
      return OpaqueValueMappingData::shouldBindAsLValue(expr);
    }

    /// Build the opaque value mapping for the given conditional
    /// operator if it's the GNU ?: extension. This is a common
    /// enough pattern that the convenience operator is really
    /// helpful.
    ///
    OpaqueValueMapping(CodeGenFunction &CGF,
                       const AbstractConditionalOperator *op)
        : CGF(CGF) {
      // A plain ConditionalOperator has no opaque value to bind.
      if (isa<ConditionalOperator>(Val: op))
        // Leave Data empty.
        return;

      const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(Val: op);
      Data = OpaqueValueMappingData::bind(CGF, ov: e->getOpaqueValue(),
                                          e: e->getCommon());
    }

    /// Build the opaque value mapping for an OpaqueValueExpr whose source
    /// expression is set to the expression the OVE represents.
    OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *OV)
        : CGF(CGF) {
      if (OV) {
        assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
                                      "for OVE with no source expression");
        Data = OpaqueValueMappingData::bind(CGF, ov: OV, e: OV->getSourceExpr());
      }
    }

    /// Bind \p opaqueValue to an already-emitted l-value.
    OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue,
                       LValue lvalue)
        : CGF(CGF),
          Data(OpaqueValueMappingData::bind(CGF, ov: opaqueValue, lv: lvalue)) {}

    /// Bind \p opaqueValue to an already-emitted r-value.
    OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue,
                       RValue rvalue)
        : CGF(CGF),
          Data(OpaqueValueMappingData::bind(CGF, ov: opaqueValue, rv: rvalue)) {}

    /// Explicitly end the mapping before destruction.
    void pop() {
      Data.unbind(CGF);
      Data.clear();
    }

    ~OpaqueValueMapping() {
      if (Data.isValid())
        Data.unbind(CGF);
    }
  };
1520
private:
  /// Debug-info emitter for this function; may be null when debug info is
  /// not being generated.
  CGDebugInfo *DebugInfo;
  /// Used to create unique names for artificial VLA size debug info variables.
  unsigned VLAExprCounter = 0;
  bool DisableDebugInfo = false;

  /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
  /// calling llvm.stacksave for multiple VLAs in the same scope.
  bool DidCallStackSave = false;

  /// IndirectBranch - The first time an indirect goto is seen we create a block
  /// with an indirect branch. Every time we see the address of a label taken,
  /// we add the label to the indirect goto. Every subsequent indirect goto is
  /// codegen'd as a jump to the IndirectBranch's basic block.
  llvm::IndirectBrInst *IndirectBranch = nullptr;

  /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
  /// decls.
  DeclMapTy LocalDeclMap;

  // Keep track of the cleanups for callee-destructed parameters pushed to the
  // cleanup stack so that they can be deactivated later.
  llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
      CalleeDestructedParamCleanups;

  /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
  /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
  /// parameter.
  llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
      SizeArguments;

  /// Track escaped local variables with auto storage. Used during SEH
  /// outlining to produce a call to llvm.localescape.
  llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;

  /// LabelMap - This keeps track of the LLVM basic block for each C label.
  llvm::DenseMap<const LabelDecl *, JumpDest> LabelMap;

  // BreakContinueStack - This keeps track of where break and continue
  // statements should jump to.
  struct BreakContinue {
    BreakContinue(const Stmt &LoopOrSwitch, JumpDest Break, JumpDest Continue)
        : LoopOrSwitch(&LoopOrSwitch), BreakBlock(Break),
          ContinueBlock(Continue) {}

    /// The loop or switch statement this entry was pushed for.
    const Stmt *LoopOrSwitch;
    /// Destination of a `break` inside this construct.
    JumpDest BreakBlock;
    /// Destination of a `continue` inside this construct.
    JumpDest ContinueBlock;
  };
  SmallVector<BreakContinue, 8> BreakContinueStack;
1571
  /// Handles cancellation exit points in OpenMP-related constructs.
  class OpenMPCancelExitStack {
    /// Tracks cancellation exit point and join point for cancel-related exit
    /// and normal exit.
    struct CancelExit {
      CancelExit() = default;
      CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
                 JumpDest ContBlock)
          : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
      OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
      /// true if the exit block has been emitted already by the special
      /// emitExit() call, false if the default codegen is used.
      bool HasBeenEmitted = false;
      JumpDest ExitBlock;
      JumpDest ContBlock;
    };

    SmallVector<CancelExit, 8> Stack;

  public:
    /// Start with one default sentinel entry so Stack.back() is always
    /// valid, even outside any cancel-supporting construct.
    OpenMPCancelExitStack() : Stack(1) {}
    ~OpenMPCancelExitStack() = default;
    /// Fetches the exit block for the current OpenMP construct.
    JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
    /// Emits exit block with special codegen procedure specific for the related
    /// OpenMP construct + emits code for normal construct cleanup.
    void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
                  const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
      if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
        assert(CGF.getOMPCancelDestination(Kind).isValid());
        assert(CGF.HaveInsertPoint());
        assert(!Stack.back().HasBeenEmitted);
        // Emit the cancel-exit block out of line (saving and restoring the
        // current IP) and branch from it to the join point.
        auto IP = CGF.Builder.saveAndClearIP();
        CGF.EmitBlock(BB: Stack.back().ExitBlock.getBlock());
        CodeGen(CGF);
        CGF.EmitBranch(Block: Stack.back().ContBlock.getBlock());
        CGF.Builder.restoreIP(IP);
        Stack.back().HasBeenEmitted = true;
      }
      // Also run the cleanup codegen on the normal path at the current IP.
      CodeGen(CGF);
    }
    /// Enter the cancel supporting \a Kind construct.
    /// \param Kind OpenMP directive that supports cancel constructs.
    /// \param HasCancel true, if the construct has inner cancel directive,
    /// false otherwise.
    void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) {
      Stack.push_back(Elt: {Kind,
                      HasCancel ? CGF.getJumpDestInCurrentScope(Name: "cancel.exit")
                                : JumpDest(),
                      HasCancel ? CGF.getJumpDestInCurrentScope(Name: "cancel.cont")
                                : JumpDest()});
    }
    /// Emits default exit point for the cancel construct (if the special one
    /// has not be used) + join point for cancel/normal exits.
    void exit(CodeGenFunction &CGF) {
      if (getExitBlock().isValid()) {
        assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid());
        bool HaveIP = CGF.HaveInsertPoint();
        if (!Stack.back().HasBeenEmitted) {
          // Default codegen: branch the fallthrough path to the join point,
          // then emit an exit block that just branches through cleanups to
          // the same join point.
          if (HaveIP)
            CGF.EmitBranchThroughCleanup(Dest: Stack.back().ContBlock);
          CGF.EmitBlock(BB: Stack.back().ExitBlock.getBlock());
          CGF.EmitBranchThroughCleanup(Dest: Stack.back().ContBlock);
        }
        CGF.EmitBlock(BB: Stack.back().ContBlock.getBlock());
        if (!HaveIP) {
          // No fallthrough path existed; terminate the join block.
          CGF.Builder.CreateUnreachable();
          CGF.Builder.ClearInsertionPoint();
        }
      }
      Stack.pop_back();
    }
  };
  OpenMPCancelExitStack OMPCancelStack;
1646
  /// Lower the Likelihood knowledge about the \p Cond via llvm.expect intrin.
  llvm::Value *emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
                                                    Stmt::Likelihood LH);

  /// Per-function PGO state; see CodeGenPGO for its semantics.
  std::unique_ptr<CodeGenPGO> PGO;

  /// Calculate branch weights appropriate for PGO data
  llvm::MDNode *createProfileWeights(uint64_t TrueCount,
                                     uint64_t FalseCount) const;
  llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights) const;
  llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
                                            uint64_t LoopCount) const;
1659
public:
  /// Whether \p S has a dedicated skip counter (defined out of line).
  bool hasSkipCounter(const Stmt *S) const;

  /// Coverage-mapping bookkeeping helpers (defined out of line).
  void markStmtAsUsed(bool Skipped, const Stmt *S);
  void markStmtMaybeUsed(const Stmt *S);

  /// Used to specify which counter in a pair shall be incremented.
  /// For non-binary counters, a skip counter is derived as (Parent - Exec).
  /// In contrast for binary counters, a skip counter cannot be computed from
  /// the Parent counter. In such cases, dedicated SkipPath counters must be
  /// allocated and marked (incremented as binary counters). (Parent can be
  /// synthesized with (Exec + Skip) in simple cases)
  enum CounterForIncrement {
    UseExecPath = 0, ///< Exec (true)
    UseSkipPath,     ///< Skip (false)
  };
1676
1677 /// Increment the profiler's counter for the given statement by \p StepV.
1678 /// If \p StepV is null, the default increment is 1.
1679 void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
1680 incrementProfileCounter(ExecSkip: UseExecPath, S, UseBoth: false, StepV);
1681 }
1682
  /// Emit increment of Counter.
  /// \param ExecSkip Use `Skipped` Counter if UseSkipPath is specified.
  /// \param S The Stmt that Counter is associated with.
  /// \param UseBoth Mark both Exec/Skip as used. (for verification)
  /// \param StepV The offset Value for adding to Counter.
  void incrementProfileCounter(CounterForIncrement ExecSkip, const Stmt *S,
                               bool UseBoth = false,
                               llvm::Value *StepV = nullptr);
1691
1692 bool isMCDCCoverageEnabled() const {
1693 return (CGM.getCodeGenOpts().hasProfileClangInstr() &&
1694 CGM.getCodeGenOpts().MCDCCoverage &&
1695 !CurFn->hasFnAttribute(Kind: llvm::Attribute::NoProfile));
1696 }
1697
  /// Allocate a temp value on the stack that MCDC can use to track condition
  /// results. ("maybe": defined out of line; see implementation for when the
  /// allocation is actually performed.)
  void maybeCreateMCDCCondBitmap();
1701
1702 bool isBinaryLogicalOp(const Expr *E) const {
1703 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val: E->IgnoreParens());
1704 return (BOp && BOp->isLogicalOp());
1705 }
1706
1707 bool isMCDCDecisionExpr(const Expr *E) const;
1708 bool isMCDCBranchExpr(const Expr *E) const;
1709
1710 /// Zero-init the MCDC temp value.
1711 void maybeResetMCDCCondBitmap(const Expr *E);
1712
1713 /// Increment the profiler's counter for the given expression by \p StepV.
1714 /// If \p StepV is null, the default increment is 1.
1715 void maybeUpdateMCDCTestVectorBitmap(const Expr *E);
1716
1717 /// Update the MCDC temp value with the condition's evaluated result.
1718 void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val);
1719
1720 /// Get the profiler's count for the given statement.
1721 uint64_t getProfileCount(const Stmt *S);
1722
1723 /// Set the profiler's current count.
1724 void setCurrentProfileCount(uint64_t Count);
1725
1726 /// Get the profiler's current count. This is generally the count for the most
1727 /// recently incremented counter.
1728 uint64_t getCurrentProfileCount();
1729
1730 /// See CGDebugInfo::addInstToCurrentSourceAtom.
1731 void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction,
1732 llvm::Value *Backup);
1733
1734 /// See CGDebugInfo::addInstToSpecificSourceAtom.
1735 void addInstToSpecificSourceAtom(llvm::Instruction *KeyInstruction,
1736 llvm::Value *Backup, uint64_t Atom);
1737
1738 /// Add \p KeyInstruction and an optional \p Backup instruction to a new atom
1739 /// group (See ApplyAtomGroup for more info).
1740 void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction,
1741 llvm::Value *Backup);
1742
1743 /// Copy all PFP fields from SrcPtr to DestPtr while updating signatures,
1744 /// assuming that DestPtr was already memcpy'd from SrcPtr.
1745 void emitPFPPostCopyUpdates(Address DestPtr, Address SrcPtr, QualType Ty);
1746
1747private:
1748 /// SwitchInsn - This is nearest current switch instruction. It is null if
1749 /// current context is not in a switch.
1750 llvm::SwitchInst *SwitchInsn = nullptr;
1751 /// The branch weights of SwitchInsn when doing instrumentation based PGO.
1752 SmallVector<uint64_t, 16> *SwitchWeights = nullptr;
1753
1754 /// The likelihood attributes of the SwitchCase.
1755 SmallVector<Stmt::Likelihood, 16> *SwitchLikelihood = nullptr;
1756
1757 /// CaseRangeBlock - This block holds if condition check for last case
1758 /// statement range in current switch instruction.
1759 llvm::BasicBlock *CaseRangeBlock = nullptr;
1760
1761 /// OpaqueLValues - Keeps track of the current set of opaque value
1762 /// expressions.
1763 llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1764 llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1765
1766 // VLASizeMap - This keeps track of the associated size for each VLA type.
1767 // We track this by the size expression rather than the type itself because
1768 // in certain situations, like a const qualifier applied to an VLA typedef,
1769 // multiple VLA types can share the same size expression.
1770 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1771 // enter/leave scopes.
1772 llvm::DenseMap<const Expr *, llvm::Value *> VLASizeMap;
1773
1774 /// A block containing a single 'unreachable' instruction. Created
1775 /// lazily by getUnreachableBlock().
1776 llvm::BasicBlock *UnreachableBlock = nullptr;
1777
1778 /// Counts of the number return expressions in the function.
1779 unsigned NumReturnExprs = 0;
1780
1781 /// Count the number of simple (constant) return expressions in the function.
1782 unsigned NumSimpleReturnExprs = 0;
1783
1784 /// The last regular (non-return) debug location (breakpoint) in the function.
1785 SourceLocation LastStopPoint;
1786
1787public:
1788 /// Source location information about the default argument or member
1789 /// initializer expression we're evaluating, if any.
1790 CurrentSourceLocExprScope CurSourceLocExprScope;
1791 using SourceLocExprScopeGuard =
1792 CurrentSourceLocExprScope::SourceLocExprScopeGuard;
1793
1794 /// A scope within which we are constructing the fields of an object which
1795 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
1796 /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
1797 class FieldConstructionScope {
1798 public:
1799 FieldConstructionScope(CodeGenFunction &CGF, Address This)
1800 : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
1801 CGF.CXXDefaultInitExprThis = This;
1802 }
1803 ~FieldConstructionScope() {
1804 CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
1805 }
1806
1807 private:
1808 CodeGenFunction &CGF;
1809 Address OldCXXDefaultInitExprThis;
1810 };
1811
/// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
/// is overridden to be the object under construction.
class CXXDefaultInitExprScope {
public:
  // Redirect 'this' to the object currently being constructed (stashed in
  // CGF.CXXDefaultInitExprThis by a FieldConstructionScope), saving the
  // previous pointer/alignment so they can be restored on exit. Also pushes
  // a source-location scope for SourceLocExprs inside the initializer.
  CXXDefaultInitExprScope(CodeGenFunction &CGF, const CXXDefaultInitExpr *E)
      : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
        OldCXXThisAlignment(CGF.CXXThisAlignment),
        SourceLocScope(E, CGF.CurSourceLocExprScope) {
    CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getBasePointer();
    CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
  }
  ~CXXDefaultInitExprScope() {
    CGF.CXXThisValue = OldCXXThisValue;
    CGF.CXXThisAlignment = OldCXXThisAlignment;
  }

public:
  CodeGenFunction &CGF;
  llvm::Value *OldCXXThisValue;  // saved CGF.CXXThisValue
  CharUnits OldCXXThisAlignment; // saved CGF.CXXThisAlignment
  SourceLocExprScopeGuard SourceLocScope;
};
1834
/// The scope of a CXXDefaultArgExpr: installs the default argument's
/// source-location context (for any SourceLocExprs it contains) for the
/// duration of its emission.
struct CXXDefaultArgExprScope : SourceLocExprScopeGuard {
  CXXDefaultArgExprScope(CodeGenFunction &CGF, const CXXDefaultArgExpr *E)
      : SourceLocExprScopeGuard(E, CGF.CurSourceLocExprScope) {}
};
1839
1840 /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
1841 /// current loop index is overridden.
1842 class ArrayInitLoopExprScope {
1843 public:
1844 ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
1845 : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) {
1846 CGF.ArrayInitIndex = Index;
1847 }
1848 ~ArrayInitLoopExprScope() { CGF.ArrayInitIndex = OldArrayInitIndex; }
1849
1850 private:
1851 CodeGenFunction &CGF;
1852 llvm::Value *OldArrayInitIndex;
1853 };
1854
1855 class InlinedInheritingConstructorScope {
1856 public:
1857 InlinedInheritingConstructorScope(CodeGenFunction &CGF, GlobalDecl GD)
1858 : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl),
1859 OldCurCodeDecl(CGF.CurCodeDecl),
1860 OldCXXABIThisDecl(CGF.CXXABIThisDecl),
1861 OldCXXABIThisValue(CGF.CXXABIThisValue),
1862 OldCXXThisValue(CGF.CXXThisValue),
1863 OldCXXABIThisAlignment(CGF.CXXABIThisAlignment),
1864 OldCXXThisAlignment(CGF.CXXThisAlignment),
1865 OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy),
1866 OldCXXInheritedCtorInitExprArgs(
1867 std::move(CGF.CXXInheritedCtorInitExprArgs)) {
1868 CGF.CurGD = GD;
1869 CGF.CurFuncDecl = CGF.CurCodeDecl =
1870 cast<CXXConstructorDecl>(Val: GD.getDecl());
1871 CGF.CXXABIThisDecl = nullptr;
1872 CGF.CXXABIThisValue = nullptr;
1873 CGF.CXXThisValue = nullptr;
1874 CGF.CXXABIThisAlignment = CharUnits();
1875 CGF.CXXThisAlignment = CharUnits();
1876 CGF.ReturnValue = Address::invalid();
1877 CGF.FnRetTy = QualType();
1878 CGF.CXXInheritedCtorInitExprArgs.clear();
1879 }
1880 ~InlinedInheritingConstructorScope() {
1881 CGF.CurGD = OldCurGD;
1882 CGF.CurFuncDecl = OldCurFuncDecl;
1883 CGF.CurCodeDecl = OldCurCodeDecl;
1884 CGF.CXXABIThisDecl = OldCXXABIThisDecl;
1885 CGF.CXXABIThisValue = OldCXXABIThisValue;
1886 CGF.CXXThisValue = OldCXXThisValue;
1887 CGF.CXXABIThisAlignment = OldCXXABIThisAlignment;
1888 CGF.CXXThisAlignment = OldCXXThisAlignment;
1889 CGF.ReturnValue = OldReturnValue;
1890 CGF.FnRetTy = OldFnRetTy;
1891 CGF.CXXInheritedCtorInitExprArgs =
1892 std::move(OldCXXInheritedCtorInitExprArgs);
1893 }
1894
1895 private:
1896 CodeGenFunction &CGF;
1897 GlobalDecl OldCurGD;
1898 const Decl *OldCurFuncDecl;
1899 const Decl *OldCurCodeDecl;
1900 ImplicitParamDecl *OldCXXABIThisDecl;
1901 llvm::Value *OldCXXABIThisValue;
1902 llvm::Value *OldCXXThisValue;
1903 CharUnits OldCXXABIThisAlignment;
1904 CharUnits OldCXXThisAlignment;
1905 Address OldReturnValue;
1906 QualType OldFnRetTy;
1907 CallArgList OldCXXInheritedCtorInitExprArgs;
1908 };
1909
1910 // Helper class for the OpenMP IR Builder. Allows reusability of code used for
1911 // region body, and finalization codegen callbacks. This will class will also
1912 // contain privatization functions used by the privatization call backs
1913 //
1914 // TODO: this is temporary class for things that are being moved out of
1915 // CGOpenMPRuntime, new versions of current CodeGenFunction methods, or
1916 // utility function for use with the OMPBuilder. Once that move to use the
1917 // OMPBuilder is done, everything here will either become part of CodeGenFunc.
1918 // directly, or a new helper class that will contain functions used by both
1919 // this and the OMPBuilder
1920
1921 struct OMPBuilderCBHelpers {
1922
1923 OMPBuilderCBHelpers() = delete;
1924 OMPBuilderCBHelpers(const OMPBuilderCBHelpers &) = delete;
1925 OMPBuilderCBHelpers &operator=(const OMPBuilderCBHelpers &) = delete;
1926
1927 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1928
1929 /// Cleanup action for allocate support.
1930 class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup {
1931
1932 private:
1933 llvm::CallInst *RTLFnCI;
1934
1935 public:
1936 OMPAllocateCleanupTy(llvm::CallInst *RLFnCI) : RTLFnCI(RLFnCI) {
1937 RLFnCI->removeFromParent();
1938 }
1939
1940 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
1941 if (!CGF.HaveInsertPoint())
1942 return;
1943 CGF.Builder.Insert(I: RTLFnCI);
1944 }
1945 };
1946
1947 /// Returns address of the threadprivate variable for the current
1948 /// thread. This Also create any necessary OMP runtime calls.
1949 ///
1950 /// \param VD VarDecl for Threadprivate variable.
1951 /// \param VDAddr Address of the Vardecl
1952 /// \param Loc The location where the barrier directive was encountered
1953 static Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
1954 const VarDecl *VD, Address VDAddr,
1955 SourceLocation Loc);
1956
1957 /// Gets the OpenMP-specific address of the local variable /p VD.
1958 static Address getAddressOfLocalVariable(CodeGenFunction &CGF,
1959 const VarDecl *VD);
1960 /// Get the platform-specific name separator.
1961 /// \param Parts different parts of the final name that needs separation
1962 /// \param FirstSeparator First separator used between the initial two
1963 /// parts of the name.
1964 /// \param Separator separator used between all of the rest consecutinve
1965 /// parts of the name
1966 static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
1967 StringRef FirstSeparator = ".",
1968 StringRef Separator = ".");
1969 /// Emit the Finalization for an OMP region
1970 /// \param CGF The Codegen function this belongs to
1971 /// \param IP Insertion point for generating the finalization code.
1972 static void FinalizeOMPRegion(CodeGenFunction &CGF, InsertPointTy IP) {
1973 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1974 assert(IP.getBlock()->end() != IP.getPoint() &&
1975 "OpenMP IR Builder should cause terminated block!");
1976
1977 llvm::BasicBlock *IPBB = IP.getBlock();
1978 llvm::BasicBlock *DestBB = IPBB->getUniqueSuccessor();
1979 assert(DestBB && "Finalization block should have one successor!");
1980
1981 // erase and replace with cleanup branch.
1982 IPBB->getTerminator()->eraseFromParent();
1983 CGF.Builder.SetInsertPoint(IPBB);
1984 CodeGenFunction::JumpDest Dest = CGF.getJumpDestInCurrentScope(Target: DestBB);
1985 CGF.EmitBranchThroughCleanup(Dest);
1986 }
1987
1988 /// Emit the body of an OMP region
1989 /// \param CGF The Codegen function this belongs to
1990 /// \param RegionBodyStmt The body statement for the OpenMP region being
1991 /// generated
1992 /// \param AllocaIP Where to insert alloca instructions
1993 /// \param CodeGenIP Where to insert the region code
1994 /// \param RegionName Name to be used for new blocks
1995 static void EmitOMPInlinedRegionBody(CodeGenFunction &CGF,
1996 const Stmt *RegionBodyStmt,
1997 InsertPointTy AllocaIP,
1998 InsertPointTy CodeGenIP,
1999 Twine RegionName);
2000
2001 static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP,
2002 llvm::BasicBlock &FiniBB, llvm::Function *Fn,
2003 ArrayRef<llvm::Value *> Args) {
2004 llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
2005 if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator())
2006 CodeGenIPBBTI->eraseFromParent();
2007
2008 CGF.Builder.SetInsertPoint(CodeGenIPBB);
2009
2010 if (Fn->doesNotThrow())
2011 CGF.EmitNounwindRuntimeCall(callee: Fn, args: Args);
2012 else
2013 CGF.EmitRuntimeCall(callee: Fn, args: Args);
2014
2015 if (CGF.Builder.saveIP().isSet())
2016 CGF.Builder.CreateBr(Dest: &FiniBB);
2017 }
2018
2019 /// Emit the body of an OMP region that will be outlined in
2020 /// OpenMPIRBuilder::finalize().
2021 /// \param CGF The Codegen function this belongs to
2022 /// \param RegionBodyStmt The body statement for the OpenMP region being
2023 /// generated
2024 /// \param AllocaIP Where to insert alloca instructions
2025 /// \param CodeGenIP Where to insert the region code
2026 /// \param RegionName Name to be used for new blocks
2027 static void EmitOMPOutlinedRegionBody(CodeGenFunction &CGF,
2028 const Stmt *RegionBodyStmt,
2029 InsertPointTy AllocaIP,
2030 InsertPointTy CodeGenIP,
2031 Twine RegionName);
2032
2033 /// RAII for preserving necessary info during Outlined region body codegen.
2034 class OutlinedRegionBodyRAII {
2035
2036 llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
2037 CodeGenFunction::JumpDest OldReturnBlock;
2038 CodeGenFunction &CGF;
2039
2040 public:
2041 OutlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP,
2042 llvm::BasicBlock &RetBB)
2043 : CGF(cgf) {
2044 assert(AllocaIP.isSet() &&
2045 "Must specify Insertion point for allocas of outlined function");
2046 OldAllocaIP = CGF.AllocaInsertPt;
2047 CGF.AllocaInsertPt = &*AllocaIP.getPoint();
2048
2049 OldReturnBlock = CGF.ReturnBlock;
2050 CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(Target: &RetBB);
2051 }
2052
2053 ~OutlinedRegionBodyRAII() {
2054 CGF.AllocaInsertPt = OldAllocaIP;
2055 CGF.ReturnBlock = OldReturnBlock;
2056 }
2057 };
2058
2059 /// RAII for preserving necessary info during inlined region body codegen.
2060 class InlinedRegionBodyRAII {
2061
2062 llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
2063 CodeGenFunction &CGF;
2064
2065 public:
2066 InlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP,
2067 llvm::BasicBlock &FiniBB)
2068 : CGF(cgf) {
2069 // Alloca insertion block should be in the entry block of the containing
2070 // function so it expects an empty AllocaIP in which case will reuse the
2071 // old alloca insertion point, or a new AllocaIP in the same block as
2072 // the old one
2073 assert((!AllocaIP.isSet() ||
2074 CGF.AllocaInsertPt->getParent() == AllocaIP.getBlock()) &&
2075 "Insertion point should be in the entry block of containing "
2076 "function!");
2077 OldAllocaIP = CGF.AllocaInsertPt;
2078 if (AllocaIP.isSet())
2079 CGF.AllocaInsertPt = &*AllocaIP.getPoint();
2080
2081 // TODO: Remove the call, after making sure the counter is not used by
2082 // the EHStack.
2083 // Since this is an inlined region, it should not modify the
2084 // ReturnBlock, and should reuse the one for the enclosing outlined
2085 // region. So, the JumpDest being return by the function is discarded
2086 (void)CGF.getJumpDestInCurrentScope(Target: &FiniBB);
2087 }
2088
2089 ~InlinedRegionBodyRAII() { CGF.AllocaInsertPt = OldAllocaIP; }
2090 };
2091 };
2092
2093private:
2094 /// CXXThisDecl - When generating code for a C++ member function,
2095 /// this will hold the implicit 'this' declaration.
2096 ImplicitParamDecl *CXXABIThisDecl = nullptr;
2097 llvm::Value *CXXABIThisValue = nullptr;
2098 llvm::Value *CXXThisValue = nullptr;
2099 CharUnits CXXABIThisAlignment;
2100 CharUnits CXXThisAlignment;
2101
2102 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
2103 /// this expression.
2104 Address CXXDefaultInitExprThis = Address::invalid();
2105
2106 /// The current array initialization index when evaluating an
2107 /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
2108 llvm::Value *ArrayInitIndex = nullptr;
2109
2110 /// The values of function arguments to use when evaluating
2111 /// CXXInheritedCtorInitExprs within this context.
2112 CallArgList CXXInheritedCtorInitExprArgs;
2113
2114 /// CXXStructorImplicitParamDecl - When generating code for a constructor or
2115 /// destructor, this will hold the implicit argument (e.g. VTT).
2116 ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
2117 llvm::Value *CXXStructorImplicitParamValue = nullptr;
2118
2119 /// OutermostConditional - Points to the outermost active
2120 /// conditional control. This is used so that we know if a
2121 /// temporary should be destroyed conditionally.
2122 ConditionalEvaluation *OutermostConditional = nullptr;
2123
2124 /// The current lexical scope.
2125 LexicalScope *CurLexicalScope = nullptr;
2126
2127 /// The current source location that should be used for exception
2128 /// handling code.
2129 SourceLocation CurEHLocation;
2130
2131 /// BlockByrefInfos - For each __block variable, contains
2132 /// information about the layout of the variable.
2133 llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
2134
2135 /// Used by -fsanitize=nullability-return to determine whether the return
2136 /// value can be checked.
2137 llvm::Value *RetValNullabilityPrecondition = nullptr;
2138
2139 /// Check if -fsanitize=nullability-return instrumentation is required for
2140 /// this function.
2141 bool requiresReturnValueNullabilityCheck() const {
2142 return RetValNullabilityPrecondition;
2143 }
2144
2145 /// Used to store precise source locations for return statements by the
2146 /// runtime return value checks.
2147 Address ReturnLocation = Address::invalid();
2148
2149 /// Check if the return value of this function requires sanitization.
2150 bool requiresReturnValueCheck() const;
2151
2152 bool isInAllocaArgument(CGCXXABI &ABI, QualType Ty);
2153 bool hasInAllocaArg(const CXXMethodDecl *MD);
2154
2155 llvm::BasicBlock *TerminateLandingPad = nullptr;
2156 llvm::BasicBlock *TerminateHandler = nullptr;
2157 llvm::SmallVector<llvm::BasicBlock *, 2> TrapBBs;
2158
2159 /// Terminate funclets keyed by parent funclet pad.
2160 llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;
2161
/// Largest vector width used in this function. Will be used to create a
/// function attribute.
2164 unsigned LargestVectorWidth = 0;
2165
2166 /// True if we need emit the life-time markers. This is initially set in
2167 /// the constructor, but could be overwritten to true if this is a coroutine.
2168 bool ShouldEmitLifetimeMarkers;
2169
2170 /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
2171 /// the function metadata.
2172 void EmitKernelMetadata(const FunctionDecl *FD, llvm::Function *Fn);
2173
2174public:
2175 CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext = false);
2176 ~CodeGenFunction();
2177
2178 CodeGenTypes &getTypes() const { return CGM.getTypes(); }
2179 ASTContext &getContext() const { return CGM.getContext(); }
2180 CGDebugInfo *getDebugInfo() {
2181 if (DisableDebugInfo)
2182 return nullptr;
2183 return DebugInfo;
2184 }
2185 void disableDebugInfo() { DisableDebugInfo = true; }
2186 void enableDebugInfo() { DisableDebugInfo = false; }
2187
2188 bool shouldUseFusedARCCalls() {
2189 return CGM.getCodeGenOpts().OptimizationLevel == 0;
2190 }
2191
2192 const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
2193
2194 /// Returns a pointer to the function's exception object and selector slot,
2195 /// which is assigned in every landing pad.
2196 Address getExceptionSlot();
2197 Address getEHSelectorSlot();
2198
2199 /// Returns the contents of the function's exception object and selector
2200 /// slots.
2201 llvm::Value *getExceptionFromSlot();
2202 llvm::Value *getSelectorFromSlot();
2203
2204 RawAddress getNormalCleanupDestSlot();
2205
2206 llvm::BasicBlock *getUnreachableBlock() {
2207 if (!UnreachableBlock) {
2208 UnreachableBlock = createBasicBlock(name: "unreachable");
2209 new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
2210 }
2211 return UnreachableBlock;
2212 }
2213
2214 llvm::BasicBlock *getInvokeDest() {
2215 if (!EHStack.requiresLandingPad())
2216 return nullptr;
2217 return getInvokeDestImpl();
2218 }
2219
2220 bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; }
2221
2222 const TargetInfo &getTarget() const { return Target; }
2223 llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
2224 const TargetCodeGenInfo &getTargetHooks() const {
2225 return CGM.getTargetCodeGenInfo();
2226 }
2227
2228 //===--------------------------------------------------------------------===//
2229 // Cleanups
2230 //===--------------------------------------------------------------------===//
2231
2232 typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);
2233
2234 void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2235 Address arrayEndPointer,
2236 QualType elementType,
2237 CharUnits elementAlignment,
2238 Destroyer *destroyer);
2239 void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2240 llvm::Value *arrayEnd,
2241 QualType elementType,
2242 CharUnits elementAlignment,
2243 Destroyer *destroyer);
2244
2245 void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
2246 QualType type);
2247 void pushEHDestroy(QualType::DestructionKind dtorKind, Address addr,
2248 QualType type);
2249 void pushDestroy(CleanupKind kind, Address addr, QualType type,
2250 Destroyer *destroyer, bool useEHCleanupForArray);
2251 void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind,
2252 Address addr, QualType type);
2253 void pushDestroyAndDeferDeactivation(CleanupKind cleanupKind, Address addr,
2254 QualType type, Destroyer *destroyer,
2255 bool useEHCleanupForArray);
2256 void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
2257 QualType type, Destroyer *destroyer,
2258 bool useEHCleanupForArray);
2259 void pushLifetimeExtendedDestroy(QualType::DestructionKind dtorKind,
2260 Address addr, QualType type);
2261 void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
2262 llvm::Value *CompletePtr,
2263 QualType ElementType);
2264 void pushStackRestore(CleanupKind kind, Address SPMem);
2265 void pushKmpcAllocFree(CleanupKind Kind,
2266 std::pair<llvm::Value *, llvm::Value *> AddrSizePair);
2267 void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
2268 bool useEHCleanupForArray);
2269 llvm::Function *generateDestroyHelper(Address addr, QualType type,
2270 Destroyer *destroyer,
2271 bool useEHCleanupForArray,
2272 const VarDecl *VD);
2273 void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
2274 QualType elementType, CharUnits elementAlign,
2275 Destroyer *destroyer, bool checkZeroLength,
2276 bool useEHCleanup);
2277
2278 Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
2279
/// Determines whether an EH cleanup is required to destroy a type
/// with the given destruction kind.
bool needsEHCleanup(QualType::DestructionKind kind) {
  switch (kind) {
  case QualType::DK_none:
    // Nothing to destroy, so no cleanup of any kind.
    return false;
  case QualType::DK_cxx_destructor:
  case QualType::DK_objc_weak_lifetime:
  case QualType::DK_nontrivial_c_struct:
    // These need an EH path only when exceptions are enabled at all.
    return getLangOpts().Exceptions;
  case QualType::DK_objc_strong_lifetime:
    // ARC strong releases on the unwind path are additionally gated on
    // -fobjc-arc-exceptions (ObjCAutoRefCountExceptions).
    return getLangOpts().Exceptions &&
           CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
  }
  llvm_unreachable("bad destruction kind");
}
2296
2297 CleanupKind getCleanupKind(QualType::DestructionKind kind) {
2298 return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
2299 }
2300
2301 //===--------------------------------------------------------------------===//
2302 // Objective-C
2303 //===--------------------------------------------------------------------===//
2304
2305 void GenerateObjCMethod(const ObjCMethodDecl *OMD);
2306
2307 void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD);
2308
2309 /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
2310 void GenerateObjCGetter(ObjCImplementationDecl *IMP,
2311 const ObjCPropertyImplDecl *PID);
2312 void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
2313 const ObjCPropertyImplDecl *propImpl,
2314 const ObjCMethodDecl *GetterMothodDecl,
2315 llvm::Constant *AtomicHelperFn);
2316
2317 void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
2318 ObjCMethodDecl *MD, bool ctor);
2319
2320 /// GenerateObjCSetter - Synthesize an Objective-C property setter function
2321 /// for the given property.
2322 void GenerateObjCSetter(ObjCImplementationDecl *IMP,
2323 const ObjCPropertyImplDecl *PID);
2324 void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
2325 const ObjCPropertyImplDecl *propImpl,
2326 llvm::Constant *AtomicHelperFn);
2327
2328 //===--------------------------------------------------------------------===//
2329 // Block Bits
2330 //===--------------------------------------------------------------------===//
2331
2332 /// Emit block literal.
2333 /// \return an LLVM value which is a pointer to a struct which contains
2334 /// information about the block, including the block invoke function, the
2335 /// captured variables, etc.
2336 llvm::Value *EmitBlockLiteral(const BlockExpr *);
2337
2338 llvm::Function *GenerateBlockFunction(GlobalDecl GD, const CGBlockInfo &Info,
2339 const DeclMapTy &ldm,
2340 bool IsLambdaConversionToBlock,
2341 bool BuildGlobalBlock);
2342
2343 /// Check if \p T is a C++ class that has a destructor that can throw.
2344 static bool cxxDestructorCanThrow(QualType T);
2345
2346 llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
2347 llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
2348 llvm::Constant *
2349 GenerateObjCAtomicSetterCopyHelperFunction(const ObjCPropertyImplDecl *PID);
2350 llvm::Constant *
2351 GenerateObjCAtomicGetterCopyHelperFunction(const ObjCPropertyImplDecl *PID);
2352 llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
2353
2354 void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags,
2355 bool CanThrow);
2356
2357 class AutoVarEmission;
2358
2359 void emitByrefStructureInit(const AutoVarEmission &emission);
2360
2361 /// Enter a cleanup to destroy a __block variable. Note that this
2362 /// cleanup should be a no-op if the variable hasn't left the stack
2363 /// yet; if a cleanup is required for the variable itself, that needs
2364 /// to be done externally.
2365 ///
2366 /// \param Kind Cleanup kind.
2367 ///
2368 /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
2369 /// structure that will be passed to _Block_object_dispose. When
2370 /// \p LoadBlockVarAddr is true, the address of the field of the block
2371 /// structure that holds the address of the __block structure.
2372 ///
2373 /// \param Flags The flag that will be passed to _Block_object_dispose.
2374 ///
2375 /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
2376 /// \p Addr to get the address of the __block structure.
2377 void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags,
2378 bool LoadBlockVarAddr, bool CanThrow);
2379
2380 void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
2381 llvm::Value *ptr);
2382
2383 Address LoadBlockStruct();
2384 Address GetAddrOfBlockDecl(const VarDecl *var);
2385
2386 /// BuildBlockByrefAddress - Computes the location of the
2387 /// data in a variable which is declared as __block.
2388 Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V,
2389 bool followForward = true);
2390 Address emitBlockByrefAddress(Address baseAddr, const BlockByrefInfo &info,
2391 bool followForward, const llvm::Twine &name);
2392
2393 const BlockByrefInfo &getBlockByrefInfo(const VarDecl *var);
2394
2395 QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args);
2396
2397 void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
2398 const CGFunctionInfo &FnInfo);
2399
2400 /// Annotate the function with an attribute that disables TSan checking at
2401 /// runtime.
2402 void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn);
2403
2404 /// Emit code for the start of a function.
2405 /// \param Loc The location to be associated with the function.
2406 /// \param StartLoc The location of the function body.
2407 void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn,
2408 const CGFunctionInfo &FnInfo, const FunctionArgList &Args,
2409 SourceLocation Loc = SourceLocation(),
2410 SourceLocation StartLoc = SourceLocation());
2411
2412 static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor);
2413
2414 void EmitConstructorBody(FunctionArgList &Args);
2415 void EmitDestructorBody(FunctionArgList &Args);
2416 void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
2417 void EmitFunctionBody(const Stmt *Body);
2418 void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
2419
2420 void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
2421 CallArgList &CallArgs,
2422 const CGFunctionInfo *CallOpFnInfo = nullptr,
2423 llvm::Constant *CallOpFn = nullptr);
2424 void EmitLambdaBlockInvokeBody();
2425 void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD);
2426 void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD,
2427 CallArgList &CallArgs);
void EmitLambdaInAllocaImplFn(const CXXMethodDecl *CallOp,
                              const CGFunctionInfo **ImplFnInfo,
                              llvm::Function **ImplFn);
void EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD);
/// Store the previously-computed size for the VLA type \p VAT (looked up in
/// VLASizeMap by its size expression) into the capture l-value \p LV.
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV) {
  EmitStoreThroughLValue(Src: RValue::get(V: VLASizeMap[VAT->getSizeExpr()]), Dst: LV);
}
/// Emit the ASan instrumentation for either the function prologue
/// (\p Prologue == true) or epilogue.
void EmitAsanPrologueOrEpilogue(bool Prologue);

/// Emit the unified return block, trying to avoid its emission when
/// possible.
/// \return The debug location of the user written return statement if the
/// return block is avoided.
llvm::DebugLoc EmitReturnBlock();

/// FinishFunction - Complete IR generation of the current function. It is
/// legal to call this function even if there is no current insertion point.
void FinishFunction(SourceLocation EndLoc = SourceLocation());

/// Begin emission of a thunk for \p GD into \p Fn.
void StartThunk(llvm::Function *Fn, GlobalDecl GD,
                const CGFunctionInfo &FnInfo, bool IsUnprototyped);

/// Emit the call to the thunk's target \p Callee and the return sequence.
void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
                               const ThunkInfo *Thunk, bool IsUnprototyped);

/// Finish emission of the thunk started by StartThunk.
void FinishThunk();

/// Emit a musttail call for a thunk with a potentially adjusted this pointer.
void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr,
                       llvm::FunctionCallee Callee);

/// Generate a thunk for the given method.
void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
                   GlobalDecl GD, const ThunkInfo &Thunk,
                   bool IsUnprototyped);

/// Generate a thunk for a method that takes variable arguments.
llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
                                     const CGFunctionInfo &FnInfo,
                                     GlobalDecl GD, const ThunkInfo &Thunk);

/// Emit the prologue of constructor \p CD for the given constructor kind.
void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
                      FunctionArgList &Args);

/// Emit the initializer \p Init for \p Field of the object denoted by \p LHS.
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init);

/// Struct with all information about dynamic [sub]class needed to set vptr.
struct VPtr {
  BaseSubobject Base;                  // Subobject whose vptr is being set.
  const CXXRecordDecl *NearestVBase;   // Nearest virtual base, if any.
  CharUnits OffsetFromNearestVBase;
  const CXXRecordDecl *VTableClass;    // Class whose vtable is used.
};

/// Initialize the vtable pointer of the given subobject.
void InitializeVTablePointer(const VPtr &vptr);

typedef llvm::SmallVector<VPtr, 4> VPtrsVector;

typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
/// Collect VPtr descriptions for every vtable pointer of \p VTableClass.
VPtrsVector getVTablePointers(const CXXRecordDecl *VTableClass);

/// Recursive worker for the single-argument getVTablePointers overload;
/// appends results to \p vptrs, using \p VBases to visit each virtual base
/// only once.
void getVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase,
                       CharUnits OffsetFromNearestVBase,
                       bool BaseIsNonVirtualPrimaryBase,
                       const CXXRecordDecl *VTableClass,
                       VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);

/// Initialize all the vtable pointers of an object of class \p ClassDecl.
void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);

// VTableAuthMode - whether we guarantee that loading the
// vtable is guaranteed to trap on authentication failure,
// even if the resulting vtable pointer is unused.
enum class VTableAuthMode {
  Authenticate,
  MustTrap,
  UnsafeUbsanStrip // Should only be used for Vptr UBSan check
};
/// GetVTablePtr - Return the Value of the vtable pointer member pointed
/// to by This.
llvm::Value *
GetVTablePtr(Address This, llvm::Type *VTableTy,
             const CXXRecordDecl *VTableClass,
             VTableAuthMode AuthMode = VTableAuthMode::Authenticate);

/// Kinds of checks emitted for Control Flow Integrity (-fsanitize=cfi*).
enum CFITypeCheckKind {
  CFITCK_VCall,
  CFITCK_NVCall,
  CFITCK_DerivedCast,
  CFITCK_UnrelatedCast,
  CFITCK_ICall,
  CFITCK_NVMFCall,
  CFITCK_VMFCall,
};

/// Derived is the presumed address of an object of type T after a
/// cast. If T is a polymorphic class type, emit a check that the virtual
/// table for Derived belongs to a class derived from T.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull,
                               CFITypeCheckKind TCK, SourceLocation Loc);

/// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
/// If vptr CFI is enabled, emit a check that VTable is valid.
void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable,
                               CFITypeCheckKind TCK, SourceLocation Loc);

/// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
/// RD using llvm.type.test.
void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
                        CFITypeCheckKind TCK, SourceLocation Loc);

/// If whole-program virtual table optimization is enabled, emit an assumption
/// that VTable is a member of RD's type identifier. Or, if vptr CFI is
/// enabled, emit a check that VTable is a member of RD's type identifier.
void EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
                                  llvm::Value *VTable, SourceLocation Loc);

/// Returns whether we should perform a type checked load when loading a
/// virtual function for virtual calls to members of RD. This is generally
/// true when both vcall CFI and whole-program-vtables are enabled.
bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD);

/// Emit a type checked load from the given vtable.
llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD,
                                       llvm::Value *VTable,
                                       llvm::Type *VTableTy,
                                       uint64_t VTableByteOffset);

/// EnterDtorCleanups - Enter the cleanups necessary to complete the
/// given phase of destruction for a destructor. The end result
/// should call destructors on members and base classes in reverse
/// order of their construction.
void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls
bool ShouldInstrumentFunction();

/// ShouldSkipSanitizerInstrumentation - Return true if the current function
/// should not be instrumented with sanitizers.
bool ShouldSkipSanitizerInstrumentation();

/// ShouldXRayInstrumentFunction - Return true if the current function should
/// be instrumented with XRay nop sleds.
bool ShouldXRayInstrumentFunction() const;

/// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit
/// XRay custom event handling calls.
bool AlwaysEmitXRayCustomEvents() const;

/// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
/// XRay typed event handling calls.
bool AlwaysEmitXRayTypedEvents() const;

/// Return a type hash constant for a function instrumented by
/// -fsanitize=function.
llvm::ConstantInt *getUBSanFunctionTypeHash(QualType T) const;

/// EmitFunctionProlog - Emit the target specific LLVM code to load the
/// arguments for the given function. This is also responsible for naming the
/// LLVM function arguments.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn,
                        const FunctionArgList &Args);

/// EmitFunctionEpilog - Emit the target specific LLVM code to return the
/// given temporary. Specify the source location atom group (Key Instructions
/// debug info feature) for the `ret` using \p RetKeyInstructionsSourceAtom.
/// If it's 0, the `ret` will get added to a new source atom group.
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
                        SourceLocation EndLoc,
                        uint64_t RetKeyInstructionsSourceAtom);

/// Emit a test that checks if the return value \p RV is nonnull.
void EmitReturnValueCheck(llvm::Value *RV);

/// EmitStartEHSpec - Emit the start of the exception spec.
void EmitStartEHSpec(const Decl *D);

/// EmitEndEHSpec - Emit the end of the exception spec.
void EmitEndEHSpec(const Decl *D);

/// getTerminateLandingPad - Return a landing pad that just calls terminate.
llvm::BasicBlock *getTerminateLandingPad();

/// getTerminateFunclet - Return a cleanup funclet that just calls
/// terminate.
llvm::BasicBlock *getTerminateFunclet();

/// getTerminateHandler - Return a handler (not a landing pad, just
/// a catch handler) that just calls terminate. This is used when
/// a terminate scope encloses a try.
llvm::BasicBlock *getTerminateHandler();

llvm::Type *ConvertTypeForMem(QualType T);
llvm::Type *ConvertType(QualType T);
llvm::Type *convertTypeForLoadStore(QualType ASTTy,
                                    llvm::Type *LLVMTy = nullptr);
/// Convenience overload: convert the type declared by \p T.
llvm::Type *ConvertType(const TypeDecl *T) {
  return ConvertType(T: getContext().getTypeDeclType(Decl: T));
}
2627
/// LoadObjCSelf - Load the value of self. This function is only valid while
/// generating code for an Objective-C method.
llvm::Value *LoadObjCSelf();

/// TypeOfSelfObject - Return type of object that this self represents.
QualType TypeOfSelfObject();

/// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
static TypeEvaluationKind getEvaluationKind(QualType T);

/// Return true if values of type \c T are evaluated as scalars (TEK_Scalar).
static bool hasScalarEvaluationKind(QualType T) {
  return getEvaluationKind(T) == TEK_Scalar;
}

/// Return true if values of type \c T are evaluated as aggregates
/// (TEK_Aggregate).
static bool hasAggregateEvaluationKind(QualType T) {
  return getEvaluationKind(T) == TEK_Aggregate;
}

/// createBasicBlock - Create an LLVM basic block.
llvm::BasicBlock *createBasicBlock(const Twine &name = "",
                                   llvm::Function *parent = nullptr,
                                   llvm::BasicBlock *before = nullptr) {
  return llvm::BasicBlock::Create(Context&: getLLVMContext(), Name: name, Parent: parent, InsertBefore: before);
}

/// getJumpDestForLabel - Return the JumpDest for the basic block that the
/// specified label maps to.
JumpDest getJumpDestForLabel(const LabelDecl *S);

/// SimplifyForwardingBlocks - If the given basic block is only a branch to
/// another basic block, simplify it. This assumes that no other code could
/// potentially reference the basic block.
void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
2661
/// EmitBlock - Emit the given block \arg BB and set it as the insert point,
/// adding a fall-through branch from the current insert block if
/// necessary. It is legal to call this function even if there is no current
/// insertion point.
///
/// IsFinished - If true, indicates that the caller has finished emitting
/// branches to the given block and does not expect to emit code into it. This
/// means the block can be ignored if it is unreachable.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished = false);

/// EmitBlockAfterUses - Emit the given block somewhere hopefully
/// near its uses, and leave the insertion point in it.
void EmitBlockAfterUses(llvm::BasicBlock *BB);

/// EmitBranch - Emit a branch to the specified basic block from the current
/// insert block, taking care to avoid creation of branches from dummy
/// blocks. It is legal to call this function even if there is no current
/// insertion point.
///
/// This function clears the current insertion point. The caller should follow
/// calls to this function with calls to Emit*Block prior to generating new
/// code.
void EmitBranch(llvm::BasicBlock *Block);

/// HaveInsertPoint - True if an insertion point is defined. If not, this
/// indicates that the current code being emitted is unreachable.
bool HaveInsertPoint() const { return Builder.GetInsertBlock() != nullptr; }
2689
2690 /// EnsureInsertPoint - Ensure that an insertion point is defined so that
2691 /// emitted IR has a place to go. Note that by definition, if this function
2692 /// creates a block then that block is unreachable; callers may do better to
2693 /// detect when no insertion point is defined and simply skip IR generation.
2694 void EnsureInsertPoint() {
2695 if (!HaveInsertPoint())
2696 EmitBlock(BB: createBasicBlock());
2697 }
2698
/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void ErrorUnsupported(const Stmt *S, const char *Type);

//===--------------------------------------------------------------------===//
//                                  Helpers
//===--------------------------------------------------------------------===//

/// Emit a PHI in \p MergeBlock that merges the base pointers of \p LHS
/// (incoming from \p LHSBlock) and \p RHS (incoming from \p RHSBlock), and
/// return \p LHS rebased onto that PHI with the smaller of the two
/// alignments. NOTE(review): \p MergedType is currently unused.
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS,
                                        llvm::BasicBlock *LHSBlock,
                                        llvm::BasicBlock *RHSBlock,
                                        llvm::BasicBlock *MergeBlock,
                                        QualType MergedType) {
  Builder.SetInsertPoint(MergeBlock);
  llvm::PHINode *PtrPhi = Builder.CreatePHI(Ty: LHS.getType(), NumReservedValues: 2, Name: "cond");
  PtrPhi->addIncoming(V: LHS.getBasePointer(), BB: LHSBlock);
  PtrPhi->addIncoming(V: RHS.getBasePointer(), BB: RHSBlock);
  LHS.replaceBasePointer(P: PtrPhi);
  // The merged address is only known to be aligned to the weaker of the two
  // incoming alignments.
  LHS.setAlignment(std::min(a: LHS.getAlignment(), b: RHS.getAlignment()));
  return LHS;
}

/// Construct an address with the natural alignment of T. If a pointer to T
/// is expected to be signed, the pointer passed to this function must have
/// been signed, and the returned Address will have the pointer authentication
/// information needed to authenticate the signed pointer.
Address makeNaturalAddressForPointer(
    llvm::Value *Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(),
    bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr,
    TBAAAccessInfo *TBAAInfo = nullptr,
    KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
  // A zero alignment means "not provided": query the natural alignment,
  // which also fills in BaseInfo/TBAAInfo when requested.
  if (Alignment.isZero())
    Alignment =
        CGM.getNaturalTypeAlignment(T, BaseInfo, TBAAInfo, forPointeeType: ForPointeeType);
  return Address(Ptr, ConvertTypeForMem(T), Alignment,
                 CGM.getPointerAuthInfoForPointeeType(type: T), /*Offset=*/nullptr,
                 IsKnownNonNull);
}
2737
/// Build an l-value for \p Addr of type \p T, deriving the TBAA access info
/// from the type and the base info from \p Source.
LValue MakeAddrLValue(Address Addr, QualType T,
                      AlignmentSource Source = AlignmentSource::Type) {
  return MakeAddrLValue(Addr, T, BaseInfo: LValueBaseInfo(Source),
                        TBAAInfo: CGM.getTBAAAccessInfo(AccessType: T));
}

/// Build an l-value for \p Addr of type \p T with explicit base and TBAA
/// information.
LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo,
                      TBAAAccessInfo TBAAInfo) {
  return LValue::MakeAddr(Addr, type: T, Context&: getContext(), BaseInfo, TBAAInfo);
}

/// Build an l-value from the raw pointer \p V, constructing the address via
/// makeNaturalAddressForPointer (so pointer-auth info is carried along).
LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
                      AlignmentSource Source = AlignmentSource::Type) {
  return MakeAddrLValue(Addr: makeNaturalAddressForPointer(Ptr: V, T, Alignment), T,
                        BaseInfo: LValueBaseInfo(Source), TBAAInfo: CGM.getTBAAAccessInfo(AccessType: T));
}

/// Same as MakeAddrLValue above except that the pointer is known to be
/// unsigned.
LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
                         AlignmentSource Source = AlignmentSource::Type) {
  Address Addr(V, ConvertTypeForMem(T), Alignment);
  return LValue::MakeAddr(Addr, type: T, Context&: getContext(), BaseInfo: LValueBaseInfo(Source),
                          TBAAInfo: CGM.getTBAAAccessInfo(AccessType: T));
}
2763
2764 LValue
2765 MakeAddrLValueWithoutTBAA(Address Addr, QualType T,
2766 AlignmentSource Source = AlignmentSource::Type) {
2767 return LValue::MakeAddr(Addr, type: T, Context&: getContext(), BaseInfo: LValueBaseInfo(Source),
2768 TBAAInfo: TBAAAccessInfo());
2769 }
2770
/// Given a value of type T* that may not be to a complete object, construct
/// an l-value with the natural pointee alignment of T.
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);

/// Construct an l-value for \p V using the natural alignment of type \p T.
LValue
MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T,
                           KnownNonNull_t IsKnownNonNull = NotKnownNonNull);

/// Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known
/// to be unsigned.
LValue MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V, QualType T);

/// Same as MakeNaturalAlignAddrLValue except that the pointer is known to be
/// unsigned.
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T);

/// Load the address stored in the reference l-value \p RefLVal, optionally
/// reporting base/TBAA information for the pointee.
Address EmitLoadOfReference(LValue RefLVal,
                            LValueBaseInfo *PointeeBaseInfo = nullptr,
                            TBAAAccessInfo *PointeeTBAAInfo = nullptr);
LValue EmitLoadOfReferenceLValue(LValue RefLVal);
/// Convenience overload: build a reference l-value from \p RefAddr and
/// \p RefTy, then load through it.
LValue
EmitLoadOfReferenceLValue(Address RefAddr, QualType RefTy,
                          AlignmentSource Source = AlignmentSource::Type) {
  LValue RefLVal = MakeAddrLValue(Addr: RefAddr, T: RefTy, BaseInfo: LValueBaseInfo(Source),
                                  TBAAInfo: CGM.getTBAAAccessInfo(AccessType: RefTy));
  return EmitLoadOfReferenceLValue(RefLVal);
}

/// Load a pointer with type \p PtrTy stored at address \p Ptr.
/// Note that \p PtrTy is the type of the loaded pointer, not the addresses
/// it is loaded from.
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy,
                          LValueBaseInfo *BaseInfo = nullptr,
                          TBAAAccessInfo *TBAAInfo = nullptr);
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy);

private:
/// Accumulates the allocas registered while a tracker is installed (see
/// AllocaTrackerRAII below).
struct AllocaTracker {
  void Add(llvm::AllocaInst *I) { Allocas.push_back(Elt: I); }
  // Hand the collected allocas to the caller, leaving this list empty.
  llvm::SmallVector<llvm::AllocaInst *> Take() { return std::move(Allocas); }

private:
  llvm::SmallVector<llvm::AllocaInst *> Allocas;
};
// Currently-installed tracker, or null when no AllocaTrackerRAII is active.
AllocaTracker *Allocas = nullptr;

/// CGDecl helper.
void emitStoresForConstant(const VarDecl &D, Address Loc, bool isVolatile,
                           llvm::Constant *constant, bool IsAutoInit);
/// CGDecl helper.
void emitStoresForZeroInit(const VarDecl &D, Address Loc, bool isVolatile);
/// CGDecl helper.
void emitStoresForPatternInit(const VarDecl &D, Address Loc, bool isVolatile);
/// CGDecl helper.
void emitStoresForInitAfterBZero(llvm::Constant *Init, Address Loc,
                                 bool isVolatile, bool IsAutoInit);
2825
public:
// Captures all the allocas created during the scope of its RAII object.
struct AllocaTrackerRAII {
  AllocaTrackerRAII(CodeGenFunction &CGF)
      : CGF(CGF), OldTracker(CGF.Allocas) {
    CGF.Allocas = &Tracker;
  }
  // Restore the previously-installed tracker; scopes may nest.
  ~AllocaTrackerRAII() { CGF.Allocas = OldTracker; }

  // Take ownership of the allocas captured so far within this scope.
  llvm::SmallVector<llvm::AllocaInst *> Take() { return Tracker.Take(); }

private:
  CodeGenFunction &CGF;
  AllocaTracker *OldTracker; // Tracker active before this scope began.
  AllocaTracker Tracker;
};

private:
/// If \p Alloca is not in the same address space as \p DestLangAS, insert an
/// address space cast and return a new RawAddress based on this value.
RawAddress MaybeCastStackAddressSpace(RawAddress Alloca, LangAS DestLangAS,
                                      llvm::Value *ArraySize = nullptr);
2848
public:
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder. The caller is responsible for setting an
/// appropriate alignment on
/// the alloca.
///
/// \p ArraySize is the number of array elements to be allocated if it
/// is not nullptr.
///
/// LangAS::Default is the address space of pointers to local variables and
/// temporaries, as exposed in the source language. In certain
/// configurations, this is not the same as the alloca address space, and a
/// cast is needed to lift the pointer from the alloca AS into
/// LangAS::Default. This can happen when the target uses a restricted
/// address space for the stack but the source language requires
/// LangAS::Default to be a generic address space. The latter condition is
/// common for most programming languages; OpenCL is an exception in that
/// LangAS::Default is the private address space, which naturally maps
/// to the stack.
///
/// Because the address of a temporary is often exposed to the program in
/// various ways, this function will perform the cast. The original alloca
/// instruction is returned through \p Alloca if it is not nullptr.
///
/// The cast is not performed in CreateTempAllocaWithoutCast. This is
/// more efficient if the caller knows that the address will not be exposed.
llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
                                   llvm::Value *ArraySize = nullptr);

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The alloca is cast to the address space of \p UseAddrSpace if
/// necessary.
RawAddress CreateTempAlloca(llvm::Type *Ty, LangAS UseAddrSpace,
                            CharUnits align, const Twine &Name = "tmp",
                            llvm::Value *ArraySize = nullptr,
                            RawAddress *Alloca = nullptr);

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The alloca is cast to the default address space if necessary.
///
/// FIXME: This version should be removed, and context should provide the
/// context use address space used instead of default.
RawAddress CreateTempAlloca(llvm::Type *Ty, CharUnits align,
                            const Twine &Name = "tmp",
                            llvm::Value *ArraySize = nullptr,
                            RawAddress *Alloca = nullptr) {
  return CreateTempAlloca(Ty, UseAddrSpace: LangAS::Default, align, Name, ArraySize,
                          Alloca);
}

/// Like CreateTempAlloca, but without the cast described above; see the
/// discussion on CreateTempAlloca.
RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
                                       const Twine &Name = "tmp",
                                       llvm::Value *ArraySize = nullptr);

/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default ABI alignment of the given LLVM type.
///
/// IMPORTANT NOTE: This is *not* generally the right alignment for
/// any given AST type that happens to have been lowered to the
/// given IR type. This should only ever be used for function-local,
/// IR-driven manipulations like saving and restoring a value. Do
/// not hand this address off to arbitrary IRGen routines, and especially
/// do not pass it as an argument to a function that might expect a
/// properly ABI-aligned value.
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                        const Twine &Name = "tmp");

/// CreateIRTempWithoutCast - Create a temporary IR object of the given type,
/// with appropriate alignment. This routine should only be used when an
/// temporary value needs to be stored into an alloca (for example, to avoid
/// explicit PHI construction), but the type is the IR type, not the type
/// appropriate for storing in memory.
///
/// That is, this is exactly equivalent to CreateMemTemp, but calling
/// ConvertType instead of ConvertTypeForMem.
RawAddress CreateIRTempWithoutCast(QualType T, const Twine &Name = "tmp");

/// CreateMemTemp - Create a temporary memory object of the given type, with
/// appropriate alignment and cast it to the default address space. Returns
/// the original alloca instruction by \p Alloca if it is not nullptr.
RawAddress CreateMemTemp(QualType T, const Twine &Name = "tmp",
                         RawAddress *Alloca = nullptr);
RawAddress CreateMemTemp(QualType T, CharUnits Align,
                         const Twine &Name = "tmp",
                         RawAddress *Alloca = nullptr);

/// CreateMemTemp - Create a temporary memory object of the given type, with
/// appropriate alignment without casting it to the default address space.
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
RawAddress CreateMemTempWithoutCast(QualType T, CharUnits Align,
                                    const Twine &Name = "tmp");
2941
2942 /// CreateAggTemp - Create a temporary memory object for the given
2943 /// aggregate type.
2944 AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp",
2945 RawAddress *Alloca = nullptr) {
2946 return AggValueSlot::forAddr(
2947 addr: CreateMemTemp(T, Name, Alloca), quals: T.getQualifiers(),
2948 isDestructed: AggValueSlot::IsNotDestructed, needsGC: AggValueSlot::DoesNotNeedGCBarriers,
2949 isAliased: AggValueSlot::IsNotAliased, mayOverlap: AggValueSlot::DoesNotOverlap);
2950 }
2951
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *EvaluateExprAsBool(const Expr *E);

/// Retrieve the implicit cast expression of the rhs in a binary operator
/// expression by passing pointers to Value and QualType
/// This is used for implicit bitfield conversion checks, which
/// must compare with the value before potential truncation.
llvm::Value *EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E,
                                                   llvm::Value **Previous,
                                                   QualType *SrcType);

/// Emit a check that an [implicit] conversion of a bitfield. It is not UB,
/// so we use the value after conversion.
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType,
                                 llvm::Value *Dst, QualType DstType,
                                 const CGBitFieldInfo &Info,
                                 SourceLocation Loc);

/// EmitIgnoredExpr - Emit an expression in a context which ignores the
/// result.
void EmitIgnoredExpr(const Expr *E);

/// EmitAnyExpr - Emit code to compute the specified expression which can have
/// any type. The result is returned as an RValue struct. If this is an
/// aggregate expression, the aggloc/agglocvolatile arguments indicate where
/// the result should be returned.
///
/// \param ignoreResult True if the resulting value isn't used.
RValue EmitAnyExpr(const Expr *E,
                   AggValueSlot aggSlot = AggValueSlot::ignored(),
                   bool ignoreResult = false);

// EmitVAListRef - Emit a "reference" to a va_list; this is either the address
// or the value of the expression, depending on how va_list is defined.
Address EmitVAListRef(const Expr *E);

/// Emit a "reference" to a __builtin_ms_va_list; this is
/// always the value of the expression, because a __builtin_ms_va_list is a
/// pointer to a char.
Address EmitMSVAListRef(const Expr *E);

/// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue EmitAnyExprToTemp(const Expr *E);

/// EmitAnyExprToMem - Emits the code necessary to evaluate an
/// arbitrary expression into the given memory location.
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals,
                      bool IsInitializer);

/// Emit the expression \p E into the exception slot at \p Addr.
void EmitAnyExprToExn(const Expr *E, Address Addr);

/// EmitInitializationToLValue - Emit an initializer to an LValue.
void EmitInitializationToLValue(
    const Expr *E, LValue LV,
    AggValueSlot::IsZeroed_t IsZeroed = AggValueSlot::IsNotZeroed);

/// EmitExprAsInit - Emits the code necessary to initialize a
/// location in memory with the given initializer.
void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
                    bool capturedByInit);
3014
3015 /// hasVolatileMember - returns true if aggregate type has a volatile
3016 /// member.
3017 bool hasVolatileMember(QualType T) {
3018 if (const auto *RD = T->getAsRecordDecl())
3019 return RD->hasVolatileMember();
3020 return false;
3021 }
3022
/// Determine whether a return value slot may overlap some other object.
AggValueSlot::Overlap_t getOverlapForReturnValue() {
  // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
  // class subobjects. These cases may need to be revisited depending on the
  // resolution of the relevant core issue.
  return AggValueSlot::DoesNotOverlap;
}

/// Determine whether a field initialization may overlap some other object.
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD);

/// Determine whether a base class initialization may overlap some other
/// object.
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD,
                                              const CXXRecordDecl *BaseRD,
                                              bool IsVirtual);

/// Emit an aggregate assignment.
void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy) {
  ApplyAtomGroup Grp(getDebugInfo());
  // The copy is treated as volatile if the type has any volatile member.
  bool IsVolatile = hasVolatileMember(T: EltTy);
  EmitAggregateCopy(Dest, Src, EltTy, MayOverlap: AggValueSlot::MayOverlap, isVolatile: IsVolatile);
}

/// Emit an aggregate copy-construction of \p Src into \p Dest.
void EmitAggregateCopyCtor(LValue Dest, LValue Src,
                           AggValueSlot::Overlap_t MayOverlap) {
  EmitAggregateCopy(Dest, Src, EltTy: Src.getType(), MayOverlap);
}

/// EmitAggregateCopy - Emit an aggregate copy.
///
/// \param isVolatile \c true iff either the source or the destination is
/// volatile.
/// \param MayOverlap Whether the tail padding of the destination might be
/// occupied by some other object. More efficient code can often be
/// generated if not.
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy,
                       AggValueSlot::Overlap_t MayOverlap,
                       bool isVolatile = false);
3062
/// GetAddrOfLocalVar - Return the address of a local variable. Asserts that
/// the declaration has already been emitted into LocalDeclMap.
Address GetAddrOfLocalVar(const VarDecl *VD) {
  auto it = LocalDeclMap.find(Val: VD);
  assert(it != LocalDeclMap.end() &&
         "Invalid argument to GetAddrOfLocalVar(), no decl!");
  return it->second;
}

/// Given an opaque value expression, return its LValue mapping if it exists,
/// otherwise create one.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e);

/// Given an opaque value expression, return its RValue mapping if it exists,
/// otherwise create one.
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e);

/// isOpaqueValueEmitted - Return true if the opaque value expression has
/// already been emitted.
bool isOpaqueValueEmitted(const OpaqueValueExpr *E);

/// Get the index of the current ArrayInitLoopExpr, if any.
llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }

/// getAccessedFieldNo - Given an encoded value and a result number, return
/// the input field number being accessed.
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);

/// Return the block address of the label \p L.
llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
/// Return the basic block used as the target of indirect gotos.
llvm::BasicBlock *GetIndirectGotoBlock();

/// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
static bool IsWrappedCXXThis(const Expr *E);
3095
/// EmitNullInitialization - Generate code to set a value of the given type to
/// null, If the type contains data member pointers, they will be initialized
/// to -1 in accordance with the Itanium C++ ABI.
void EmitNullInitialization(Address DestPtr, QualType Ty);

/// Emits a call to an LLVM variable-argument intrinsic, either
/// \c llvm.va_start or \c llvm.va_end.
/// \param ArgValue A reference to the \c va_list as emitted by either
/// \c EmitVAListRef or \c EmitMSVAListRef.
/// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
/// calls \c llvm.va_end.
llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart);

/// Generate code to get an argument from the passed in pointer
/// and update it accordingly.
/// \param VE The \c VAArgExpr for which to generate code.
/// \param VAListAddr Receives a reference to the \c va_list as emitted by
/// either \c EmitVAListRef or \c EmitMSVAListRef.
/// \returns A pointer to the argument.
// FIXME: We should be able to get rid of this method and use the va_arg
// instruction in LLVM instead once it works well enough.
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr,
                 AggValueSlot Slot = AggValueSlot::ignored());

/// emitArrayLength - Compute the length of an array, even if it's a
/// VLA, and drill down to the base element type.
llvm::Value *emitArrayLength(const ArrayType *arrayType, QualType &baseType,
                             Address &addr);

/// EmitVLASize - Capture all the sizes for the VLA expressions in
/// the given variably-modified type and store them in the VLASizeMap.
///
/// This function can be called with a null (unreachable) insert point.
void EmitVariablyModifiedType(QualType Ty);

/// A VLA's element count paired with the corresponding element type.
struct VlaSizePair {
  llvm::Value *NumElts;
  QualType Type;

  VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
};

/// Return the number of elements for a single dimension
/// for the given array type.
VlaSizePair getVLAElements1D(const VariableArrayType *vla);
VlaSizePair getVLAElements1D(QualType vla);

/// Returns an LLVM value that corresponds to the size,
/// in non-variably-sized elements, of a variable length array type,
/// plus that largest non-variably-sized element type. Assumes that
/// the type has already been emitted with EmitVariablyModifiedType.
VlaSizePair getVLASize(const VariableArrayType *vla);
VlaSizePair getVLASize(QualType vla);
3149
/// LoadCXXThis - Load the value of 'this'. This function is only valid while
/// generating code for a C++ member function.
llvm::Value *LoadCXXThis() {
  assert(CXXThisValue && "no 'this' value for this function");
  return CXXThisValue;
}
/// Return 'this' as an Address carrying alignment information.
Address LoadCXXThisAddress();

/// LoadCXXVTT - Load the VTT parameter for base constructors/destructors
/// that have virtual bases.
// FIXME: Every place that calls LoadCXXVTT is something
// that needs to be abstracted properly.
llvm::Value *LoadCXXVTT() {
  assert(CXXStructorImplicitParamValue && "no VTT value for this function");
  return CXXStructorImplicitParamValue;
}

/// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
/// complete class to the given direct base.
Address GetAddressOfDirectBaseInCompleteClass(Address Value,
                                              const CXXRecordDecl *Derived,
                                              const CXXRecordDecl *Base,
                                              bool BaseIsVirtual);

/// Return true if the value being converted by \p Cast needs a null check.
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);

/// GetAddressOfBaseClass - This function will add the necessary delta to the
/// load of 'this' and returns address of the base class.
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived,
                              CastExpr::path_const_iterator PathBegin,
                              CastExpr::path_const_iterator PathEnd,
                              bool NullCheckValue, SourceLocation Loc);

/// GetAddressOfDerivedClass - Convert an address of a base class along the
/// given inheritance path back to an address of the derived class \p Derived.
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived,
                                 CastExpr::path_const_iterator PathBegin,
                                 CastExpr::path_const_iterator PathEnd,
                                 bool NullCheckValue);

/// GetVTTParameter - Return the VTT parameter that should be passed to a
/// base constructor/destructor with virtual bases.
/// FIXME: VTTs are Itanium ABI-specific, so the definition should move
/// to ItaniumCXXABI.cpp together with all the references to VTT.
llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
                             bool Delegating);

/// Emit a call to the \p CtorType variant of \p Ctor as part of the
/// constructor-delegate optimization (see note below).
void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                    CXXCtorType CtorType,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc);
// It's important not to confuse this and the previous function. Delegating
// constructors are the C++11 feature. The constructor delegate optimization
// is used to reduce duplication in the base and complete constructors where
// they are substantially the same.
void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                      const FunctionArgList &Args);
3205
3206 /// Emit a call to an inheriting constructor (that is, one that invokes a
3207 /// constructor inherited from a base class) by inlining its definition. This
3208 /// is necessary if the ABI does not support forwarding the arguments to the
3209 /// base class constructor (because they're variadic or similar).
3210 void EmitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor,
3211 CXXCtorType CtorType,
3212 bool ForVirtualBase,
3213 bool Delegating,
3214 CallArgList &Args);
3215
3216 /// Emit a call to a constructor inherited from a base class, passing the
3217 /// current constructor's arguments along unmodified (without even making
3218 /// a copy).
3219 void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D,
3220 bool ForVirtualBase, Address This,
3221 bool InheritedFromVBase,
3222 const CXXInheritedCtorInitExpr *E);
3223
3224 void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
3225 bool ForVirtualBase, bool Delegating,
3226 AggValueSlot ThisAVS, const CXXConstructExpr *E);
3227
3228 void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
3229 bool ForVirtualBase, bool Delegating,
3230 Address This, CallArgList &Args,
3231 AggValueSlot::Overlap_t Overlap,
3232 SourceLocation Loc, bool NewPointerIsChecked,
3233 llvm::CallBase **CallOrInvoke = nullptr);
3234
3235 /// Emit assumption load for all bases. Requires to be called only on
3236 /// most-derived class and not under construction of the object.
3237 void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This);
3238
3239 /// Emit assumption that vptr load == global vtable.
3240 void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);
3241
3242 void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D, Address This,
3243 Address Src, const CXXConstructExpr *E);
3244
3245 void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
3246 const ArrayType *ArrayTy, Address ArrayPtr,
3247 const CXXConstructExpr *E,
3248 bool NewPointerIsChecked,
3249 bool ZeroInitialization = false);
3250
3251 void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
3252 llvm::Value *NumElements, Address ArrayPtr,
3253 const CXXConstructExpr *E,
3254 bool NewPointerIsChecked,
3255 bool ZeroInitialization = false);
3256
3257 static Destroyer destroyCXXObject;
3258
3259 void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
3260 bool ForVirtualBase, bool Delegating, Address This,
3261 QualType ThisTy);
3262
3263 void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
3264 llvm::Type *ElementTy, Address NewPtr,
3265 llvm::Value *NumElements,
3266 llvm::Value *AllocSizeWithoutCookie);
3267
3268 void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
3269 Address Ptr);
3270
3271 void EmitSehCppScopeBegin();
3272 void EmitSehCppScopeEnd();
3273 void EmitSehTryScopeBegin();
3274 void EmitSehTryScopeEnd();
3275
3276 bool EmitLifetimeStart(llvm::Value *Addr);
3277 void EmitLifetimeEnd(llvm::Value *Addr);
3278
3279 llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
3280 void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
3281
3282 void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
3283 QualType DeleteTy, llvm::Value *NumElements = nullptr,
3284 CharUnits CookieSize = CharUnits());
3285
3286 RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
3287 const CallExpr *TheCallExpr, bool IsDelete);
3288
3289 llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
3290 llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
3291 Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
3292
  /// Situations in which we might emit a check for the suitability of a
  /// pointer or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in
  /// compiler-rt.
  enum TypeCheckKind {
    /// Checking the operand of a load. Must be suitably sized and aligned.
    TCK_Load,
    /// Checking the destination of a store. Must be suitably sized and aligned.
    TCK_Store,
    /// Checking the bound value in a reference binding. Must be suitably sized
    /// and aligned, but is not required to refer to an object (until the
    /// reference is used), per core issue 453.
    TCK_ReferenceBinding,
    /// Checking the object expression in a non-static data member access. Must
    /// be an object within its lifetime.
    TCK_MemberAccess,
    /// Checking the 'this' pointer for a call to a non-static member function.
    /// Must be an object within its lifetime.
    TCK_MemberCall,
    /// Checking the 'this' pointer for a constructor call.
    TCK_ConstructorCall,
    /// Checking the operand of a static_cast to a derived pointer type. Must be
    /// null or an object within its lifetime.
    TCK_DowncastPointer,
    /// Checking the operand of a static_cast to a derived reference type. Must
    /// be an object within its lifetime.
    TCK_DowncastReference,
    /// Checking the operand of a cast to a base object. Must be suitably sized
    /// and aligned.
    TCK_Upcast,
    /// Checking the operand of a cast to a virtual base object. Must be an
    /// object within its lifetime.
    TCK_UpcastToVirtualBase,
    /// Checking the value assigned to a _Nonnull pointer. Must not be null.
    TCK_NonnullAssign,
    /// Checking the operand of a dynamic_cast or a typeid expression. Must be
    /// null or an object within its lifetime.
    TCK_DynamicOperation
  };

  /// Determine whether the pointer type check \p TCK permits null pointers.
  static bool isNullPointerAllowed(TypeCheckKind TCK);

  /// Determine whether the pointer type check \p TCK requires a vptr check.
  static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty);

  /// Whether any type-checking sanitizers are enabled. If \c false,
  /// calls to EmitTypeCheck can be skipped.
  bool sanitizePerformTypeCheck() const;
3341
3342 void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV,
3343 QualType Type, SanitizerSet SkippedChecks = SanitizerSet(),
3344 llvm::Value *ArraySize = nullptr) {
3345 if (!sanitizePerformTypeCheck())
3346 return;
3347 EmitTypeCheck(TCK, Loc, V: LV.emitRawPointer(CGF&: *this), Type, Alignment: LV.getAlignment(),
3348 SkippedChecks, ArraySize);
3349 }
3350
3351 void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, Address Addr,
3352 QualType Type, CharUnits Alignment = CharUnits::Zero(),
3353 SanitizerSet SkippedChecks = SanitizerSet(),
3354 llvm::Value *ArraySize = nullptr) {
3355 if (!sanitizePerformTypeCheck())
3356 return;
3357 EmitTypeCheck(TCK, Loc, V: Addr.emitRawPointer(CGF&: *this), Type, Alignment,
3358 SkippedChecks, ArraySize);
3359 }
3360
  /// Emit a check that \p V is the address of storage of the
  /// appropriate size and alignment for an object of type \p Type
  /// (or if ArraySize is provided, for an array of that bound).
  void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
                     QualType Type, CharUnits Alignment = CharUnits::Zero(),
                     SanitizerSet SkippedChecks = SanitizerSet(),
                     llvm::Value *ArraySize = nullptr);

  /// Emit a check that \p Base points into an array object, which
  /// we can access at index \p Index. \p Accessed should be \c false if
  /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
  void EmitBoundsCheck(const Expr *ArrayExpr, const Expr *ArrayExprBase,
                       llvm::Value *Index, QualType IndexType, bool Accessed);
  /// Worker for EmitBoundsCheck, taking an explicitly provided bound value.
  void EmitBoundsCheckImpl(const Expr *ArrayExpr, QualType ArrayBaseType,
                           llvm::Value *IndexVal, QualType IndexType,
                           llvm::Value *BoundsVal, QualType BoundsType,
                           bool Accessed);

  /// Returns debug info, with additional annotation if
  /// CGM.getCodeGenOpts().SanitizeAnnotateDebugInfo[Ordinal] is enabled for
  /// any of the ordinals.
  llvm::DILocation *
  SanitizerAnnotateDebugInfo(ArrayRef<SanitizerKind::SanitizerOrdinal> Ordinals,
                             SanitizerHandler Handler);

  /// Build metadata used by the AllocToken instrumentation.
  llvm::MDNode *buildAllocToken(QualType AllocType);
  /// Emit and set additional metadata used by the AllocToken instrumentation.
  void EmitAllocToken(llvm::CallBase *CB, QualType AllocType);
  /// Build additional metadata used by the AllocToken instrumentation,
  /// inferring the type from an allocation call expression.
  llvm::MDNode *buildAllocToken(const CallExpr *E);
  /// Emit and set additional metadata used by the AllocToken instrumentation,
  /// inferring the type from an allocation call expression.
  void EmitAllocToken(llvm::CallBase *CB, const CallExpr *E);

  /// Compute the address (GEP) of the "counted_by" field \p CountDecl for the
  /// member \p FD of \p Base.
  llvm::Value *GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD,
                                        const FieldDecl *CountDecl);

  /// Build an expression accessing the "counted_by" field.
  llvm::Value *EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD,
                                        const FieldDecl *CountDecl);

  // Emit bounds checking for flexible array and pointer members with the
  // counted_by attribute.
  void EmitCountedByBoundsChecking(const Expr *ArrayExpr, QualType ArrayType,
                                   Address ArrayInst, QualType IndexType,
                                   llvm::Value *IndexVal, bool Accessed,
                                   bool FlexibleArray);

  /// Emit a scalar/complex pre- or post-increment/decrement for \p E.
  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);
  ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                                         bool isInc, bool isPre);

  /// Converts Location to a DebugLoc, if debug information is enabled.
  llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);

  /// Get the record field index as represented in debug info.
  unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex);

  //===--------------------------------------------------------------------===//
  // Declaration Emission
  //===--------------------------------------------------------------------===//

  /// EmitDecl - Emit a declaration.
  ///
  /// This function can be called with a null (unreachable) insert point.
  void EmitDecl(const Decl &D, bool EvaluateConditionDecl = false);

  /// EmitVarDecl - Emit a local variable declaration.
  ///
  /// This function can be called with a null (unreachable) insert point.
  void EmitVarDecl(const VarDecl &D);

  /// Emit the scalar initializer \p init for the declaration \p D into
  /// \p lvalue.
  void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
                      bool capturedByInit);

  /// Callback type used for special-cased variable initialization.
  typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
                             llvm::Value *Address);

  /// Determine whether the given initializer is trivial in the sense
  /// that it requires no code to be generated.
  bool isTrivialInitializer(const Expr *Init);

  /// EmitAutoVarDecl - Emit an auto variable declaration.
  ///
  /// This function can be called with a null (unreachable) insert point.
  void EmitAutoVarDecl(const VarDecl &D);
3450
3451 class AutoVarEmission {
3452 friend class CodeGenFunction;
3453
3454 const VarDecl *Variable;
3455
3456 /// The address of the alloca for languages with explicit address space
3457 /// (e.g. OpenCL) or alloca casted to generic pointer for address space
3458 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
3459 /// as a global constant.
3460 Address Addr;
3461
3462 llvm::Value *NRVOFlag;
3463
3464 /// True if the variable is a __block variable that is captured by an
3465 /// escaping block.
3466 bool IsEscapingByRef;
3467
3468 /// True if the variable is of aggregate type and has a constant
3469 /// initializer.
3470 bool IsConstantAggregate;
3471
3472 /// True if lifetime markers should be used.
3473 bool UseLifetimeMarkers;
3474
3475 /// Address with original alloca instruction. Invalid if the variable was
3476 /// emitted as a global constant.
3477 RawAddress AllocaAddr;
3478
3479 struct Invalid {};
3480 AutoVarEmission(Invalid)
3481 : Variable(nullptr), Addr(Address::invalid()),
3482 AllocaAddr(RawAddress::invalid()) {}
3483
3484 AutoVarEmission(const VarDecl &variable)
3485 : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
3486 IsEscapingByRef(false), IsConstantAggregate(false),
3487 UseLifetimeMarkers(false), AllocaAddr(RawAddress::invalid()) {}
3488
3489 bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
3490
3491 public:
3492 static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
3493
3494 bool useLifetimeMarkers() const { return UseLifetimeMarkers; }
3495
3496 /// Returns the raw, allocated address, which is not necessarily
3497 /// the address of the object itself. It is casted to default
3498 /// address space for address space agnostic languages.
3499 Address getAllocatedAddress() const { return Addr; }
3500
3501 /// Returns the address for the original alloca instruction.
3502 RawAddress getOriginalAllocatedAddress() const { return AllocaAddr; }
3503
3504 /// Returns the address of the object within this declaration.
3505 /// Note that this does not chase the forwarding pointer for
3506 /// __block decls.
3507 Address getObjectAddress(CodeGenFunction &CGF) const {
3508 if (!IsEscapingByRef)
3509 return Addr;
3510
3511 return CGF.emitBlockByrefAddress(baseAddr: Addr, V: Variable, /*forward*/ followForward: false);
3512 }
3513 };
  /// Allocate storage for \p var and return the per-variable emission state.
  AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
  /// Emit the initializer for a variable allocated by EmitAutoVarAlloca.
  void EmitAutoVarInit(const AutoVarEmission &emission);
  /// Push any cleanups (destructors, lifetime ends) required for the variable.
  void EmitAutoVarCleanups(const AutoVarEmission &emission);
  void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
                              QualType::DestructionKind dtorKind);

  void MaybeEmitDeferredVarDeclInit(const VarDecl *var);

  /// Emits the alloca and debug information for the size expressions for each
  /// dimension of an array. It registers the association of its (1-dimensional)
  /// QualTypes and size expression's debug node, so that CGDebugInfo can
  /// reference this node when creating the DISubrange object to describe the
  /// array types.
  void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI, const VarDecl &D,
                                              bool EmitDebugInfo);

  /// Emit a variable with static (or thread) storage duration.
  void EmitStaticVarDecl(const VarDecl &D,
                         llvm::GlobalValue::LinkageTypes Linkage);
3532
3533 class ParamValue {
3534 union {
3535 Address Addr;
3536 llvm::Value *Value;
3537 };
3538
3539 bool IsIndirect;
3540
3541 ParamValue(llvm::Value *V) : Value(V), IsIndirect(false) {}
3542 ParamValue(Address A) : Addr(A), IsIndirect(true) {}
3543
3544 public:
3545 static ParamValue forDirect(llvm::Value *value) {
3546 return ParamValue(value);
3547 }
3548 static ParamValue forIndirect(Address addr) {
3549 assert(!addr.getAlignment().isZero());
3550 return ParamValue(addr);
3551 }
3552
3553 bool isIndirect() const { return IsIndirect; }
3554 llvm::Value *getAnyValue() const {
3555 if (!isIndirect())
3556 return Value;
3557 assert(!Addr.hasOffset() && "unexpected offset");
3558 return Addr.getBasePointer();
3559 }
3560
3561 llvm::Value *getDirectValue() const {
3562 assert(!isIndirect());
3563 return Value;
3564 }
3565
3566 Address getIndirectAddress() const {
3567 assert(isIndirect());
3568 return Addr;
3569 }
3570 };
3571
  /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
  void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);

  /// protectFromPeepholes - Protect a value that we're intending to
  /// store to the side, but which will probably be used later, from
  /// aggressive peepholing optimizations that might delete it.
  ///
  /// Pass the result to unprotectFromPeepholes to declare that
  /// protection is no longer required.
  ///
  /// There's no particular reason why this shouldn't apply to
  /// l-values, it's just that no existing peepholes work on pointers.
  PeepholeProtection protectFromPeepholes(RValue rvalue);
  void unprotectFromPeepholes(PeepholeProtection protection);

  /// Emit the run-time check half of an alignment assumption for \p Ptr.
  /// NOTE(review): presumably used by -fsanitize=alignment — confirm at
  /// call sites.
  void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
                                    SourceLocation Loc,
                                    SourceLocation AssumptionLoc,
                                    llvm::Value *Alignment,
                                    llvm::Value *OffsetValue,
                                    llvm::Value *TheCheck,
                                    llvm::Instruction *Assumption);

  /// Emit an alignment assumption for \p PtrValue of type \p Ty.
  void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
                               SourceLocation Loc, SourceLocation AssumptionLoc,
                               llvm::Value *Alignment,
                               llvm::Value *OffsetValue = nullptr);

  /// As above, taking the expression \p E the pointer was produced from.
  void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
                               SourceLocation AssumptionLoc,
                               llvm::Value *Alignment,
                               llvm::Value *OffsetValue = nullptr);
3604
3605 //===--------------------------------------------------------------------===//
3606 // Statement Emission
3607 //===--------------------------------------------------------------------===//
3608
3609 /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
3610 void EmitStopPoint(const Stmt *S);
3611
3612 /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
3613 /// this function even if there is no current insertion point.
3614 ///
3615 /// This function may clear the current insertion point; callers should use
3616 /// EnsureInsertPoint if they wish to subsequently generate code without first
3617 /// calling EmitBlock, EmitBranch, or EmitStmt.
3618 void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = {});
3619
3620 /// EmitSimpleStmt - Try to emit a "simple" statement which does not
3621 /// necessarily require an insertion point or debug information; typically
3622 /// because the statement amounts to a jump or a container of other
3623 /// statements.
3624 ///
3625 /// \return True if the statement was handled.
3626 bool EmitSimpleStmt(const Stmt *S, ArrayRef<const Attr *> Attrs);
3627
3628 Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
3629 AggValueSlot AVS = AggValueSlot::ignored());
3630 Address
3631 EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast = false,
3632 AggValueSlot AVS = AggValueSlot::ignored());
3633
3634 /// EmitLabel - Emit the block for the given label. It is legal to call this
3635 /// function even if there is no current insertion point.
3636 void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
3637
3638 void EmitLabelStmt(const LabelStmt &S);
3639 void EmitAttributedStmt(const AttributedStmt &S);
3640 void EmitGotoStmt(const GotoStmt &S);
3641 void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
3642 void EmitIfStmt(const IfStmt &S);
3643
3644 void EmitWhileStmt(const WhileStmt &S, ArrayRef<const Attr *> Attrs = {});
3645 void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = {});
3646 void EmitForStmt(const ForStmt &S, ArrayRef<const Attr *> Attrs = {});
3647 void EmitReturnStmt(const ReturnStmt &S);
3648 void EmitDeclStmt(const DeclStmt &S);
3649 void EmitBreakStmt(const BreakStmt &S);
3650 void EmitContinueStmt(const ContinueStmt &S);
3651 void EmitSwitchStmt(const SwitchStmt &S);
3652 void EmitDefaultStmt(const DefaultStmt &S, ArrayRef<const Attr *> Attrs);
3653 void EmitCaseStmt(const CaseStmt &S, ArrayRef<const Attr *> Attrs);
3654 void EmitCaseStmtRange(const CaseStmt &S, ArrayRef<const Attr *> Attrs);
3655 void EmitDeferStmt(const DeferStmt &S);
3656 void EmitAsmStmt(const AsmStmt &S);
3657
3658 const BreakContinue *GetDestForLoopControlStmt(const LoopControlStmt &S);
3659
3660 void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
3661 void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
3662 void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
3663 void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
3664 void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
3665
3666 void EmitCoroutineBody(const CoroutineBodyStmt &S);
3667 void EmitCoreturnStmt(const CoreturnStmt &S);
3668 RValue EmitCoawaitExpr(const CoawaitExpr &E,
3669 AggValueSlot aggSlot = AggValueSlot::ignored(),
3670 bool ignoreResult = false);
3671 LValue EmitCoawaitLValue(const CoawaitExpr *E);
3672 RValue EmitCoyieldExpr(const CoyieldExpr &E,
3673 AggValueSlot aggSlot = AggValueSlot::ignored(),
3674 bool ignoreResult = false);
3675 LValue EmitCoyieldLValue(const CoyieldExpr *E);
3676 RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
3677
3678 void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3679 void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3680
3681 void EmitCXXTryStmt(const CXXTryStmt &S);
3682 void EmitSEHTryStmt(const SEHTryStmt &S);
3683 void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
3684 void EnterSEHTryStmt(const SEHTryStmt &S);
3685 void ExitSEHTryStmt(const SEHTryStmt &S);
3686 void VolatilizeTryBlocks(llvm::BasicBlock *BB,
3687 llvm::SmallPtrSet<llvm::BasicBlock *, 10> &V);
3688
3689 void pushSEHCleanup(CleanupKind kind, llvm::Function *FinallyFunc);
3690 void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter,
3691 const Stmt *OutlinedStmt);
3692
3693 llvm::Function *GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
3694 const SEHExceptStmt &Except);
3695
3696 llvm::Function *GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
3697 const SEHFinallyStmt &Finally);
3698
3699 void EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
3700 llvm::Value *ParentFP, llvm::Value *EntryEBP);
3701 llvm::Value *EmitSEHExceptionCode();
3702 llvm::Value *EmitSEHExceptionInfo();
3703 llvm::Value *EmitSEHAbnormalTermination();
3704
3705 /// Emit simple code for OpenMP directives in Simd-only mode.
3706 void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D);
3707
3708 /// Scan the outlined statement for captures from the parent function. For
3709 /// each capture, mark the capture as escaped and emit a call to
3710 /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
3711 void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt,
3712 bool IsFilter);
3713
3714 /// Recovers the address of a local in a parent function. ParentVar is the
3715 /// address of the variable used in the immediate parent function. It can
3716 /// either be an alloca or a call to llvm.localrecover if there are nested
3717 /// outlined functions. ParentFP is the frame pointer of the outermost parent
3718 /// frame.
3719 Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
3720 Address ParentVar, llvm::Value *ParentFP);
3721
3722 void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
3723 ArrayRef<const Attr *> Attrs = {});
3724
  /// Controls insertion of cancellation exit blocks in worksharing constructs.
  /// RAII helper: enters a cancel region on the function's OMPCancelStack at
  /// construction and exits it at destruction.
  class OMPCancelStackRAII {
    CodeGenFunction &CGF;

  public:
    OMPCancelStackRAII(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
                       bool HasCancel)
        : CGF(CGF) {
      CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
    }
    ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); }
  };
3737
  /// Returns calculated size of the specified type.
  llvm::Value *getTypeSize(QualType Ty);
  /// Materialize the captured-variable record for \p S and return it as an
  /// lvalue.
  LValue InitCapturedStruct(const CapturedStmt &S);
  llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
  llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
  Address GenerateCapturedStmtArgument(const CapturedStmt &S);
  llvm::Function *
  GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
                                     const OMPExecutableDirective &D);
  void GenerateOpenMPCapturedVars(const CapturedStmt &S,
                                  SmallVectorImpl<llvm::Value *> &CapturedVars);
  void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
                          SourceLocation Loc);
  /// Perform element by element copying of arrays with type \a
  /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
  /// generated by \a CopyGen.
  ///
  /// \param DestAddr Address of the destination array.
  /// \param SrcAddr Address of the source array.
  /// \param OriginalType Type of destination and source arrays.
  /// \param CopyGen Copying procedure that copies value of single array element
  /// to another single array element.
  void EmitOMPAggregateAssign(
      Address DestAddr, Address SrcAddr, QualType OriginalType,
      const llvm::function_ref<void(Address, Address)> CopyGen);
  /// Emit proper copying of data from one variable to another.
  ///
  /// \param OriginalType Original type of the copied variables.
  /// \param DestAddr Destination address.
  /// \param SrcAddr Source address.
  /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
  /// type of the base array element).
  /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
  /// the base array element).
  /// \param Copy Actual copying expression for copying data from \a SrcVD to \a
  /// DestVD.
  void EmitOMPCopy(QualType OriginalType, Address DestAddr, Address SrcAddr,
                   const VarDecl *DestVD, const VarDecl *SrcVD,
                   const Expr *Copy);
  /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
  /// \a X = \a E \a BO \a E.
  ///
  /// \param X Value to be updated.
  /// \param E Update value.
  /// \param BO Binary operation for update operation.
  /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
  /// expression, false otherwise.
  /// \param AO Atomic ordering of the generated atomic instructions.
  /// \param CommonGen Code generator for complex expressions that cannot be
  /// expressed through atomicrmw instruction.
  /// \returns <true, OldAtomicValue> if simple 'atomicrmw' instruction was
  /// generated, <false, RValue::get(nullptr)> otherwise.
  std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
      LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
      llvm::AtomicOrdering AO, SourceLocation Loc,
      const llvm::function_ref<RValue(RValue)> CommonGen);
  /// Emit initial code for firstprivate variables; returns true if any
  /// firstprivate variable was registered in \p PrivateScope.
  bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                 OMPPrivateScope &PrivateScope);
  void EmitOMPPrivateClause(const OMPExecutableDirective &D,
                            OMPPrivateScope &PrivateScope);
  // NOTE(review): CaptureDeviceAddrMap is passed by value below; a const
  // reference would avoid copying the DenseMap, but changing it requires
  // touching the out-of-view definitions as well.
  void EmitOMPUseDevicePtrClause(
      const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
      const llvm::DenseMap<const ValueDecl *, llvm::Value *>
          CaptureDeviceAddrMap);
  void EmitOMPUseDeviceAddrClause(
      const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
      const llvm::DenseMap<const ValueDecl *, llvm::Value *>
          CaptureDeviceAddrMap);
  /// Emit code for copyin clause in \a D directive. The next code is
  /// generated at the start of outlined functions for directives:
  /// \code
  /// threadprivate_var1 = master_threadprivate_var1;
  /// operator=(threadprivate_var2, master_threadprivate_var2);
  /// ...
  /// __kmpc_barrier(&loc, global_tid);
  /// \endcode
  ///
  /// \param D OpenMP directive possibly with 'copyin' clause(s).
  /// \returns true if at least one copyin variable is found, false otherwise.
  bool EmitOMPCopyinClause(const OMPExecutableDirective &D);
  /// Emit initial code for lastprivate variables. If some variable is
  /// not also firstprivate, then the default initialization is used. Otherwise
  /// initialization of this variable is performed by EmitOMPFirstprivateClause
  /// method.
  ///
  /// \param D Directive that may have 'lastprivate' directives.
  /// \param PrivateScope Private scope for capturing lastprivate variables for
  /// proper codegen in internal captured statement.
  ///
  /// \returns true if there is at least one lastprivate variable, false
  /// otherwise.
  bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D,
                                    OMPPrivateScope &PrivateScope);
  /// Emit final copying of lastprivate values to original variables at
  /// the end of the worksharing or simd directive.
  ///
  /// \param D Directive that has at least one 'lastprivate' directives.
  /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
  /// it is the last iteration of the loop code in associated directive, or to
  /// 'i1 false' otherwise. If this item is nullptr, no final check is required.
  void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D,
                                     bool NoFinals,
                                     llvm::Value *IsLastIterCond = nullptr);
  /// Emit initial code for linear clauses.
  void EmitOMPLinearClause(const OMPLoopDirective &D,
                           CodeGenFunction::OMPPrivateScope &PrivateScope);
  /// Emit final code for linear clauses.
  /// \param CondGen Optional conditional code for final part of codegen for
  /// linear clause.
  void EmitOMPLinearClauseFinal(
      const OMPLoopDirective &D,
      const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
  /// Emit initial code for reduction variables. Creates reduction copies
  /// and initializes them with the values according to OpenMP standard.
  ///
  /// \param D Directive (possibly) with the 'reduction' clause.
  /// \param PrivateScope Private scope for capturing reduction variables for
  /// proper codegen in internal captured statement.
  ///
  void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
                                  OMPPrivateScope &PrivateScope,
                                  bool ForInscan = false);
  /// Emit final update of reduction values to original variables at
  /// the end of the directive.
  ///
  /// \param D Directive that has at least one 'reduction' directives.
  /// \param ReductionKind The kind of reduction to perform.
  void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D,
                                   const OpenMPDirectiveKind ReductionKind);
  /// Emit initial code for linear variables. Creates private copies
  /// and initializes them with the values according to OpenMP standard.
  ///
  /// \param D Directive (possibly) with the 'linear' clause.
  /// \return true if at least one linear variable is found that should be
  /// initialized with the value of the original variable, false otherwise.
  bool EmitOMPLinearClauseInit(const OMPLoopDirective &D);

  /// Callback used to emit the task body for task-based directives.
  typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
                                        llvm::Function * /*OutlinedFn*/,
                                        const OMPTaskDataTy & /*Data*/)>
      TaskGenTy;
  void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
                                 const OpenMPDirectiveKind CapturedRegion,
                                 const RegionCodeGenTy &BodyGen,
                                 const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
3882 const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
  /// Bundle of Address values and an item count handed to
  /// EmitOMPTargetTaskBasedDirective below (input info for target task
  /// emission).
  struct OMPTargetDataInfo {
    Address BasePointersArray = Address::invalid();
    Address PointersArray = Address::invalid();
    Address SizesArray = Address::invalid();
    Address MappersArray = Address::invalid();
    unsigned NumberOfTargetItems = 0;
    // Default state: all addresses invalid, zero items.
    explicit OMPTargetDataInfo() = default;
    OMPTargetDataInfo(Address BasePointersArray, Address PointersArray,
                      Address SizesArray, Address MappersArray,
                      unsigned NumberOfTargetItems)
        : BasePointersArray(BasePointersArray), PointersArray(PointersArray),
          SizesArray(SizesArray), MappersArray(MappersArray),
          NumberOfTargetItems(NumberOfTargetItems) {}
  };
  void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S,
                                       const RegionCodeGenTy &BodyGen,
                                       OMPTargetDataInfo &InputInfo);
  /// NOTE(review): name suggests this handles 'in_reduction' items during
  /// task-based emission — confirm against the definition in CGStmtOpenMP.cpp.
  void processInReduction(const OMPExecutableDirective &S, OMPTaskDataTy &Data,
                          CodeGenFunction &CGF, const CapturedStmt *CS,
                          OMPPrivateScope &Scope);
  // Statement-emission entry points, one per OpenMP directive kind.
  void EmitOMPMetaDirective(const OMPMetaDirective &S);
  void EmitOMPParallelDirective(const OMPParallelDirective &S);
  void EmitOMPSimdDirective(const OMPSimdDirective &S);
  void EmitOMPTileDirective(const OMPTileDirective &S);
  void EmitOMPStripeDirective(const OMPStripeDirective &S);
  void EmitOMPUnrollDirective(const OMPUnrollDirective &S);
  void EmitOMPReverseDirective(const OMPReverseDirective &S);
  void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S);
  void EmitOMPFuseDirective(const OMPFuseDirective &S);
  void EmitOMPForDirective(const OMPForDirective &S);
  void EmitOMPForSimdDirective(const OMPForSimdDirective &S);
  void EmitOMPScopeDirective(const OMPScopeDirective &S);
  void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
  void EmitOMPSectionDirective(const OMPSectionDirective &S);
  void EmitOMPSingleDirective(const OMPSingleDirective &S);
  void EmitOMPMasterDirective(const OMPMasterDirective &S);
  void EmitOMPMaskedDirective(const OMPMaskedDirective &S);
  void EmitOMPCriticalDirective(const OMPCriticalDirective &S);
  void EmitOMPParallelForDirective(const OMPParallelForDirective &S);
  void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S);
  void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S);
  void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S);
  void EmitOMPTaskDirective(const OMPTaskDirective &S);
  void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
  void EmitOMPErrorDirective(const OMPErrorDirective &S);
  void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
  void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
  void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
  void EmitOMPFlushDirective(const OMPFlushDirective &S);
  void EmitOMPDepobjDirective(const OMPDepobjDirective &S);
  void EmitOMPScanDirective(const OMPScanDirective &S);
  void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
  void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
  void EmitOMPTargetDirective(const OMPTargetDirective &S);
  void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S);
  void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S);
  void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S);
  void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S);
  void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S);
  void
  EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S);
  void EmitOMPTeamsDirective(const OMPTeamsDirective &S);
  void
  EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S);
  void EmitOMPCancelDirective(const OMPCancelDirective &S);
  void EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S);
  void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S);
  void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S);
  void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S);
  void EmitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &S);
  void
  EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S);
  void
  EmitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &S);
  void EmitOMPParallelMasterTaskLoopDirective(
      const OMPParallelMasterTaskLoopDirective &S);
  void EmitOMPParallelMaskedTaskLoopDirective(
      const OMPParallelMaskedTaskLoopDirective &S);
  void EmitOMPParallelMasterTaskLoopSimdDirective(
      const OMPParallelMasterTaskLoopSimdDirective &S);
  void EmitOMPParallelMaskedTaskLoopSimdDirective(
      const OMPParallelMaskedTaskLoopSimdDirective &S);
  void EmitOMPDistributeDirective(const OMPDistributeDirective &S);
  void EmitOMPDistributeParallelForDirective(
      const OMPDistributeParallelForDirective &S);
  void EmitOMPDistributeParallelForSimdDirective(
      const OMPDistributeParallelForSimdDirective &S);
  void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S);
  void EmitOMPTargetParallelForSimdDirective(
      const OMPTargetParallelForSimdDirective &S);
  void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S);
  void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S);
  void
  EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S);
  void EmitOMPTeamsDistributeParallelForSimdDirective(
      const OMPTeamsDistributeParallelForSimdDirective &S);
  void EmitOMPTeamsDistributeParallelForDirective(
      const OMPTeamsDistributeParallelForDirective &S);
  void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S);
  void EmitOMPTargetTeamsDistributeDirective(
      const OMPTargetTeamsDistributeDirective &S);
  void EmitOMPTargetTeamsDistributeParallelForDirective(
      const OMPTargetTeamsDistributeParallelForDirective &S);
  void EmitOMPTargetTeamsDistributeParallelForSimdDirective(
      const OMPTargetTeamsDistributeParallelForSimdDirective &S);
  void EmitOMPTargetTeamsDistributeSimdDirective(
      const OMPTargetTeamsDistributeSimdDirective &S);
  void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S);
  void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S);
  void EmitOMPTargetParallelGenericLoopDirective(
      const OMPTargetParallelGenericLoopDirective &S);
  void EmitOMPTargetTeamsGenericLoopDirective(
      const OMPTargetTeamsGenericLoopDirective &S);
  void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S);
  void EmitOMPInteropDirective(const OMPInteropDirective &S);
  void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S);
  void EmitOMPAssumeDirective(const OMPAssumeDirective &S);
4000
  /// Emit device code for the target directive.
  static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
                                          StringRef ParentName,
                                          const OMPTargetDirective &S);
  /// Emit device code for the target parallel directive.
  static void
  EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
                                      const OMPTargetParallelDirective &S);
  /// Emit device code for the target parallel for directive.
  static void EmitOMPTargetParallelForDeviceFunction(
      CodeGenModule &CGM, StringRef ParentName,
      const OMPTargetParallelForDirective &S);
  /// Emit device code for the target parallel for simd directive.
  static void EmitOMPTargetParallelForSimdDeviceFunction(
      CodeGenModule &CGM, StringRef ParentName,
      const OMPTargetParallelForSimdDirective &S);
  /// Emit device code for the target teams directive.
  static void
  EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
                                   const OMPTargetTeamsDirective &S);
  /// Emit device code for the target teams distribute directive.
  static void EmitOMPTargetTeamsDistributeDeviceFunction(
      CodeGenModule &CGM, StringRef ParentName,
      const OMPTargetTeamsDistributeDirective &S);
  /// Emit device code for the target teams distribute simd directive.
  static void EmitOMPTargetTeamsDistributeSimdDeviceFunction(
      CodeGenModule &CGM, StringRef ParentName,
      const OMPTargetTeamsDistributeSimdDirective &S);
  /// Emit device code for the target simd directive.
  static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM,
                                              StringRef ParentName,
                                              const OMPTargetSimdDirective &S);
  /// Emit device code for the target teams distribute parallel for simd
  /// directive.
  static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
      CodeGenModule &CGM, StringRef ParentName,
      const OMPTargetTeamsDistributeParallelForSimdDirective &S);

  /// Emit device code for the target teams loop directive.
  static void EmitOMPTargetTeamsGenericLoopDeviceFunction(
      CodeGenModule &CGM, StringRef ParentName,
      const OMPTargetTeamsGenericLoopDirective &S);

  /// Emit device code for the target parallel loop directive.
  static void EmitOMPTargetParallelGenericLoopDeviceFunction(
      CodeGenModule &CGM, StringRef ParentName,
      const OMPTargetParallelGenericLoopDirective &S);

  /// Emit device code for the target teams distribute parallel for directive.
  static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
      CodeGenModule &CGM, StringRef ParentName,
      const OMPTargetTeamsDistributeParallelForDirective &S);

  /// Emit the Stmt \p S and return its topmost canonical loop, if any.
  /// TODO: The \p Depth parameter is not yet implemented and must be 1. In the
  /// future it is meant to be the number of loops expected in the loop nests
  /// (usually specified by the "collapse" clause) that are collapsed to a
  /// single loop by this function.
  llvm::CanonicalLoopInfo *EmitOMPCollapsedCanonicalLoopNest(const Stmt *S,
                                                             int Depth);
4059
  /// Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
  void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S);

  /// Emit inner loop of the worksharing/simd construct.
  ///
  /// \param S Directive, for which the inner loop must be emitted.
  /// \param RequiresCleanup true, if directive has some associated private
  /// variables.
  /// \param LoopCond Boolean condition for loop continuation.
  /// \param IncExpr Increment expression for loop control variable.
  /// \param BodyGen Generator for the inner body of the inner loop.
  /// \param PostIncGen Generator for post-increment code (required for ordered
  /// loop directives).
  void EmitOMPInnerLoop(
      const OMPExecutableDirective &S, bool RequiresCleanup,
      const Expr *LoopCond, const Expr *IncExpr,
      const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
      const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);

  /// Return the branch target used when an OpenMP cancellation of kind
  /// \p Kind fires.
  JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind);
  /// Emit initial code for loop counters of loop-based directives.
  void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S,
                                  OMPPrivateScope &LoopScope);

  /// Helper for the OpenMP loop directives.
  void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);

  /// Emit code for the worksharing loop-based directive.
  /// \return true, if this construct has any lastprivate clause, false -
  /// otherwise.
  bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB,
                              const CodeGenLoopBoundsTy &CodeGenLoopBounds,
                              const CodeGenDispatchBoundsTy &CGDispatchBounds);

  /// Emit code for the distribute loop-based directive.
  void EmitOMPDistributeLoop(const OMPLoopDirective &S,
                             const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);

  /// Helpers for the OpenMP loop directives.
  void EmitOMPSimdInit(const OMPLoopDirective &D);
  void EmitOMPSimdFinal(
      const OMPLoopDirective &D,
      const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);

  /// Emits the lvalue for the expression with possibly captured variable.
  LValue EmitOMPSharedLValue(const Expr *E);
private:
  /// Helpers for blocks.
  llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);

  /// struct with the values to be passed to the OpenMP loop-related functions
  struct OMPLoopArguments {
    /// loop lower bound
    Address LB = Address::invalid();
    /// loop upper bound
    Address UB = Address::invalid();
    /// loop stride
    Address ST = Address::invalid();
    /// isLastIteration argument for runtime functions
    Address IL = Address::invalid();
    /// Chunk value generated by sema
    llvm::Value *Chunk = nullptr;
    /// EnsureUpperBound
    Expr *EUB = nullptr;
    /// IncrementExpression
    Expr *IncExpr = nullptr;
    /// Loop initialization
    Expr *Init = nullptr;
    /// Loop exit condition
    Expr *Cond = nullptr;
    /// Update of LB after a whole chunk has been executed
    Expr *NextLB = nullptr;
    /// Update of UB after a whole chunk has been executed
    Expr *NextUB = nullptr;
    /// Distinguish between the for distribute and sections
    OpenMPDirectiveKind DKind = llvm::omp::OMPD_unknown;
    OMPLoopArguments() = default;
    // Note: DKind is not settable via this constructor; callers assign it
    // separately (it keeps its OMPD_unknown default here).
    OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
                     llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
                     Expr *IncExpr = nullptr, Expr *Init = nullptr,
                     Expr *Cond = nullptr, Expr *NextLB = nullptr,
                     Expr *NextUB = nullptr)
        : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
          IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
          NextUB(NextUB) {}
  };
  /// NOTE(review): shared outer-loop emission presumably used by the two
  /// more specific helpers below — confirm in CGStmtOpenMP.cpp.
  void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
                        const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
                        const OMPLoopArguments &LoopArgs,
                        const CodeGenLoopTy &CodeGenLoop,
                        const CodeGenOrderedTy &CodeGenOrdered);
  void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
                           bool IsMonotonic, const OMPLoopDirective &S,
                           OMPPrivateScope &LoopScope, bool Ordered,
                           const OMPLoopArguments &LoopArgs,
                           const CodeGenDispatchBoundsTy &CGDispatchBounds);
  void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
                                  const OMPLoopDirective &S,
                                  OMPPrivateScope &LoopScope,
                                  const OMPLoopArguments &LoopArgs,
                                  const CodeGenLoopTy &CodeGenLoopContent);
  /// Emit code for sections directive.
  void EmitSections(const OMPExecutableDirective &S);
4164
4165public:
4166 //===--------------------------------------------------------------------===//
4167 // OpenACC Emission
4168 //===--------------------------------------------------------------------===//
4169 void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S) {
4170 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4171 // simply emitting its structured block, but in the future we will implement
4172 // some sort of IR.
4173 EmitStmt(S: S.getStructuredBlock());
4174 }
4175
4176 void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S) {
4177 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4178 // simply emitting its loop, but in the future we will implement
4179 // some sort of IR.
4180 EmitStmt(S: S.getLoop());
4181 }
4182
4183 void EmitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &S) {
4184 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4185 // simply emitting its loop, but in the future we will implement
4186 // some sort of IR.
4187 EmitStmt(S: S.getLoop());
4188 }
4189
4190 void EmitOpenACCDataConstruct(const OpenACCDataConstruct &S) {
4191 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4192 // simply emitting its structured block, but in the future we will implement
4193 // some sort of IR.
4194 EmitStmt(S: S.getStructuredBlock());
4195 }
4196
  void EmitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // but in the future we will implement some sort of IR.
    // \p S is intentionally unused until then.
  }
4201
  void EmitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // but in the future we will implement some sort of IR.
    // \p S is intentionally unused until then.
  }
4206
4207 void EmitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &S) {
4208 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4209 // simply emitting its structured block, but in the future we will implement
4210 // some sort of IR.
4211 EmitStmt(S: S.getStructuredBlock());
4212 }
4213
  void EmitOpenACCWaitConstruct(const OpenACCWaitConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // but in the future we will implement some sort of IR.
    // \p S is intentionally unused until then.
  }
4218
  void EmitOpenACCInitConstruct(const OpenACCInitConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // but in the future we will implement some sort of IR.
    // \p S is intentionally unused until then.
  }
4223
  void EmitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // but in the future we will implement some sort of IR.
    // \p S is intentionally unused until then.
  }
4228
  void EmitOpenACCSetConstruct(const OpenACCSetConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // but in the future we will implement some sort of IR.
    // \p S is intentionally unused until then.
  }
4233
  void EmitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // but in the future we will implement some sort of IR.
    // \p S is intentionally unused until then.
  }
4238
4239 void EmitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &S) {
4240 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4241 // simply emitting its associated stmt, but in the future we will implement
4242 // some sort of IR.
4243 EmitStmt(S: S.getAssociatedStmt());
4244 }
  void EmitOpenACCCacheConstruct(const OpenACCCacheConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // but in the future we will implement some sort of IR.
    // \p S is intentionally unused until then.
  }
4249
  //===--------------------------------------------------------------------===//
  // LValue Expression Emission
  //===--------------------------------------------------------------------===//

  /// Create a check that a scalar RValue is non-null.
  llvm::Value *EmitNonNullRValueCheck(RValue RV, QualType T);

  /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
  RValue GetUndefRValue(QualType Ty);

  /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
  /// and issue an ErrorUnsupported style diagnostic (using the
  /// provided Name).
  RValue EmitUnsupportedRValue(const Expr *E, const char *Name);

  /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
  /// an ErrorUnsupported style diagnostic (using the provided Name).
  LValue EmitUnsupportedLValue(const Expr *E, const char *Name);

  /// EmitLValue - Emit code to compute a designator that specifies the location
  /// of the expression.
  ///
  /// This can return one of two things: a simple address or a bitfield
  /// reference. In either case, the LLVM Value* in the LValue structure is
  /// guaranteed to be an LLVM pointer type.
  ///
  /// If this returns a bitfield reference, nothing about the pointee type of
  /// the LLVM value is known: For example, it may not be a pointer to an
  /// integer.
  ///
  /// If this returns a normal address, and if the lvalue's C type is fixed
  /// size, this method guarantees that the returned pointer type will point to
  /// an LLVM type of the same size of the lvalue's type. If the lvalue has a
  /// variable length type, this is not possible.
  ///
  LValue EmitLValue(const Expr *E,
                    KnownNonNull_t IsKnownNonNull = NotKnownNonNull);

private:
  /// Implementation detail of EmitLValue (public wrapper above).
  LValue EmitLValueHelper(const Expr *E, KnownNonNull_t IsKnownNonNull);

public:
  /// Same as EmitLValue but additionally we generate checking code to
  /// guard against undefined behavior. This is only suitable when we know
  /// that the address will be used to access the object.
  LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);

  RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc);

  void EmitAtomicInit(Expr *E, LValue lvalue);

  bool LValueIsSuitableForInlineAtomic(LValue Src);

  RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
                        AggValueSlot Slot = AggValueSlot::ignored());

  RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
                        llvm::AtomicOrdering AO, bool IsVolatile = false,
                        AggValueSlot slot = AggValueSlot::ignored());

  void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);

  void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
                       bool IsVolatile, bool isInit);

  std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
      LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent,
      bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());

  /// Emit an atomicrmw instruction, applying relevant metadata when
  /// applicable.
  llvm::AtomicRMWInst *emitAtomicRMWInst(
      llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val,
      llvm::AtomicOrdering Order = llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::SyncScope::ID SSID = llvm::SyncScope::System,
      const AtomicExpr *AE = nullptr);

  void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
                        const llvm::function_ref<RValue(RValue)> &UpdateOp,
                        bool IsVolatile);

  /// EmitToMemory - Change a scalar value from its value
  /// representation to its in-memory representation.
  llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);

  /// EmitFromMemory - Change a scalar value from its memory
  /// representation to its value representation.
  llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);

  /// Check if the scalar \p Value is within the valid range for the given
  /// type \p Ty.
  ///
  /// Returns true if a check is needed (even if the range is unknown).
  bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
                            SourceLocation Loc);

  /// EmitLoadOfScalar - Load a scalar value from an address, taking
  /// care to appropriately convert from the memory representation to
  /// the LLVM value representation.
4353 llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
4354 SourceLocation Loc,
4355 AlignmentSource Source = AlignmentSource::Type,
4356 bool isNontemporal = false) {
4357 return EmitLoadOfScalar(Addr, Volatile, Ty, Loc, BaseInfo: LValueBaseInfo(Source),
4358 TBAAInfo: CGM.getTBAAAccessInfo(AccessType: Ty), isNontemporal);
4359 }
4360
  llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
                                SourceLocation Loc, LValueBaseInfo BaseInfo,
                                TBAAAccessInfo TBAAInfo,
                                bool isNontemporal = false);

  /// EmitLoadOfScalar - Load a scalar value from an address, taking
  /// care to appropriately convert from the memory representation to
  /// the LLVM value representation. The l-value must be a simple
  /// l-value.
  llvm::Value *EmitLoadOfScalar(LValue lvalue, SourceLocation Loc);

  /// EmitStoreOfScalar - Store a scalar value to an address, taking
  /// care to appropriately convert from the LLVM value representation
  /// to the memory representation.
4375 void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile,
4376 QualType Ty,
4377 AlignmentSource Source = AlignmentSource::Type,
4378 bool isInit = false, bool isNontemporal = false) {
4379 EmitStoreOfScalar(Value, Addr, Volatile, Ty, BaseInfo: LValueBaseInfo(Source),
4380 TBAAInfo: CGM.getTBAAAccessInfo(AccessType: Ty), isInit, isNontemporal);
4381 }
4382
  void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile,
                         QualType Ty, LValueBaseInfo BaseInfo,
                         TBAAAccessInfo TBAAInfo, bool isInit = false,
                         bool isNontemporal = false);

  /// EmitStoreOfScalar - Store a scalar value to an address, taking
  /// care to appropriately convert from the LLVM value representation
  /// to the memory representation. The l-value must be a simple
  /// l-value. The isInit flag indicates whether this is an initialization.
  /// If so, atomic qualifiers are ignored and the store is always non-atomic.
  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                         bool isInit = false);

  /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
  /// this method emits the address of the lvalue, then loads the result as an
  /// rvalue, returning the rvalue.
  RValue EmitLoadOfLValue(LValue V, SourceLocation Loc);
  RValue EmitLoadOfExtVectorElementLValue(LValue V);
  RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc);
  RValue EmitLoadOfGlobalRegLValue(LValue LV);

  /// Like EmitLoadOfLValue but also handles complex and aggregate types.
  RValue EmitLoadOfAnyValue(LValue V,
                            AggValueSlot Slot = AggValueSlot::ignored(),
                            SourceLocation Loc = {});

  /// EmitStoreThroughLValue - Store the specified rvalue into the specified
  /// lvalue, where both are guaranteed to have the same type, and that type
  /// is 'Ty'.
  void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
  void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
  void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst);

  /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
  /// as EmitStoreThroughLValue.
  ///
  /// \param Result [out] - If non-null, this will be set to a Value* for the
  /// bit-field contents after the store, appropriate for use as the result of
  /// an assignment to the bit-field.
  void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                      llvm::Value **Result = nullptr);

  /// Emit an l-value for an assignment (simple or compound) of complex type.
  LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
  LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
  LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
                                             llvm::Value *&Result);

  // Per-expression-kind l-value emitters, dispatched from EmitLValue.
  // Note: only available for agg return types
  LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
  LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
  // Note: only available for agg return types
  LValue EmitCallExprLValue(const CallExpr *E,
                            llvm::CallBase **CallOrInvoke = nullptr);
  // Note: only available for agg return types
  LValue EmitVAArgExprLValue(const VAArgExpr *E);
  LValue EmitDeclRefLValue(const DeclRefExpr *E);
  LValue EmitStringLiteralLValue(const StringLiteral *E);
  LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
  LValue EmitPredefinedLValue(const PredefinedExpr *E);
  LValue EmitUnaryOpLValue(const UnaryOperator *E);
  LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
                                bool Accessed = false);
  llvm::Value *EmitMatrixIndexExpr(const Expr *E);
  LValue EmitMatrixSingleSubscriptExpr(const MatrixSingleSubscriptExpr *E);
  LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E);
  LValue EmitArraySectionExpr(const ArraySectionExpr *E,
                              bool IsLowerBound = true);
  LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
  LValue EmitMatrixElementExpr(const MatrixElementExpr *E);
  LValue EmitMemberExpr(const MemberExpr *E);
  LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
  LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
  LValue EmitInitListLValue(const InitListExpr *E);
  void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E);
  LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
  LValue EmitCastLValue(const CastExpr *E);
  LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
  LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
  LValue EmitHLSLArrayAssignLValue(const BinaryOperator *E);

  std::pair<LValue, LValue> EmitHLSLOutArgLValues(const HLSLOutArgExpr *E,
                                                  QualType Ty);
  LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args,
                            QualType Ty);

  Address EmitExtVectorElementLValue(LValue V);

  RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);

  Address EmitArrayToPointerDecay(const Expr *Array,
                                  LValueBaseInfo *BaseInfo = nullptr,
                                  TBAAAccessInfo *TBAAInfo = nullptr);
4477 class ConstantEmission {
4478 llvm::PointerIntPair<llvm::Constant *, 1, bool> ValueAndIsReference;
4479 ConstantEmission(llvm::Constant *C, bool isReference)
4480 : ValueAndIsReference(C, isReference) {}
4481
4482 public:
4483 ConstantEmission() {}
4484 static ConstantEmission forReference(llvm::Constant *C) {
4485 return ConstantEmission(C, true);
4486 }
4487 static ConstantEmission forValue(llvm::Constant *C) {
4488 return ConstantEmission(C, false);
4489 }
4490
4491 explicit operator bool() const {
4492 return ValueAndIsReference.getOpaqueValue() != nullptr;
4493 }
4494
4495 bool isReference() const { return ValueAndIsReference.getInt(); }
4496 LValue getReferenceLValue(CodeGenFunction &CGF, const Expr *RefExpr) const {
4497 assert(isReference());
4498 return CGF.MakeNaturalAlignAddrLValue(V: ValueAndIsReference.getPointer(),
4499 T: RefExpr->getType());
4500 }
4501
4502 llvm::Constant *getValue() const {
4503 assert(!isReference());
4504 return ValueAndIsReference.getPointer();
4505 }
4506 };
4507
  /// Attempt to emit the given expression as a constant; the returned
  /// ConstantEmission is empty on failure.
  ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr);
  ConstantEmission tryEmitAsConstant(const MemberExpr *ME);
  llvm::Value *emitScalarConstant(const ConstantEmission &Constant, Expr *E);

  RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
                                AggValueSlot slot = AggValueSlot::ignored());
  LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);

  void FlattenAccessAndTypeLValue(LValue LVal,
                                  SmallVectorImpl<LValue> &AccessList);

  llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                              const ObjCIvarDecl *Ivar);
  llvm::Value *EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
                                           const ObjCIvarDecl *Ivar);
  LValue EmitLValueForField(LValue Base, const FieldDecl *Field,
                            bool IsInBounds = true);
  LValue EmitLValueForLambdaField(const FieldDecl *Field);
  LValue EmitLValueForLambdaField(const FieldDecl *Field,
                                  llvm::Value *ThisValue);

  /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
  /// if the Field is a reference, this will return the address of the reference
  /// and not the address of the value stored in the reference.
  LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field);

  LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base,
                           const ObjCIvarDecl *Ivar, unsigned CVRQualifiers);

  LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
  LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
  LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
  LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);

  LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
  LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
  LValue EmitStmtExprLValue(const StmtExpr *E);
  LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
  LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
  void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init);

  //===--------------------------------------------------------------------===//
  // Scalar Expression Emission
  //===--------------------------------------------------------------------===//

  /// EmitCall - Generate a call of the given function, expecting the given
  /// result type, and using the given argument list which specifies both the
  /// LLVM arguments and the types they were derived from.
  RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
                  ReturnValueSlot ReturnValue, const CallArgList &Args,
                  llvm::CallBase **CallOrInvoke, bool IsMustTail,
                  SourceLocation Loc,
                  bool IsVirtualFunctionPointerThunk = false);
4561 RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
4562 ReturnValueSlot ReturnValue, const CallArgList &Args,
4563 llvm::CallBase **CallOrInvoke = nullptr,
4564 bool IsMustTail = false) {
4565 return EmitCall(CallInfo, Callee, ReturnValue, Args, CallOrInvoke,
4566 IsMustTail, Loc: SourceLocation());
4567 }
4568 RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
4569 ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr,
4570 llvm::CallBase **CallOrInvoke = nullptr,
4571 CGFunctionInfo const **ResolvedFnInfo = nullptr);
4572
4573 // If a Call or Invoke instruction was emitted for this CallExpr, this method
4574 // writes the pointer to `CallOrInvoke` if it's not null.
4575 RValue EmitCallExpr(const CallExpr *E,
4576 ReturnValueSlot ReturnValue = ReturnValueSlot(),
4577 llvm::CallBase **CallOrInvoke = nullptr);
4578 RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue,
4579 llvm::CallBase **CallOrInvoke = nullptr);
4580 CGCallee EmitCallee(const Expr *E);
4581
4582 void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
4583 void checkTargetFeatures(SourceLocation Loc, const FunctionDecl *TargetDecl);
4584
4585 llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
4586 const Twine &name = "");
4587 llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
4588 ArrayRef<llvm::Value *> args,
4589 const Twine &name = "");
4590 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4591 const Twine &name = "");
4592 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4593 ArrayRef<Address> args,
4594 const Twine &name = "");
4595 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4596 ArrayRef<llvm::Value *> args,
4597 const Twine &name = "");
4598
4599 SmallVector<llvm::OperandBundleDef, 1>
4600 getBundlesForFunclet(llvm::Value *Callee);
4601
4602 llvm::CallBase *EmitCallOrInvoke(llvm::FunctionCallee Callee,
4603 ArrayRef<llvm::Value *> Args,
4604 const Twine &Name = "");
4605 llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4606 ArrayRef<llvm::Value *> args,
4607 const Twine &name = "");
4608 llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4609 const Twine &name = "");
4610 void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4611 ArrayRef<llvm::Value *> args);
4612
4613 CGCallee BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
4614 NestedNameSpecifier Qual, llvm::Type *Ty);
4615
4616 CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
4617 CXXDtorType Type,
4618 const CXXRecordDecl *RD);
4619
4620 bool isPointerKnownNonNull(const Expr *E);
4621 /// Check whether the underlying base pointer is a constant null.
4622 bool isUnderlyingBasePointerConstantNull(const Expr *E);
4623
4624 /// Create the discriminator from the storage address and the entity hash.
4625 llvm::Value *EmitPointerAuthBlendDiscriminator(llvm::Value *StorageAddress,
4626 llvm::Value *Discriminator);
4627 CGPointerAuthInfo EmitPointerAuthInfo(const PointerAuthSchema &Schema,
4628 llvm::Value *StorageAddress,
4629 GlobalDecl SchemaDecl,
4630 QualType SchemaType);
4631
4632 llvm::Value *EmitPointerAuthSign(const CGPointerAuthInfo &Info,
4633 llvm::Value *Pointer);
4634
4635 llvm::Value *EmitPointerAuthAuth(const CGPointerAuthInfo &Info,
4636 llvm::Value *Pointer);
4637
4638 llvm::Value *emitPointerAuthResign(llvm::Value *Pointer, QualType PointerType,
4639 const CGPointerAuthInfo &CurAuthInfo,
4640 const CGPointerAuthInfo &NewAuthInfo,
4641 bool IsKnownNonNull);
4642 llvm::Value *emitPointerAuthResignCall(llvm::Value *Pointer,
4643 const CGPointerAuthInfo &CurInfo,
4644 const CGPointerAuthInfo &NewInfo);
4645
4646 void EmitPointerAuthOperandBundle(
4647 const CGPointerAuthInfo &Info,
4648 SmallVectorImpl<llvm::OperandBundleDef> &Bundles);
4649
4650 CGPointerAuthInfo EmitPointerAuthInfo(PointerAuthQualifier Qualifier,
4651 Address StorageAddress);
4652 llvm::Value *EmitPointerAuthQualify(PointerAuthQualifier Qualifier,
4653 llvm::Value *Pointer, QualType ValueType,
4654 Address StorageAddress,
4655 bool IsKnownNonNull);
4656 llvm::Value *EmitPointerAuthQualify(PointerAuthQualifier Qualifier,
4657 const Expr *PointerExpr,
4658 Address StorageAddress);
4659 llvm::Value *EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier,
4660 llvm::Value *Pointer,
4661 QualType PointerType,
4662 Address StorageAddress,
4663 bool IsKnownNonNull);
4664 void EmitPointerAuthCopy(PointerAuthQualifier Qualifier, QualType Type,
4665 Address DestField, Address SrcField);
4666
4667 std::pair<llvm::Value *, CGPointerAuthInfo>
4668 EmitOrigPointerRValue(const Expr *E);
4669
4670 llvm::Value *authPointerToPointerCast(llvm::Value *ResultPtr,
4671 QualType SourceType, QualType DestType);
4672 Address authPointerToPointerCast(Address Ptr, QualType SourceType,
4673 QualType DestType);
4674
4675 Address getAsNaturalAddressOf(Address Addr, QualType PointeeTy);
4676
4677 llvm::Value *getAsNaturalPointerTo(Address Addr, QualType PointeeType) {
4678 return getAsNaturalAddressOf(Addr, PointeeTy: PointeeType).getBasePointer();
4679 }
4680
4681 // Return the copy constructor name with the prefix "__copy_constructor_"
4682 // removed.
4683 static std::string getNonTrivialCopyConstructorStr(QualType QT,
4684 CharUnits Alignment,
4685 bool IsVolatile,
4686 ASTContext &Ctx);
4687
4688 // Return the destructor name with the prefix "__destructor_" removed.
4689 static std::string getNonTrivialDestructorStr(QualType QT,
4690 CharUnits Alignment,
4691 bool IsVolatile,
4692 ASTContext &Ctx);
4693
4694 // These functions emit calls to the special functions of non-trivial C
4695 // structs.
4696 void defaultInitNonTrivialCStructVar(LValue Dst);
4697 void callCStructDefaultConstructor(LValue Dst);
4698 void callCStructDestructor(LValue Dst);
4699 void callCStructCopyConstructor(LValue Dst, LValue Src);
4700 void callCStructMoveConstructor(LValue Dst, LValue Src);
4701 void callCStructCopyAssignmentOperator(LValue Dst, LValue Src);
4702 void callCStructMoveAssignmentOperator(LValue Dst, LValue Src);
4703
4704 RValue EmitCXXMemberOrOperatorCall(
4705 const CXXMethodDecl *Method, const CGCallee &Callee,
4706 ReturnValueSlot ReturnValue, llvm::Value *This,
4707 llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *E,
4708 CallArgList *RtlArgs, llvm::CallBase **CallOrInvoke);
4709 RValue EmitCXXDestructorCall(GlobalDecl Dtor, const CGCallee &Callee,
4710 llvm::Value *This, QualType ThisTy,
4711 llvm::Value *ImplicitParam,
4712 QualType ImplicitParamTy, const CallExpr *E,
4713 llvm::CallBase **CallOrInvoke = nullptr);
4714 RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
4715 ReturnValueSlot ReturnValue,
4716 llvm::CallBase **CallOrInvoke = nullptr);
4717 RValue EmitCXXMemberOrOperatorMemberCallExpr(
4718 const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
4719 bool HasQualifier, NestedNameSpecifier Qualifier, bool IsArrow,
4720 const Expr *Base, llvm::CallBase **CallOrInvoke);
4721 // Compute the object pointer.
4722 Address EmitCXXMemberDataPointerAddress(
4723 const Expr *E, Address base, llvm::Value *memberPtr,
4724 const MemberPointerType *memberPtrType, bool IsInBounds,
4725 LValueBaseInfo *BaseInfo = nullptr, TBAAAccessInfo *TBAAInfo = nullptr);
4726 RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
4727 ReturnValueSlot ReturnValue,
4728 llvm::CallBase **CallOrInvoke);
4729
4730 RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
4731 const CXXMethodDecl *MD,
4732 ReturnValueSlot ReturnValue,
4733 llvm::CallBase **CallOrInvoke);
4734 RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E);
4735
4736 RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
4737 ReturnValueSlot ReturnValue,
4738 llvm::CallBase **CallOrInvoke);
4739
4740 RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E);
4741 RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E);
4742
4743 RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
4744 const CallExpr *E, ReturnValueSlot ReturnValue);
4745
4746 RValue emitRotate(const CallExpr *E, bool IsRotateRight);
4747
4748 /// Emit IR for __builtin_os_log_format.
4749 RValue emitBuiltinOSLogFormat(const CallExpr &E);
4750
4751 /// Emit IR for __builtin_is_aligned.
4752 RValue EmitBuiltinIsAligned(const CallExpr *E);
4753 /// Emit IR for __builtin_align_up/__builtin_align_down.
4754 RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp);
4755
4756 llvm::Function *generateBuiltinOSLogHelperFunction(
4757 const analyze_os_log::OSLogBufferLayout &Layout,
4758 CharUnits BufferAlignment);
4759
4760 RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue,
4761 llvm::CallBase **CallOrInvoke);
4762
4763 /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
4764 /// is unhandled by the current target.
4765 llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4766 ReturnValueSlot ReturnValue);
4767
4768 llvm::Value *
4769 EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty,
4770 const llvm::CmpInst::Predicate Pred,
4771 const llvm::Twine &Name = "");
4772 llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4773 ReturnValueSlot ReturnValue,
4774 llvm::Triple::ArchType Arch);
4775 llvm::Value *EmitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4776 ReturnValueSlot ReturnValue,
4777 llvm::Triple::ArchType Arch);
4778 llvm::Value *EmitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4779 ReturnValueSlot ReturnValue,
4780 llvm::Triple::ArchType Arch);
4781 llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy,
4782 QualType RTy);
4783 llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::ArrayType *ATy,
4784 QualType RTy);
4785
4786 llvm::Value *
4787 EmitCommonNeonBuiltinExpr(unsigned BuiltinID, unsigned LLVMIntrinsic,
4788 unsigned AltLLVMIntrinsic, const char *NameHint,
4789 unsigned Modifier, const CallExpr *E,
4790 SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0,
4791 Address PtrOp1, llvm::Triple::ArchType Arch);
4792
4793 llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
4794 unsigned Modifier, llvm::Type *ArgTy,
4795 const CallExpr *E);
4796 llvm::Value *EmitNeonCall(llvm::Function *F,
4797 SmallVectorImpl<llvm::Value *> &O, const char *name,
4798 unsigned shift = 0, bool rightshift = false);
4799 llvm::Value *EmitFP8NeonCall(unsigned IID, ArrayRef<llvm::Type *> Tys,
4800 SmallVectorImpl<llvm::Value *> &O,
4801 const CallExpr *E, const char *name);
4802 llvm::Value *EmitFP8NeonCvtCall(unsigned IID, llvm::Type *Ty0,
4803 llvm::Type *Ty1, bool Extract,
4804 SmallVectorImpl<llvm::Value *> &Ops,
4805 const CallExpr *E, const char *name);
4806 llvm::Value *EmitFP8NeonFDOTCall(unsigned IID, bool ExtendLaneArg,
4807 llvm::Type *RetTy,
4808 SmallVectorImpl<llvm::Value *> &Ops,
4809 const CallExpr *E, const char *name);
4810 llvm::Value *EmitFP8NeonFMLACall(unsigned IID, bool ExtendLaneArg,
4811 llvm::Type *RetTy,
4812 SmallVectorImpl<llvm::Value *> &Ops,
4813 const CallExpr *E, const char *name);
4814 llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx,
4815 const llvm::ElementCount &Count);
4816 llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
4817 llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
4818 bool negateForRightShift);
4819 llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
4820 llvm::Type *Ty, bool usgn, const char *name);
4821 llvm::Value *vectorWrapScalar16(llvm::Value *Op);
4822 /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
4823 /// access builtin. Only required if it can't be inferred from the base
4824 /// pointer operand.
4825 llvm::Type *SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags);
4826
4827 SmallVector<llvm::Type *, 2>
4828 getSVEOverloadTypes(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType,
4829 ArrayRef<llvm::Value *> Ops);
4830 llvm::Type *getEltType(const SVETypeFlags &TypeFlags);
4831 llvm::ScalableVectorType *getSVEType(const SVETypeFlags &TypeFlags);
4832 llvm::ScalableVectorType *getSVEPredType(const SVETypeFlags &TypeFlags);
4833 llvm::Value *EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
4834 ArrayRef<llvm::Value *> Ops);
4835 llvm::Value *EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
4836 llvm::Type *ReturnType,
4837 ArrayRef<llvm::Value *> Ops);
4838 llvm::Value *EmitSVEAllTruePred(const SVETypeFlags &TypeFlags);
4839 llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
4840 llvm::Value *EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty);
4841 llvm::Value *EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty);
4842 llvm::Value *EmitSVEPMull(const SVETypeFlags &TypeFlags,
4843 llvm::SmallVectorImpl<llvm::Value *> &Ops,
4844 unsigned BuiltinID);
4845 llvm::Value *EmitSVEMovl(const SVETypeFlags &TypeFlags,
4846 llvm::ArrayRef<llvm::Value *> Ops,
4847 unsigned BuiltinID);
4848 llvm::Value *EmitSVEPredicateCast(llvm::Value *Pred,
4849 llvm::ScalableVectorType *VTy);
4850 llvm::Value *EmitSVEPredicateTupleCast(llvm::Value *PredTuple,
4851 llvm::StructType *Ty);
4852 llvm::Value *EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
4853 llvm::SmallVectorImpl<llvm::Value *> &Ops,
4854 unsigned IntID);
4855 llvm::Value *EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
4856 llvm::SmallVectorImpl<llvm::Value *> &Ops,
4857 unsigned IntID);
4858 llvm::Value *EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy,
4859 SmallVectorImpl<llvm::Value *> &Ops,
4860 unsigned BuiltinID, bool IsZExtReturn);
4861 llvm::Value *EmitSVEMaskedStore(const CallExpr *,
4862 SmallVectorImpl<llvm::Value *> &Ops,
4863 unsigned BuiltinID);
4864 llvm::Value *EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
4865 SmallVectorImpl<llvm::Value *> &Ops,
4866 unsigned BuiltinID);
4867 llvm::Value *EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
4868 SmallVectorImpl<llvm::Value *> &Ops,
4869 unsigned IntID);
4870 llvm::Value *EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
4871 SmallVectorImpl<llvm::Value *> &Ops,
4872 unsigned IntID);
4873 llvm::Value *EmitSVEStructStore(const SVETypeFlags &TypeFlags,
4874 SmallVectorImpl<llvm::Value *> &Ops,
4875 unsigned IntID);
4876 llvm::Value *EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4877
4878 llvm::Value *EmitSMELd1St1(const SVETypeFlags &TypeFlags,
4879 llvm::SmallVectorImpl<llvm::Value *> &Ops,
4880 unsigned IntID);
4881 llvm::Value *EmitSMEReadWrite(const SVETypeFlags &TypeFlags,
4882 llvm::SmallVectorImpl<llvm::Value *> &Ops,
4883 unsigned IntID);
4884 llvm::Value *EmitSMEZero(const SVETypeFlags &TypeFlags,
4885 llvm::SmallVectorImpl<llvm::Value *> &Ops,
4886 unsigned IntID);
4887 llvm::Value *EmitSMELdrStr(const SVETypeFlags &TypeFlags,
4888 llvm::SmallVectorImpl<llvm::Value *> &Ops,
4889 unsigned IntID);
4890
4891 void GetAArch64SVEProcessedOperands(unsigned BuiltinID, const CallExpr *E,
4892 SmallVectorImpl<llvm::Value *> &Ops,
4893 SVETypeFlags TypeFlags);
4894
4895 llvm::Value *EmitAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4896
4897 llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4898 llvm::Triple::ArchType Arch);
4899 llvm::Value *EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4900
4901 llvm::Value *BuildVector(ArrayRef<llvm::Value *> Ops);
4902 llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4903 llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4904 llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4905 llvm::Value *EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4906 ReturnValueSlot ReturnValue);
4907
4908 // Returns a builtin function that the SPIR-V backend will expand into a spec
4909 // constant.
4910 llvm::Function *
4911 getSpecConstantFunction(const clang::QualType &SpecConstantType);
4912
4913 llvm::Value *EmitDirectXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4914 llvm::Value *EmitSPIRVBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4915 llvm::Value *EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx,
4916 const CallExpr *E);
4917 llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4918 llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4919 llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
4920 const CallExpr *E);
4921 llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4922 llvm::Value *EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4923 ReturnValueSlot ReturnValue);
4924
4925 llvm::Value *EmitRISCVCpuSupports(const CallExpr *E);
4926 llvm::Value *EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs);
4927 llvm::Value *EmitRISCVCpuInit();
4928 llvm::Value *EmitRISCVCpuIs(const CallExpr *E);
4929 llvm::Value *EmitRISCVCpuIs(StringRef CPUStr);
4930
4931 void AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
4932 const CallExpr *E);
4933 void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
4934 llvm::AtomicOrdering &AO,
4935 llvm::SyncScope::ID &SSID);
4936
4937 enum class MSVCIntrin;
4938 llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E);
4939
4940 llvm::Value *EmitBuiltinAvailable(const VersionTuple &Version);
4941
4942 llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
4943 llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
4944 llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
4945 llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
4946 llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
4947 llvm::Value *
4948 EmitObjCCollectionLiteral(const Expr *E,
4949 const ObjCMethodDecl *MethodWithObjects);
4950 llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
4951 RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
4952 ReturnValueSlot Return = ReturnValueSlot());
4953
4954 /// Retrieves the default cleanup kind for an ARC cleanup.
4955 /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
4956 CleanupKind getARCCleanupKind() {
4957 return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions ? NormalAndEHCleanup
4958 : NormalCleanup;
4959 }
4960
4961 // ARC primitives.
4962 void EmitARCInitWeak(Address addr, llvm::Value *value);
4963 void EmitARCDestroyWeak(Address addr);
4964 llvm::Value *EmitARCLoadWeak(Address addr);
4965 llvm::Value *EmitARCLoadWeakRetained(Address addr);
4966 llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
4967 void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
4968 void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
4969 void EmitARCCopyWeak(Address dst, Address src);
4970 void EmitARCMoveWeak(Address dst, Address src);
4971 llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
4972 llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
4973 llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
4974 bool resultIgnored);
4975 llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value,
4976 bool resultIgnored);
4977 llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
4978 llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
4979 llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
4980 void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise);
4981 void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
4982 llvm::Value *EmitARCAutorelease(llvm::Value *value);
4983 llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
4984 llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
4985 llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
4986 llvm::Value *EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value);
4987
4988 llvm::Value *EmitObjCAutorelease(llvm::Value *value, llvm::Type *returnType);
4989 llvm::Value *EmitObjCRetainNonBlock(llvm::Value *value,
4990 llvm::Type *returnType);
4991 void EmitObjCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
4992
4993 std::pair<LValue, llvm::Value *>
4994 EmitARCStoreAutoreleasing(const BinaryOperator *e);
4995 std::pair<LValue, llvm::Value *> EmitARCStoreStrong(const BinaryOperator *e,
4996 bool ignored);
4997 std::pair<LValue, llvm::Value *>
4998 EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored);
4999
5000 llvm::Value *EmitObjCAlloc(llvm::Value *value, llvm::Type *returnType);
5001 llvm::Value *EmitObjCAllocWithZone(llvm::Value *value,
5002 llvm::Type *returnType);
5003 llvm::Value *EmitObjCAllocInit(llvm::Value *value, llvm::Type *resultType);
5004
5005 llvm::Value *EmitObjCThrowOperand(const Expr *expr);
5006 llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
5007 llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
5008
5009 llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
5010 llvm::Value *EmitARCReclaimReturnedObject(const Expr *e,
5011 bool allowUnsafeClaim);
5012 llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
5013 llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
5014 llvm::Value *EmitARCUnsafeUnretainedScalarExpr(const Expr *expr);
5015
5016 void EmitARCIntrinsicUse(ArrayRef<llvm::Value *> values);
5017
5018 void EmitARCNoopIntrinsicUse(ArrayRef<llvm::Value *> values);
5019
5020 static Destroyer destroyARCStrongImprecise;
5021 static Destroyer destroyARCStrongPrecise;
5022 static Destroyer destroyARCWeak;
5023 static Destroyer emitARCIntrinsicUse;
5024 static Destroyer destroyNonTrivialCStruct;
5025
5026 void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
5027 llvm::Value *EmitObjCAutoreleasePoolPush();
5028 llvm::Value *EmitObjCMRRAutoreleasePoolPush();
5029 void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
5030 void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
5031
5032 /// Emits a reference binding to the passed in expression.
5033 RValue EmitReferenceBindingToExpr(const Expr *E);
5034
5035 //===--------------------------------------------------------------------===//
5036 // Expression Emission
5037 //===--------------------------------------------------------------------===//
5038
5039 // Expressions are broken into three classes: scalar, complex, aggregate.
5040
5041 /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
5042 /// scalar type, returning the result.
5043 llvm::Value *EmitScalarExpr(const Expr *E, bool IgnoreResultAssign = false);
5044
5045 /// Emit a conversion from the specified type to the specified destination
5046 /// type, both of which are LLVM scalar types.
5047 llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
5048 QualType DstTy, SourceLocation Loc);
5049
5050 /// Emit a conversion from the specified complex type to the specified
5051 /// destination type, where the destination type is an LLVM scalar type.
5052 llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
5053 QualType DstTy,
5054 SourceLocation Loc);
5055
5056 /// EmitAggExpr - Emit the computation of the specified expression
5057 /// of aggregate type. The result is computed into the given slot,
5058 /// which may be null to indicate that the value is not needed.
5059 void EmitAggExpr(const Expr *E, AggValueSlot AS);
5060
5061 /// EmitAggExprToLValue - Emit the computation of the specified expression of
5062 /// aggregate type into a temporary LValue.
5063 LValue EmitAggExprToLValue(const Expr *E);
5064
5065 enum ExprValueKind { EVK_RValue, EVK_NonRValue };
5066
5067 /// EmitAggFinalDestCopy - Emit copy of the specified aggregate into
5068 /// destination address.
5069 void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src,
5070 ExprValueKind SrcKind);
5071
5072 /// Create a store to \arg DstPtr from \arg Src, truncating the stored value
5073 /// to at most \arg DstSize bytes.
5074 void CreateCoercedStore(llvm::Value *Src, QualType SrcFETy, Address Dst,
5075 llvm::TypeSize DstSize, bool DstIsVolatile);
5076
5077 /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
5078 /// make sure it survives garbage collection until this point.
5079 void EmitExtendGCLifetime(llvm::Value *object);
5080
5081 /// EmitComplexExpr - Emit the computation of the specified expression of
5082 /// complex type, returning the result.
5083 ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal = false,
5084 bool IgnoreImag = false);
5085
5086 /// EmitComplexExprIntoLValue - Emit the given expression of complex
5087 /// type and place its result into the specified l-value.
5088 void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit);
5089
5090 /// EmitStoreOfComplex - Store a complex number into the specified l-value.
5091 void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit);
5092
5093 /// EmitLoadOfComplex - Load a complex number from the specified l-value.
5094 ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);
5095
5096 ComplexPairTy EmitPromotedComplexExpr(const Expr *E, QualType PromotionType);
5097 llvm::Value *EmitPromotedScalarExpr(const Expr *E, QualType PromotionType);
5098 ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType);
5099 ComplexPairTy EmitUnPromotedValue(ComplexPairTy result,
5100 QualType PromotionType);
5101
5102 Address emitAddrOfRealComponent(Address complex, QualType complexType);
5103 Address emitAddrOfImagComponent(Address complex, QualType complexType);
5104
5105 /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
5106 /// global variable that has already been created for it. If the initializer
5107 /// has a different type than GV does, this may free GV and return a different
5108 /// one. Otherwise it just returns GV.
5109 llvm::GlobalVariable *AddInitializerToStaticVarDecl(const VarDecl &D,
5110 llvm::GlobalVariable *GV);
5111
5112 // Emit an @llvm.invariant.start call for the given memory region.
5113 void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size);
5114
5115 /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
5116 /// variable with global storage.
5117 void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV,
5118 bool PerformInit);
5119
5120 llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
5121 llvm::Constant *Addr);
5122
5123 llvm::Function *createTLSAtExitStub(const VarDecl &VD,
5124 llvm::FunctionCallee Dtor,
5125 llvm::Constant *Addr,
5126 llvm::FunctionCallee &AtExit);
5127
5128 /// Call atexit() with a function that passes the given argument to
5129 /// the given function.
5130 void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn,
5131 llvm::Constant *addr);
5132
5133 /// Registers the dtor using 'llvm.global_dtors' for platforms that do not
5134 /// support an 'atexit()' function.
5135 void registerGlobalDtorWithLLVM(const VarDecl &D, llvm::FunctionCallee fn,
5136 llvm::Constant *addr);
5137
5138 /// Call atexit() with function dtorStub.
5139 void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
5140
5141 /// Call unatexit() with function dtorStub.
5142 llvm::Value *unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub);
5143
5144 /// Emit code in this function to perform a guarded variable
5145 /// initialization. Guarded initializations are used when it's not
5146 /// possible to prove that an initialization will be done exactly
5147 /// once, e.g. with a static local variable or a static data member
5148 /// of a class template.
5149 void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
5150 bool PerformInit);
5151
5152 enum class GuardKind { VariableGuard, TlsGuard };
5153
5154 /// Emit a branch to select whether or not to perform guarded initialization.
5155 void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
5156 llvm::BasicBlock *InitBlock,
5157 llvm::BasicBlock *NoInitBlock, GuardKind Kind,
5158 const VarDecl *D);
5159
5160 /// GenerateCXXGlobalInitFunc - Generates code for initializing global
5161 /// variables.
5162 void
5163 GenerateCXXGlobalInitFunc(llvm::Function *Fn,
5164 ArrayRef<llvm::Function *> CXXThreadLocals,
5165 ConstantAddress Guard = ConstantAddress::invalid());
5166
5167 /// GenerateCXXGlobalCleanUpFunc - Generates code for cleaning up global
5168 /// variables.
5169 void GenerateCXXGlobalCleanUpFunc(
5170 llvm::Function *Fn,
5171 ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
5172 llvm::Constant *>>
5173 DtorsOrStermFinalizers);
5174
5175 void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D,
5176 llvm::GlobalVariable *Addr,
5177 bool PerformInit);
5178
5179 void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
5180
5181 void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
5182
5183 void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
5184
5185 RValue EmitAtomicExpr(AtomicExpr *E);
5186
5187 void EmitFakeUse(Address Addr);
5188
5189 //===--------------------------------------------------------------------===//
5190 // Annotations Emission
5191 //===--------------------------------------------------------------------===//
5192
5193 /// Emit an annotation call (intrinsic).
5194 llvm::Value *EmitAnnotationCall(llvm::Function *AnnotationFn,
5195 llvm::Value *AnnotatedVal,
5196 StringRef AnnotationStr,
5197 SourceLocation Location,
5198 const AnnotateAttr *Attr);
5199
5200 /// Emit local annotations for the local variable V, declared by D.
5201 void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
5202
5203 /// Emit field annotations for the given field & value. Returns the
5204 /// annotation result.
5205 Address EmitFieldAnnotations(const FieldDecl *D, Address V);
5206
  //===--------------------------------------------------------------------===//
  //                             Internal Helpers
  //===--------------------------------------------------------------------===//

  /// ContainsLabel - Return true if the statement contains a label in it. If
  /// this statement is not executed normally, it not containing a label means
  /// that we can just remove the code.
  static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);

  /// containsBreak - Return true if the statement contains a break out of it.
  /// If the statement (recursively) contains a switch or loop with a break
  /// inside of it, this is fine.
  static bool containsBreak(const Stmt *S);

  /// Determine if the given statement might introduce a declaration into the
  /// current scope, by being a (possibly-labelled) DeclStmt.
  static bool mightAddDeclToScope(const Stmt *S);

  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
  /// to a constant, or if it does but contains a label, return false. If it
  /// constant folds return true and set the boolean result in Result.
  bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result,
                                    bool AllowLabels = false);

  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
  /// to a constant, or if it does but contains a label, return false. If it
  /// constant folds return true and set the folded value in Result.
  bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result,
                                    bool AllowLabels = false);

  /// Ignore parentheses and logical-NOT to track conditions consistently.
  static const Expr *stripCond(const Expr *C);

  /// isInstrumentedCondition - Determine whether the given condition is an
  /// instrumentable condition (i.e. no "&&" or "||").
  static bool isInstrumentedCondition(const Expr *C);

  /// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
  /// increments a profile counter based on the semantics of the given logical
  /// operator opcode \p LOp. This is used to instrument branch condition
  /// coverage for logical operators.
  void EmitBranchToCounterBlock(const Expr *Cond, BinaryOperator::Opcode LOp,
                                llvm::BasicBlock *TrueBlock,
                                llvm::BasicBlock *FalseBlock,
                                uint64_t TrueCount = 0,
                                Stmt::Likelihood LH = Stmt::LH_None,
                                const Expr *CntrIdx = nullptr);

  /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
  /// if statement) to the specified blocks. Based on the condition, this might
  /// try to simplify the codegen of the conditional based on the branch.
  /// TrueCount should be the number of times we expect the condition to
  /// evaluate to true based on PGO data.
  void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
                            llvm::BasicBlock *FalseBlock, uint64_t TrueCount,
                            Stmt::Likelihood LH = Stmt::LH_None,
                            const Expr *ConditionalOp = nullptr,
                            const VarDecl *ConditionalDecl = nullptr);
5265
  /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
  /// nonnull, if \p LHS is marked _Nonnull.
  void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc);

  /// An enumeration which makes it easier to specify whether or not an
  /// operation is a subtraction: named boolean constants for the
  /// IsSubtraction parameters of the functions below.
  enum { NotSubtraction = false, IsSubtraction = true };

  /// Emit pointer + index arithmetic for the binary operator \p BO.
  /// \p isSubtraction is true when the index is subtracted from the pointer.
  llvm::Value *EmitPointerArithmetic(const BinaryOperator *BO,
                                     Expr *pointerOperand, llvm::Value *pointer,
                                     Expr *indexOperand, llvm::Value *index,
                                     bool isSubtraction);

  /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
  /// detect undefined behavior when the pointer overflow sanitizer is enabled.
  /// \p SignedIndices indicates whether any of the GEP indices are signed.
  /// \p IsSubtraction indicates whether the expression used to form the GEP
  /// is a subtraction.
  llvm::Value *EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr,
                                      ArrayRef<llvm::Value *> IdxList,
                                      bool SignedIndices, bool IsSubtraction,
                                      SourceLocation Loc,
                                      const Twine &Name = "");

  /// Address-returning overload of the above; \p Align is the alignment of
  /// the resulting address.
  Address EmitCheckedInBoundsGEP(Address Addr, ArrayRef<llvm::Value *> IdxList,
                                 llvm::Type *elementType, bool SignedIndices,
                                 bool IsSubtraction, SourceLocation Loc,
                                 CharUnits Align, const Twine &Name = "");

  /// Specifies which type of sanitizer check to apply when handling a
  /// particular builtin.
  enum BuiltinCheckKind {
    BCK_CTZPassedZero,     // zero passed to a ctz-style builtin
    BCK_CLZPassedZero,     // zero passed to a clz-style builtin
    BCK_AssumePassedFalse, // false passed to __builtin_assume
  };
5303
  /// Emits an argument for a call to a builtin. If the builtin sanitizer is
  /// enabled, a runtime check specified by \p Kind is also emitted.
  llvm::Value *EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind);

  /// Emits an argument for a call to a `__builtin_assume`. If the builtin
  /// sanitizer is enabled, a runtime check is also emitted.
  llvm::Value *EmitCheckedArgForAssume(const Expr *E);

  /// Emit a description of a type in a format suitable for passing to
  /// a runtime sanitizer handler.
  llvm::Constant *EmitCheckTypeDescriptor(QualType T);

  /// Convert a value into a format suitable for passing to a runtime
  /// sanitizer handler.
  llvm::Value *EmitCheckValue(llvm::Value *V);

  /// Emit a description of a source location in a format suitable for
  /// passing to a runtime sanitizer handler.
  llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);

  /// Append the KCFI operand bundle for a call to \p Callee onto \p Bundles.
  void EmitKCFIOperandBundle(const CGCallee &Callee,
                             SmallVectorImpl<llvm::OperandBundleDef> &Bundles);

  /// Create a basic block that will either trap or call a handler function in
  /// the UBSan runtime with the provided arguments, and create a conditional
  /// branch to it. Each entry of \p Checked pairs a condition value with the
  /// sanitizer ordinal it is checked under.
  void
  EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
                Checked,
            SanitizerHandler Check, ArrayRef<llvm::Constant *> StaticArgs,
            ArrayRef<llvm::Value *> DynamicArgs,
            const TrapReason *TR = nullptr);

  /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
  /// if \p Cond is false.
  void EmitCfiSlowPathCheck(SanitizerKind::SanitizerOrdinal Ordinal,
                            llvm::Value *Cond, llvm::ConstantInt *TypeId,
                            llvm::Value *Ptr,
                            ArrayRef<llvm::Constant *> StaticArgs);

  /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
  /// checking is enabled. Otherwise, just emit an unreachable instruction.
  void EmitUnreachable(SourceLocation Loc);
5347
  /// Create a basic block that will call the trap intrinsic, and emit a
  /// conditional branch to it, for the -ftrapv checks.
  void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID,
                     bool NoMerge = false, const TrapReason *TR = nullptr);

  /// Emit a call to trap or debugtrap and attach function attribute
  /// "trap-func-name" if specified.
  llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);

  /// Emit a stub for the cross-DSO CFI check function.
  void EmitCfiCheckStub();

  /// Emit a cross-DSO CFI failure handling function.
  void EmitCfiCheckFail();

  /// Create a check for a function parameter that may potentially be
  /// declared as non-null.
  void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
                           AbstractCallee AC, unsigned ParmNum);

  /// Overload of the above taking the argument by address.
  void EmitNonNullArgCheck(Address Addr, QualType ArgType,
                           SourceLocation ArgLoc, AbstractCallee AC,
                           unsigned ParmNum);

  /// EmitWritebacks - Emit the argument writebacks recorded in \p Args.
  void EmitWritebacks(const CallArgList &Args);

  /// EmitCallArg - Emit a single call argument.
  void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);

  /// EmitDelegateCallArg - We are performing a delegate call; that
  /// is, the current function is delegating to another one. Produce
  /// a r-value suitable for passing the given parameter.
  void EmitDelegateCallArg(CallArgList &args, const VarDecl *param,
                           SourceLocation loc);

  /// SetFPAccuracy - Set the minimum required accuracy of the given floating
  /// point operation, expressed as the maximum relative error in ulp.
  void SetFPAccuracy(llvm::Value *Val, float Accuracy);

  /// Set the minimum required accuracy of the given sqrt operation
  /// based on CodeGenOpts.
  void SetSqrtFPAccuracy(llvm::Value *Val);

  /// Set the minimum required accuracy of the given division operation based
  /// on CodeGenOpts.
  void SetDivFPAccuracy(llvm::Value *Val);

  /// Set the codegen fast-math flags.
  void SetFastMathFlags(FPOptions FPFeatures);

  // Truncate or extend a boolean vector to the requested number of elements.
  llvm::Value *emitBoolVecConversion(llvm::Value *SrcVec,
                                     unsigned NumElementsDst,
                                     const llvm::Twine &Name = "");

  /// Attach range metadata to \p Load when the loaded type \p Ty implies a
  /// known value range (see getRangeForLoadFromType).
  void maybeAttachRangeForLoad(llvm::LoadInst *Load, QualType Ty,
                               SourceLocation Loc);
5406
private:
  // Emit a convergence_loop token for the given |BB| and return it.
  llvm::ConvergenceControlInst *emitConvergenceLoopToken(llvm::BasicBlock *BB);

  // Add a convergence_ctrl token to the call |Input| and return the resulting
  // call.
  llvm::CallBase *addConvergenceControlToken(llvm::CallBase *Input);

  // Find the convergence_entry instruction of |F|, or emit one if none
  // exists. Returns the convergence instruction.
  llvm::ConvergenceControlInst *
  getOrEmitConvergenceEntryToken(llvm::Function *F);

private:
  /// Return range metadata describing a load of type \p Ty, if any applies.
  llvm::MDNode *getRangeForLoadFromType(QualType Ty);
  /// Emit a return of the r-value \p RV, which has type \p Ty.
  void EmitReturnOfRValue(RValue RV, QualType Ty);

  /// Record that the placeholder instruction \p Old should later be replaced
  /// by \p New (stored in DeferredReplacements).
  void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);

  /// Placeholder instructions paired with the values that will replace them,
  /// populated by deferPlaceholderReplacement.
  llvm::SmallVector<std::pair<llvm::WeakTrackingVH, llvm::Value *>, 4>
      DeferredReplacements;
5429
5430 /// Set the address of a local variable.
5431 void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
5432 assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
5433 LocalDeclMap.insert(KV: {VD, Addr});
5434 }
5435
  /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
  /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
  ///
  /// \param AI - The first function argument of the expansion.
  void ExpandTypeFromArgs(QualType Ty, LValue Dst,
                          llvm::Function::arg_iterator &AI);

  /// ExpandTypeToArgs - Expand an CallArg \arg Arg, with the LLVM type for \arg
  /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
  /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
  void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
                        SmallVectorImpl<llvm::Value *> &IRCallArgs,
                        unsigned &IRCallArgPos);

  /// Emit the inline-asm input \p InputExpr according to its constraint
  /// \p Info, updating \p ConstraintStr as needed. Returns the value and its
  /// IR type.
  std::pair<llvm::Value *, llvm::Type *>
  EmitAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
               std::string &ConstraintStr);

  /// As EmitAsmInput, but for an input already emitted as the l-value
  /// \p InputValue of type \p InputType.
  std::pair<llvm::Value *, llvm::Type *>
  EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue,
                     QualType InputType, std::string &ConstraintStr,
                     SourceLocation Loc);

  /// Attempts to statically evaluate the object size of E. If that
  /// fails, emits code to figure the size of E out for us. This is
  /// pass_object_size aware.
  ///
  /// If EmittedExpr is non-null, this will use that instead of re-emitting E.
  llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                               llvm::IntegerType *ResType,
                                               llvm::Value *EmittedE,
                                               bool IsDynamic);

  /// Emits the size of E, as required by __builtin_object_size. This
  /// function is aware of pass_object_size parameters, and will act accordingly
  /// if E is a parameter with the pass_object_size attribute.
  llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
                                     llvm::IntegerType *ResType,
                                     llvm::Value *EmittedE, bool IsDynamic);

  /// Helpers for the object-size emission above handling expressions whose
  /// size is described by a 'counted_by' annotation. NOTE(review): roles
  /// inferred from the names; confirm against the implementation.
  llvm::Value *emitCountedBySize(const Expr *E, llvm::Value *EmittedE,
                                 unsigned Type, llvm::IntegerType *ResType);

  llvm::Value *emitCountedByMemberSize(const MemberExpr *E, const Expr *Idx,
                                       llvm::Value *EmittedE,
                                       QualType CastedArrayElementTy,
                                       unsigned Type,
                                       llvm::IntegerType *ResType);

  llvm::Value *emitCountedByPointerSize(const ImplicitCastExpr *E,
                                        const Expr *Idx, llvm::Value *EmittedE,
                                        QualType CastedArrayElementTy,
                                        unsigned Type,
                                        llvm::IntegerType *ResType);

  /// Emit a zero or pattern fill as the initial value of the automatic
  /// variable \p D, whose storage is at \p Loc.
  void emitZeroOrPatternForAutoVarInit(QualType type, const VarDecl &D,
                                       Address Loc);
5493
public:
  /// The evaluation-order constraint the source language imposes on the
  /// arguments of a call (see EmitCallArgs).
  enum class EvaluationOrder {
    /// No language constraints on evaluation order.
    Default,
    /// Language semantics require left-to-right evaluation.
    ForceLeftToRight,
    /// Language semantics require right-to-left evaluation.
    ForceRightToLeft
  };
5503
5504 // Wrapper for function prototype sources. Wraps either a FunctionProtoType or
5505 // an ObjCMethodDecl.
5506 struct PrototypeWrapper {
5507 llvm::PointerUnion<const FunctionProtoType *, const ObjCMethodDecl *> P;
5508
5509 PrototypeWrapper(const FunctionProtoType *FT) : P(FT) {}
5510 PrototypeWrapper(const ObjCMethodDecl *MD) : P(MD) {}
5511 };
5512
  /// Emit the call arguments in \p ArgRange into \p Args against the
  /// parameter list described by \p Prototype. The first \p ParamsToSkip
  /// prototype parameters are skipped, and \p Order selects the argument
  /// evaluation order.
  void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype,
                    llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
                    AbstractCallee AC = AbstractCallee(),
                    unsigned ParamsToSkip = 0,
                    EvaluationOrder Order = EvaluationOrder::Default);

  /// EmitPointerWithAlignment - Given an expression with a pointer type,
  /// emit the value and compute our best estimate of the alignment of the
  /// pointee.
  ///
  /// \param BaseInfo - If non-null, this will be initialized with
  /// information about the source of the alignment and the may-alias
  /// attribute. Note that this function will conservatively fall back on
  /// the type when it doesn't recognize the expression and may-alias will
  /// be set to false.
  ///
  /// One reasonable way to use this information is when there's a language
  /// guarantee that the pointer must be aligned to some stricter value, and
  /// we're simply trying to ensure that sufficiently obvious uses of under-
  /// aligned objects don't get miscompiled; for example, a placement new
  /// into the address of a local variable. In such a case, it's quite
  /// reasonable to just ignore the returned alignment when it isn't from an
  /// explicit source.
  Address
  EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo = nullptr,
                           TBAAAccessInfo *TBAAInfo = nullptr,
                           KnownNonNull_t IsKnownNonNull = NotKnownNonNull);

  /// If \p E references a parameter with pass_object_size info or a constant
  /// array size modifier, emit the object size divided by the size of \p EltTy.
  /// Otherwise return null.
  llvm::Value *LoadPassedObjectSize(const Expr *E, QualType EltTy);

  /// Emit a sanitizer statistics report of kind \p SSK for the current
  /// location.
  void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
5547
5548 struct FMVResolverOption {
5549 llvm::Function *Function;
5550 llvm::SmallVector<StringRef, 8> Features;
5551 std::optional<StringRef> Architecture;
5552
5553 FMVResolverOption(llvm::Function *F, ArrayRef<StringRef> Feats,
5554 std::optional<StringRef> Arch = std::nullopt)
5555 : Function(F), Features(Feats), Architecture(Arch) {}
5556 };
5557
  // Emits the body of a multiversion function's resolver. Assumes that the
  // options are already sorted in the proper order, with the 'default' option
  // last (if it exists).
  void EmitMultiVersionResolver(llvm::Function *Resolver,
                                ArrayRef<FMVResolverOption> Options);
  // Target-specific implementations used by EmitMultiVersionResolver.
  void EmitX86MultiVersionResolver(llvm::Function *Resolver,
                                   ArrayRef<FMVResolverOption> Options);
  void EmitAArch64MultiVersionResolver(llvm::Function *Resolver,
                                       ArrayRef<FMVResolverOption> Options);
  void EmitRISCVMultiVersionResolver(llvm::Function *Resolver,
                                     ArrayRef<FMVResolverOption> Options);

  /// Compute the address of the PFP field \p Field within the record at
  /// \p RecordPtr.
  Address EmitAddressOfPFPField(Address RecordPtr, const PFPField &Field);
  /// Overload taking the already-computed field address \p FieldPtr for the
  /// declared field \p Field.
  Address EmitAddressOfPFPField(Address RecordPtr, Address FieldPtr,
                                const FieldDecl *Field);

private:
  /// Return the type with which \p Arg should be passed through a varargs
  /// ellipsis.
  QualType getVarArgType(const Expr *Arg);

  void EmitDeclMetadata();

  /// Build the helpers for a __block byref variable described by
  /// \p byrefType and \p emission. NOTE(review): details inferred from the
  /// names; confirm against the implementation.
  BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,
                                       const AutoVarEmission &emission);

  /// Attach ObjC ARC exception-related metadata to \p Inst.
  void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);

  // Lowering helpers for target CPU-dispatch builtins and for forming the
  // condition values used by the multiversion resolvers above.
  llvm::Value *GetValueForARMHint(unsigned BuiltinID);
  llvm::Value *EmitX86CpuIs(const CallExpr *E);
  llvm::Value *EmitX86CpuIs(StringRef CPUStr);
  llvm::Value *EmitX86CpuSupports(const CallExpr *E);
  llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
  llvm::Value *EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask);
  llvm::Value *EmitX86CpuInit();
  llvm::Value *FormX86ResolverCondition(const FMVResolverOption &RO);
  llvm::Value *EmitAArch64CpuInit();
  llvm::Value *FormAArch64ResolverCondition(const FMVResolverOption &RO);
  llvm::Value *EmitAArch64CpuSupports(const CallExpr *E);
  llvm::Value *EmitAArch64CpuSupports(ArrayRef<StringRef> FeatureStrs);
};
5597
5598inline DominatingLLVMValue::saved_type
5599DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
5600 if (!needsSaving(value))
5601 return saved_type(value, false);
5602
5603 // Otherwise, we need an alloca.
5604 auto align = CharUnits::fromQuantity(
5605 Quantity: CGF.CGM.getDataLayout().getPrefTypeAlign(Ty: value->getType()));
5606 Address alloca =
5607 CGF.CreateTempAlloca(Ty: value->getType(), align, Name: "cond-cleanup.save");
5608 CGF.Builder.CreateStore(Val: value, Addr: alloca);
5609
5610 return saved_type(alloca.emitRawPointer(CGF), true);
5611}
5612
5613inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
5614 saved_type value) {
5615 // If the value says it wasn't saved, trust that it's still dominating.
5616 if (!value.getInt())
5617 return value.getPointer();
5618
5619 // Otherwise, it should be an alloca instruction, as set up in save().
5620 auto alloca = cast<llvm::AllocaInst>(Val: value.getPointer());
5621 return CGF.Builder.CreateAlignedLoad(Ty: alloca->getAllocatedType(), Ptr: alloca,
5622 Align: alloca->getAlign());
5623}
5624
5625} // end namespace CodeGen
5626
// Map the LangOptions floating-point exception behavior kind \p Kind into
// the corresponding llvm::fp::ExceptionBehavior enum used in the IR.
llvm::fp::ExceptionBehavior
ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind);
5631} // end namespace clang
5632
5633#endif
5634